1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/kthread.h>
38 #include <linux/linkage.h>
39 #include <linux/raid/md.h>
40 #include <linux/raid/bitmap.h>
41 #include <linux/sysctl.h>
42 #include <linux/buffer_head.h> /* for invalidate_bdev */
43 #include <linux/poll.h>
44 #include <linux/mutex.h>
45 #include <linux/ctype.h>
46 #include <linux/freezer.h>
47
48 #include <linux/init.h>
49
50 #include <linux/file.h>
51
52 #ifdef CONFIG_KMOD
53 #include <linux/kmod.h>
54 #endif
55
56 #include <asm/unaligned.h>
57
58 #define MAJOR_NR MD_MAJOR
59 #define MD_DRIVER
60
61 /* 63 partitions with the alternate major number (mdp) */
62 #define MdpMinorShift 6
63
64 #define DEBUG 0
65 #define dprintk(x...) ((void)(DEBUG && printk(x)))
66
67
68 #ifndef MODULE
69 static void autostart_arrays (int part);
70 #endif
71
72 static LIST_HEAD(pers_list);
73 static DEFINE_SPINLOCK(pers_lock);
74
75 static void md_print_devices(void);
76
77 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
78
79 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
80
81 /*
82  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
83  * is 1000 KB/sec, so the extra system load does not show up that much.
84  * Increase it if you want to have more _guaranteed_ speed. Note that
85  * the RAID driver will use the maximum available bandwidth if the IO
86  * subsystem is idle. There is also an 'absolute maximum' reconstruction
87  * speed limit - in case reconstruction slows down your system despite
88  * idle IO detection.
89  *
90  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
91  * or /sys/block/mdX/md/sync_speed_{min,max}
92  */
93
94 static int sysctl_speed_limit_min = 1000;
95 static int sysctl_speed_limit_max = 200000;
96 static inline int speed_min(mddev_t *mddev)
97 {
98         return mddev->sync_speed_min ?
99                 mddev->sync_speed_min : sysctl_speed_limit_min;
100 }
101
102 static inline int speed_max(mddev_t *mddev)
103 {
104         return mddev->sync_speed_max ?
105                 mddev->sync_speed_max : sysctl_speed_limit_max;
106 }
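/*
 * Illustrative note (not part of the original source): the per-array sysfs
 * values take precedence over the global sysctls, so the limits above can be
 * tuned at run time from user space, e.g.:
 *
 *     echo 5000   > /proc/sys/dev/raid/speed_limit_min    (all arrays)
 *     echo 100000 > /sys/block/md0/md/sync_speed_max      (md0 only)
 *
 * Both values are in KB/sec, like sysctl_speed_limit_{min,max}.
 */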
107
108 static struct ctl_table_header *raid_table_header;
109
110 static ctl_table raid_table[] = {
111         {
112                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
113                 .procname       = "speed_limit_min",
114                 .data           = &sysctl_speed_limit_min,
115                 .maxlen         = sizeof(int),
116                 .mode           = S_IRUGO|S_IWUSR,
117                 .proc_handler   = &proc_dointvec,
118         },
119         {
120                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
121                 .procname       = "speed_limit_max",
122                 .data           = &sysctl_speed_limit_max,
123                 .maxlen         = sizeof(int),
124                 .mode           = S_IRUGO|S_IWUSR,
125                 .proc_handler   = &proc_dointvec,
126         },
127         { .ctl_name = 0 }
128 };
129
130 static ctl_table raid_dir_table[] = {
131         {
132                 .ctl_name       = DEV_RAID,
133                 .procname       = "raid",
134                 .maxlen         = 0,
135                 .mode           = S_IRUGO|S_IXUGO,
136                 .child          = raid_table,
137         },
138         { .ctl_name = 0 }
139 };
140
141 static ctl_table raid_root_table[] = {
142         {
143                 .ctl_name       = CTL_DEV,
144                 .procname       = "dev",
145                 .maxlen         = 0,
146                 .mode           = 0555,
147                 .child          = raid_dir_table,
148         },
149         { .ctl_name = 0 }
150 };
151
152 static struct block_device_operations md_fops;
153
154 static int start_readonly;
155
156 /*
157  * We have a system wide 'event count' that is incremented
158  * on any 'interesting' event, and readers of /proc/mdstat
159  * can use 'poll' or 'select' to find out when the event
160  * count increases.
161  *
162  * Events are:
163  *  start array, stop array, error, add device, remove device,
164  *  start build, activate spare
165  */
166 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
167 static atomic_t md_event_count;
168 void md_new_event(mddev_t *mddev)
169 {
170         atomic_inc(&md_event_count);
171         wake_up(&md_event_waiters);
172 }
173 EXPORT_SYMBOL_GPL(md_new_event);
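/*
 * Illustrative sketch (not part of the original source): a user-space
 * monitor can watch for these events by polling /proc/mdstat and
 * re-reading it whenever it is signalled, roughly:
 *
 *     char buf[4096];
 *     int fd = open("/proc/mdstat", O_RDONLY);
 *     struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *     for (;;) {
 *             poll(&pfd, 1, -1);
 *             lseek(fd, 0, SEEK_SET);
 *             read(fd, buf, sizeof(buf));    (parse the new state here)
 *     }
 *
 * The exact revents bits are a detail of the mdstat poll implementation;
 * this only shows the open/poll/re-read pattern described above.
 */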
174
175 /* Alternate version that can be called from interrupts
176  * when calling sysfs_notify isn't needed.
177  */
178 static void md_new_event_inintr(mddev_t *mddev)
179 {
180         atomic_inc(&md_event_count);
181         wake_up(&md_event_waiters);
182 }
183
184 /*
185  * Enables iteration over all existing md arrays.
186  * all_mddevs_lock protects this list.
187  */
188 static LIST_HEAD(all_mddevs);
189 static DEFINE_SPINLOCK(all_mddevs_lock);
190
191
192 /*
193  * iterates through all used mddevs in the system.
194  * We take care to grab the all_mddevs_lock whenever navigating
195  * the list, and to always hold a refcount when unlocked.
196  * Any code which breaks out of this loop while owning
197  * a reference to the current mddev must mddev_put it.
198  */
199 #define for_each_mddev(mddev,tmp)                                       \
200                                                                         \
201         for (({ spin_lock(&all_mddevs_lock);                            \
202                 tmp = all_mddevs.next;                                  \
203                 mddev = NULL;});                                        \
204              ({ if (tmp != &all_mddevs)                                 \
205                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
206                 spin_unlock(&all_mddevs_lock);                          \
207                 if (mddev) mddev_put(mddev);                            \
208                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
209                 tmp != &all_mddevs;});                                  \
210              ({ spin_lock(&all_mddevs_lock);                            \
211                 tmp = tmp->next;})                                      \
212                 )
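/*
 * Illustrative usage (not part of the original source): for_each_mddev()
 * behaves like an ordinary list iterator while hiding the locking and
 * refcounting described above, e.g.:
 *
 *     mddev_t *mddev;
 *     struct list_head *tmp;
 *
 *     for_each_mddev(mddev, tmp)
 *             printk(KERN_INFO "md: considering %s\n", mdname(mddev));
 *
 * A caller that breaks out of the loop early still holds a reference to the
 * current mddev and must drop it with mddev_put(mddev).
 */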
213
214
215 static int md_fail_request (struct request_queue *q, struct bio *bio)
216 {
217         bio_io_error(bio);
218         return 0;
219 }
220
221 static inline mddev_t *mddev_get(mddev_t *mddev)
222 {
223         atomic_inc(&mddev->active);
224         return mddev;
225 }
226
227 static void mddev_put(mddev_t *mddev)
228 {
229         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
230                 return;
231         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
232                 list_del(&mddev->all_mddevs);
233                 spin_unlock(&all_mddevs_lock);
234                 blk_cleanup_queue(mddev->queue);
235                 kobject_put(&mddev->kobj);
236         } else
237                 spin_unlock(&all_mddevs_lock);
238 }
239
240 static mddev_t * mddev_find(dev_t unit)
241 {
242         mddev_t *mddev, *new = NULL;
243
244  retry:
245         spin_lock(&all_mddevs_lock);
246         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
247                 if (mddev->unit == unit) {
248                         mddev_get(mddev);
249                         spin_unlock(&all_mddevs_lock);
250                         kfree(new);
251                         return mddev;
252                 }
253
254         if (new) {
255                 list_add(&new->all_mddevs, &all_mddevs);
256                 spin_unlock(&all_mddevs_lock);
257                 return new;
258         }
259         spin_unlock(&all_mddevs_lock);
260
261         new = kzalloc(sizeof(*new), GFP_KERNEL);
262         if (!new)
263                 return NULL;
264
265         new->unit = unit;
266         if (MAJOR(unit) == MD_MAJOR)
267                 new->md_minor = MINOR(unit);
268         else
269                 new->md_minor = MINOR(unit) >> MdpMinorShift;
270
271         mutex_init(&new->reconfig_mutex);
272         INIT_LIST_HEAD(&new->disks);
273         INIT_LIST_HEAD(&new->all_mddevs);
274         init_timer(&new->safemode_timer);
275         atomic_set(&new->active, 1);
276         spin_lock_init(&new->write_lock);
277         init_waitqueue_head(&new->sb_wait);
278         init_waitqueue_head(&new->recovery_wait);
279         new->reshape_position = MaxSector;
280         new->resync_min = 0;
281         new->resync_max = MaxSector;
282         new->level = LEVEL_NONE;
283
284         new->queue = blk_alloc_queue(GFP_KERNEL);
285         if (!new->queue) {
286                 kfree(new);
287                 return NULL;
288         }
289         /* Can be unlocked because the queue is new: no concurrency */
290         queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
291
292         blk_queue_make_request(new->queue, md_fail_request);
293
294         goto retry;
295 }
296
297 static inline int mddev_lock(mddev_t * mddev)
298 {
299         return mutex_lock_interruptible(&mddev->reconfig_mutex);
300 }
301
302 static inline int mddev_trylock(mddev_t * mddev)
303 {
304         return mutex_trylock(&mddev->reconfig_mutex);
305 }
306
307 static inline void mddev_unlock(mddev_t * mddev)
308 {
309         mutex_unlock(&mddev->reconfig_mutex);
310
311         md_wakeup_thread(mddev->thread);
312 }
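/*
 * Illustrative pattern (not part of the original source): reconfiguration
 * paths in this file bracket their work with these helpers, roughly:
 *
 *     if (mddev_lock(mddev))
 *             return -EINTR;            (interrupted while waiting)
 *     ... examine or modify the array under reconfig_mutex ...
 *     mddev_unlock(mddev);              (also pokes the md thread)
 */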
313
314 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
315 {
316         mdk_rdev_t * rdev;
317         struct list_head *tmp;
318
319         rdev_for_each(rdev, tmp, mddev) {
320                 if (rdev->desc_nr == nr)
321                         return rdev;
322         }
323         return NULL;
324 }
325
326 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
327 {
328         struct list_head *tmp;
329         mdk_rdev_t *rdev;
330
331         rdev_for_each(rdev, tmp, mddev) {
332                 if (rdev->bdev->bd_dev == dev)
333                         return rdev;
334         }
335         return NULL;
336 }
337
338 static struct mdk_personality *find_pers(int level, char *clevel)
339 {
340         struct mdk_personality *pers;
341         list_for_each_entry(pers, &pers_list, list) {
342                 if (level != LEVEL_NONE && pers->level == level)
343                         return pers;
344                 if (strcmp(pers->name, clevel)==0)
345                         return pers;
346         }
347         return NULL;
348 }
349
350 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
351 {
352         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
353         return MD_NEW_SIZE_BLOCKS(size);
354 }
355
356 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
357 {
358         sector_t size;
359
360         size = rdev->sb_offset;
361
362         if (chunk_size)
363                 size &= ~((sector_t)chunk_size/1024 - 1);
364         return size;
365 }
366
367 static int alloc_disk_sb(mdk_rdev_t * rdev)
368 {
369         if (rdev->sb_page)
370                 MD_BUG();
371
372         rdev->sb_page = alloc_page(GFP_KERNEL);
373         if (!rdev->sb_page) {
374                 printk(KERN_ALERT "md: out of memory.\n");
375                 return -EINVAL;
376         }
377
378         return 0;
379 }
380
381 static void free_disk_sb(mdk_rdev_t * rdev)
382 {
383         if (rdev->sb_page) {
384                 put_page(rdev->sb_page);
385                 rdev->sb_loaded = 0;
386                 rdev->sb_page = NULL;
387                 rdev->sb_offset = 0;
388                 rdev->size = 0;
389         }
390 }
391
392
393 static void super_written(struct bio *bio, int error)
394 {
395         mdk_rdev_t *rdev = bio->bi_private;
396         mddev_t *mddev = rdev->mddev;
397
398         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
399                 printk("md: super_written gets error=%d, uptodate=%d\n",
400                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
401                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
402                 md_error(mddev, rdev);
403         }
404
405         if (atomic_dec_and_test(&mddev->pending_writes))
406                 wake_up(&mddev->sb_wait);
407         bio_put(bio);
408 }
409
410 static void super_written_barrier(struct bio *bio, int error)
411 {
412         struct bio *bio2 = bio->bi_private;
413         mdk_rdev_t *rdev = bio2->bi_private;
414         mddev_t *mddev = rdev->mddev;
415
416         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
417             error == -EOPNOTSUPP) {
418                 unsigned long flags;
419                 /* barriers don't appear to be supported :-( */
420                 set_bit(BarriersNotsupp, &rdev->flags);
421                 mddev->barriers_work = 0;
422                 spin_lock_irqsave(&mddev->write_lock, flags);
423                 bio2->bi_next = mddev->biolist;
424                 mddev->biolist = bio2;
425                 spin_unlock_irqrestore(&mddev->write_lock, flags);
426                 wake_up(&mddev->sb_wait);
427                 bio_put(bio);
428         } else {
429                 bio_put(bio2);
430                 bio->bi_private = rdev;
431                 super_written(bio, error);
432         }
433 }
434
435 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
436                    sector_t sector, int size, struct page *page)
437 {
438         /* write first size bytes of page to sector of rdev
439          * Increment mddev->pending_writes before returning
440          * and decrement it on completion, waking up sb_wait
441          * if zero is reached.
442          * If an error occurred, call md_error
443          *
444          * As we might need to resubmit the request if BIO_RW_BARRIER
445          * causes -EOPNOTSUPP, we allocate a spare bio...
446          */
447         struct bio *bio = bio_alloc(GFP_NOIO, 1);
448         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
449
450         bio->bi_bdev = rdev->bdev;
451         bio->bi_sector = sector;
452         bio_add_page(bio, page, size, 0);
453         bio->bi_private = rdev;
454         bio->bi_end_io = super_written;
455         bio->bi_rw = rw;
456
457         atomic_inc(&mddev->pending_writes);
458         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
459                 struct bio *rbio;
460                 rw |= (1<<BIO_RW_BARRIER);
461                 rbio = bio_clone(bio, GFP_NOIO);
462                 rbio->bi_private = bio;
463                 rbio->bi_end_io = super_written_barrier;
464                 submit_bio(rw, rbio);
465         } else
466                 submit_bio(rw, bio);
467 }
468
469 void md_super_wait(mddev_t *mddev)
470 {
471         /* wait for all superblock writes that were scheduled to complete.
472          * if any had to be retried (due to BARRIER problems), retry them
473          */
474         DEFINE_WAIT(wq);
475         for(;;) {
476                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
477                 if (atomic_read(&mddev->pending_writes)==0)
478                         break;
479                 while (mddev->biolist) {
480                         struct bio *bio;
481                         spin_lock_irq(&mddev->write_lock);
482                         bio = mddev->biolist;
483                         mddev->biolist = bio->bi_next ;
484                         bio->bi_next = NULL;
485                         spin_unlock_irq(&mddev->write_lock);
486                         submit_bio(bio->bi_rw, bio);
487                 }
488                 schedule();
489         }
490         finish_wait(&mddev->sb_wait, &wq);
491 }
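/*
 * Illustrative note (not part of the original source): md_super_write() and
 * md_super_wait() are used as a pair when the superblocks are updated -
 * queue one write per member device, then wait for them all, roughly:
 *
 *     rdev_for_each(rdev, tmp, mddev)
 *             md_super_write(mddev, rdev, rdev->sb_offset << 1,
 *                            rdev->sb_size, rdev->sb_page);
 *     md_super_wait(mddev);
 */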
492
493 static void bi_complete(struct bio *bio, int error)
494 {
495         complete((struct completion*)bio->bi_private);
496 }
497
498 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
499                    struct page *page, int rw)
500 {
501         struct bio *bio = bio_alloc(GFP_NOIO, 1);
502         struct completion event;
503         int ret;
504
505         rw |= (1 << BIO_RW_SYNC);
506
507         bio->bi_bdev = bdev;
508         bio->bi_sector = sector;
509         bio_add_page(bio, page, size, 0);
510         init_completion(&event);
511         bio->bi_private = &event;
512         bio->bi_end_io = bi_complete;
513         submit_bio(rw, bio);
514         wait_for_completion(&event);
515
516         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
517         bio_put(bio);
518         return ret;
519 }
520 EXPORT_SYMBOL_GPL(sync_page_io);
521
522 static int read_disk_sb(mdk_rdev_t * rdev, int size)
523 {
524         char b[BDEVNAME_SIZE];
525         if (!rdev->sb_page) {
526                 MD_BUG();
527                 return -EINVAL;
528         }
529         if (rdev->sb_loaded)
530                 return 0;
531
532
533         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
534                 goto fail;
535         rdev->sb_loaded = 1;
536         return 0;
537
538 fail:
539         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
540                 bdevname(rdev->bdev,b));
541         return -EINVAL;
542 }
543
544 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
545 {
546         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
547                 (sb1->set_uuid1 == sb2->set_uuid1) &&
548                 (sb1->set_uuid2 == sb2->set_uuid2) &&
549                 (sb1->set_uuid3 == sb2->set_uuid3))
550
551                 return 1;
552
553         return 0;
554 }
555
556
557 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
558 {
559         int ret;
560         mdp_super_t *tmp1, *tmp2;
561
562         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
563         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
564
565         if (!tmp1 || !tmp2) {
566                 ret = 0;
567                 printk(KERN_INFO "md.c: out of memory, cannot compare superblocks!\n");
568                 goto abort;
569         }
570
571         *tmp1 = *sb1;
572         *tmp2 = *sb2;
573
574         /*
575          * nr_disks is not constant
576          */
577         tmp1->nr_disks = 0;
578         tmp2->nr_disks = 0;
579
580         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
581                 ret = 0;
582         else
583                 ret = 1;
584
585 abort:
586         kfree(tmp1);
587         kfree(tmp2);
588         return ret;
589 }
590
591
592 static u32 md_csum_fold(u32 csum)
593 {
594         csum = (csum & 0xffff) + (csum >> 16);
595         return (csum & 0xffff) + (csum >> 16);
596 }
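/*
 * Worked example (illustrative, not part of the original source):
 * md_csum_fold(0x0001ffff):
 *     first pass:  0xffff + 0x0001 = 0x10000
 *     second pass: 0x0000 + 0x0001 = 0x0001
 * so the 32-bit sum folds down to the 16-bit value 0x0001.
 */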
597
598 static unsigned int calc_sb_csum(mdp_super_t * sb)
599 {
600         u64 newcsum = 0;
601         u32 *sb32 = (u32*)sb;
602         int i;
603         unsigned int disk_csum, csum;
604
605         disk_csum = sb->sb_csum;
606         sb->sb_csum = 0;
607
608         for (i = 0; i < MD_SB_BYTES/4 ; i++)
609                 newcsum += sb32[i];
610         csum = (newcsum & 0xffffffff) + (newcsum>>32);
611
612
613 #ifdef CONFIG_ALPHA
614         /* This used to use csum_partial, which was wrong for several
615          * reasons including that different results are returned on
616          * different architectures.  It isn't critical that we get exactly
617          * the same return value as before (we always csum_fold before
618          * testing, and that removes any differences).  However as we
619          * know that csum_partial always returned a 16bit value on
620          * alphas, do a fold to maximise conformity to previous behaviour.
621          */
622         sb->sb_csum = md_csum_fold(disk_csum);
623 #else
624         sb->sb_csum = disk_csum;
625 #endif
626         return csum;
627 }
628
629
630 /*
631  * Handle superblock details.
632  * We want to be able to handle multiple superblock formats
633  * so we have a common interface to them all, and an array of
634  * different handlers.
635  * We rely on user-space to write the initial superblock, and support
636  * reading and updating of superblocks.
637  * Interface methods are:
638  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
639  *      loads and validates a superblock on dev.
640  *      if refdev != NULL, compare superblocks on both devices
641  *    Return:
642  *      0 - dev has a superblock that is compatible with refdev
643  *      1 - dev has a superblock that is compatible and newer than refdev
644  *          so dev should be used as the refdev in future
645  *     -EINVAL superblock incompatible or invalid
646  *     -othererror e.g. -EIO
647  *
648  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
649  *      Verify that dev is acceptable into mddev.
650  *       The first time, mddev->raid_disks will be 0, and data from
651  *       dev should be merged in.  Subsequent calls check that dev
652  *       is new enough.  Return 0 or -EINVAL
653  *
654  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
655  *     Update the superblock for rdev with data in mddev
656  *     This does not write to disc.
657  *
658  */
659
660 struct super_type  {
661         char            *name;
662         struct module   *owner;
663         int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
664         int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
665         void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
666 };
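/*
 * Illustrative sketch (not part of the original source): a metadata format
 * becomes usable by adding an entry to the super_types[] array defined
 * further down, e.g. for a hypothetical format "9.99":
 *
 *     [2] = {
 *             .name           = "9.99",
 *             .owner          = THIS_MODULE,
 *             .load_super     = super_999_load,
 *             .validate_super = super_999_validate,
 *             .sync_super     = super_999_sync,
 *     },
 *
 * The three methods must follow the contract documented in the comment
 * above (load_super return values, validate_super merging rules, and
 * sync_super updating only the in-memory superblock).
 */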
667
668 /*
669  * load_super for 0.90.0 
670  */
671 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
672 {
673         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
674         mdp_super_t *sb;
675         int ret;
676         sector_t sb_offset;
677
678         /*
679          * Calculate the position of the superblock,
680          * it's at the end of the disk.
681          *
682          * It also happens to be a multiple of 4Kb.
683          */
684         sb_offset = calc_dev_sboffset(rdev->bdev);
685         rdev->sb_offset = sb_offset;
686
687         ret = read_disk_sb(rdev, MD_SB_BYTES);
688         if (ret) return ret;
689
690         ret = -EINVAL;
691
692         bdevname(rdev->bdev, b);
693         sb = (mdp_super_t*)page_address(rdev->sb_page);
694
695         if (sb->md_magic != MD_SB_MAGIC) {
696                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
697                        b);
698                 goto abort;
699         }
700
701         if (sb->major_version != 0 ||
702             sb->minor_version < 90 ||
703             sb->minor_version > 91) {
704                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
705                         sb->major_version, sb->minor_version,
706                         b);
707                 goto abort;
708         }
709
710         if (sb->raid_disks <= 0)
711                 goto abort;
712
713         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
714                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
715                         b);
716                 goto abort;
717         }
718
719         rdev->preferred_minor = sb->md_minor;
720         rdev->data_offset = 0;
721         rdev->sb_size = MD_SB_BYTES;
722
723         if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
724                 if (sb->level != 1 && sb->level != 4
725                     && sb->level != 5 && sb->level != 6
726                     && sb->level != 10) {
727                         /* FIXME use a better test */
728                         printk(KERN_WARNING
729                                "md: bitmaps not supported for this level.\n");
730                         goto abort;
731                 }
732         }
733
734         if (sb->level == LEVEL_MULTIPATH)
735                 rdev->desc_nr = -1;
736         else
737                 rdev->desc_nr = sb->this_disk.number;
738
739         if (!refdev) {
740                 ret = 1;
741         } else {
742                 __u64 ev1, ev2;
743                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
744                 if (!uuid_equal(refsb, sb)) {
745                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
746                                 b, bdevname(refdev->bdev,b2));
747                         goto abort;
748                 }
749                 if (!sb_equal(refsb, sb)) {
750                         printk(KERN_WARNING "md: %s has same UUID"
751                                " but different superblock to %s\n",
752                                b, bdevname(refdev->bdev, b2));
753                         goto abort;
754                 }
755                 ev1 = md_event(sb);
756                 ev2 = md_event(refsb);
757                 if (ev1 > ev2)
758                         ret = 1;
759                 else 
760                         ret = 0;
761         }
762         rdev->size = calc_dev_size(rdev, sb->chunk_size);
763
764         if (rdev->size < sb->size && sb->level > 1)
765                 /* "this cannot possibly happen" ... */
766                 ret = -EINVAL;
767
768  abort:
769         return ret;
770 }
771
772 /*
773  * validate_super for 0.90.0
774  */
775 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
776 {
777         mdp_disk_t *desc;
778         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
779         __u64 ev1 = md_event(sb);
780
781         rdev->raid_disk = -1;
782         clear_bit(Faulty, &rdev->flags);
783         clear_bit(In_sync, &rdev->flags);
784         clear_bit(WriteMostly, &rdev->flags);
785         clear_bit(BarriersNotsupp, &rdev->flags);
786
787         if (mddev->raid_disks == 0) {
788                 mddev->major_version = 0;
789                 mddev->minor_version = sb->minor_version;
790                 mddev->patch_version = sb->patch_version;
791                 mddev->external = 0;
792                 mddev->chunk_size = sb->chunk_size;
793                 mddev->ctime = sb->ctime;
794                 mddev->utime = sb->utime;
795                 mddev->level = sb->level;
796                 mddev->clevel[0] = 0;
797                 mddev->layout = sb->layout;
798                 mddev->raid_disks = sb->raid_disks;
799                 mddev->size = sb->size;
800                 mddev->events = ev1;
801                 mddev->bitmap_offset = 0;
802                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
803
804                 if (mddev->minor_version >= 91) {
805                         mddev->reshape_position = sb->reshape_position;
806                         mddev->delta_disks = sb->delta_disks;
807                         mddev->new_level = sb->new_level;
808                         mddev->new_layout = sb->new_layout;
809                         mddev->new_chunk = sb->new_chunk;
810                 } else {
811                         mddev->reshape_position = MaxSector;
812                         mddev->delta_disks = 0;
813                         mddev->new_level = mddev->level;
814                         mddev->new_layout = mddev->layout;
815                         mddev->new_chunk = mddev->chunk_size;
816                 }
817
818                 if (sb->state & (1<<MD_SB_CLEAN))
819                         mddev->recovery_cp = MaxSector;
820                 else {
821                         if (sb->events_hi == sb->cp_events_hi && 
822                                 sb->events_lo == sb->cp_events_lo) {
823                                 mddev->recovery_cp = sb->recovery_cp;
824                         } else
825                                 mddev->recovery_cp = 0;
826                 }
827
828                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
829                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
830                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
831                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
832
833                 mddev->max_disks = MD_SB_DISKS;
834
835                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
836                     mddev->bitmap_file == NULL)
837                         mddev->bitmap_offset = mddev->default_bitmap_offset;
838
839         } else if (mddev->pers == NULL) {
840                 /* Insist on good event counter while assembling */
841                 ++ev1;
842                 if (ev1 < mddev->events) 
843                         return -EINVAL;
844         } else if (mddev->bitmap) {
845                 /* if adding to array with a bitmap, then we can accept an
846                  * older device ... but not too old.
847                  */
848                 if (ev1 < mddev->bitmap->events_cleared)
849                         return 0;
850         } else {
851                 if (ev1 < mddev->events)
852                         /* just a hot-add of a new device, leave raid_disk at -1 */
853                         return 0;
854         }
855
856         if (mddev->level != LEVEL_MULTIPATH) {
857                 desc = sb->disks + rdev->desc_nr;
858
859                 if (desc->state & (1<<MD_DISK_FAULTY))
860                         set_bit(Faulty, &rdev->flags);
861                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
862                             desc->raid_disk < mddev->raid_disks */) {
863                         set_bit(In_sync, &rdev->flags);
864                         rdev->raid_disk = desc->raid_disk;
865                 }
866                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
867                         set_bit(WriteMostly, &rdev->flags);
868         } else /* MULTIPATH are always insync */
869                 set_bit(In_sync, &rdev->flags);
870         return 0;
871 }
872
873 /*
874  * sync_super for 0.90.0
875  */
876 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
877 {
878         mdp_super_t *sb;
879         struct list_head *tmp;
880         mdk_rdev_t *rdev2;
881         int next_spare = mddev->raid_disks;
882
883
884         /* make rdev->sb match mddev data..
885          *
886          * 1/ zero out disks
887          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
888          * 3/ any empty disks < next_spare become removed
889          *
890          * disks[0] gets initialised to REMOVED because
891          * we cannot be sure from other fields if it has
892          * been initialised or not.
893          */
894         int i;
895         int active=0, working=0,failed=0,spare=0,nr_disks=0;
896
897         rdev->sb_size = MD_SB_BYTES;
898
899         sb = (mdp_super_t*)page_address(rdev->sb_page);
900
901         memset(sb, 0, sizeof(*sb));
902
903         sb->md_magic = MD_SB_MAGIC;
904         sb->major_version = mddev->major_version;
905         sb->patch_version = mddev->patch_version;
906         sb->gvalid_words  = 0; /* ignored */
907         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
908         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
909         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
910         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
911
912         sb->ctime = mddev->ctime;
913         sb->level = mddev->level;
914         sb->size  = mddev->size;
915         sb->raid_disks = mddev->raid_disks;
916         sb->md_minor = mddev->md_minor;
917         sb->not_persistent = 0;
918         sb->utime = mddev->utime;
919         sb->state = 0;
920         sb->events_hi = (mddev->events>>32);
921         sb->events_lo = (u32)mddev->events;
922
923         if (mddev->reshape_position == MaxSector)
924                 sb->minor_version = 90;
925         else {
926                 sb->minor_version = 91;
927                 sb->reshape_position = mddev->reshape_position;
928                 sb->new_level = mddev->new_level;
929                 sb->delta_disks = mddev->delta_disks;
930                 sb->new_layout = mddev->new_layout;
931                 sb->new_chunk = mddev->new_chunk;
932         }
933         mddev->minor_version = sb->minor_version;
934         if (mddev->in_sync)
935         {
936                 sb->recovery_cp = mddev->recovery_cp;
937                 sb->cp_events_hi = (mddev->events>>32);
938                 sb->cp_events_lo = (u32)mddev->events;
939                 if (mddev->recovery_cp == MaxSector)
940                         sb->state = (1<< MD_SB_CLEAN);
941         } else
942                 sb->recovery_cp = 0;
943
944         sb->layout = mddev->layout;
945         sb->chunk_size = mddev->chunk_size;
946
947         if (mddev->bitmap && mddev->bitmap_file == NULL)
948                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
949
950         sb->disks[0].state = (1<<MD_DISK_REMOVED);
951         rdev_for_each(rdev2, tmp, mddev) {
952                 mdp_disk_t *d;
953                 int desc_nr;
954                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
955                     && !test_bit(Faulty, &rdev2->flags))
956                         desc_nr = rdev2->raid_disk;
957                 else
958                         desc_nr = next_spare++;
959                 rdev2->desc_nr = desc_nr;
960                 d = &sb->disks[rdev2->desc_nr];
961                 nr_disks++;
962                 d->number = rdev2->desc_nr;
963                 d->major = MAJOR(rdev2->bdev->bd_dev);
964                 d->minor = MINOR(rdev2->bdev->bd_dev);
965                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
966                     && !test_bit(Faulty, &rdev2->flags))
967                         d->raid_disk = rdev2->raid_disk;
968                 else
969                         d->raid_disk = rdev2->desc_nr; /* compatibility */
970                 if (test_bit(Faulty, &rdev2->flags))
971                         d->state = (1<<MD_DISK_FAULTY);
972                 else if (test_bit(In_sync, &rdev2->flags)) {
973                         d->state = (1<<MD_DISK_ACTIVE);
974                         d->state |= (1<<MD_DISK_SYNC);
975                         active++;
976                         working++;
977                 } else {
978                         d->state = 0;
979                         spare++;
980                         working++;
981                 }
982                 if (test_bit(WriteMostly, &rdev2->flags))
983                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
984         }
985         /* now set the "removed" and "faulty" bits on any missing devices */
986         for (i=0 ; i < mddev->raid_disks ; i++) {
987                 mdp_disk_t *d = &sb->disks[i];
988                 if (d->state == 0 && d->number == 0) {
989                         d->number = i;
990                         d->raid_disk = i;
991                         d->state = (1<<MD_DISK_REMOVED);
992                         d->state |= (1<<MD_DISK_FAULTY);
993                         failed++;
994                 }
995         }
996         sb->nr_disks = nr_disks;
997         sb->active_disks = active;
998         sb->working_disks = working;
999         sb->failed_disks = failed;
1000         sb->spare_disks = spare;
1001
1002         sb->this_disk = sb->disks[rdev->desc_nr];
1003         sb->sb_csum = calc_sb_csum(sb);
1004 }
1005
1006 /*
1007  * version 1 superblock
1008  */
1009
1010 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1011 {
1012         __le32 disk_csum;
1013         u32 csum;
1014         unsigned long long newcsum;
1015         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1016         __le32 *isuper = (__le32*)sb;
1017         int i;
1018
1019         disk_csum = sb->sb_csum;
1020         sb->sb_csum = 0;
1021         newcsum = 0;
1022         for (i=0; size>=4; size -= 4 )
1023                 newcsum += le32_to_cpu(*isuper++);
1024
1025         if (size == 2)
1026                 newcsum += le16_to_cpu(*(__le16*) isuper);
1027
1028         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1029         sb->sb_csum = disk_csum;
1030         return cpu_to_le32(csum);
1031 }
1032
1033 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1034 {
1035         struct mdp_superblock_1 *sb;
1036         int ret;
1037         sector_t sb_offset;
1038         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1039         int bmask;
1040
1041         /*
1042          * Calculate the position of the superblock.
1043          * It is always aligned to a 4K boundary and
1044          * depending on minor_version, it can be:
1045          * 0: At least 8K, but less than 12K, from end of device
1046          * 1: At start of device
1047          * 2: 4K from start of device.
1048          */
1049         switch(minor_version) {
1050         case 0:
1051                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1052                 sb_offset -= 8*2;
1053                 sb_offset &= ~(sector_t)(4*2-1);
1054                 /* convert from sectors to K */
1055                 sb_offset /= 2;
1056                 break;
1057         case 1:
1058                 sb_offset = 0;
1059                 break;
1060         case 2:
1061                 sb_offset = 4;
1062                 break;
1063         default:
1064                 return -EINVAL;
1065         }
1066         rdev->sb_offset = sb_offset;
1067
1068         /* superblock is rarely larger than 1K, but it can be larger,
1069          * and it is safe to read 4k, so we do that
1070          */
1071         ret = read_disk_sb(rdev, 4096);
1072         if (ret) return ret;
1073
1074
1075         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1076
1077         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1078             sb->major_version != cpu_to_le32(1) ||
1079             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1080             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1081             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1082                 return -EINVAL;
1083
1084         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1085                 printk("md: invalid superblock checksum on %s\n",
1086                         bdevname(rdev->bdev,b));
1087                 return -EINVAL;
1088         }
1089         if (le64_to_cpu(sb->data_size) < 10) {
1090                 printk("md: data_size too small on %s\n",
1091                        bdevname(rdev->bdev,b));
1092                 return -EINVAL;
1093         }
1094         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1095                 if (sb->level != cpu_to_le32(1) &&
1096                     sb->level != cpu_to_le32(4) &&
1097                     sb->level != cpu_to_le32(5) &&
1098                     sb->level != cpu_to_le32(6) &&
1099                     sb->level != cpu_to_le32(10)) {
1100                         printk(KERN_WARNING
1101                                "md: bitmaps not supported for this level.\n");
1102                         return -EINVAL;
1103                 }
1104         }
1105
1106         rdev->preferred_minor = 0xffff;
1107         rdev->data_offset = le64_to_cpu(sb->data_offset);
1108         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1109
1110         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1111         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1112         if (rdev->sb_size & bmask)
1113                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1114
1115         if (minor_version
1116             && rdev->data_offset < sb_offset + (rdev->sb_size/512))
1117                 return -EINVAL;
1118
1119         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1120                 rdev->desc_nr = -1;
1121         else
1122                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1123
1124         if (!refdev) {
1125                 ret = 1;
1126         } else {
1127                 __u64 ev1, ev2;
1128                 struct mdp_superblock_1 *refsb = 
1129                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1130
1131                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1132                     sb->level != refsb->level ||
1133                     sb->layout != refsb->layout ||
1134                     sb->chunksize != refsb->chunksize) {
1135                         printk(KERN_WARNING "md: %s has strangely different"
1136                                 " superblock to %s\n",
1137                                 bdevname(rdev->bdev,b),
1138                                 bdevname(refdev->bdev,b2));
1139                         return -EINVAL;
1140                 }
1141                 ev1 = le64_to_cpu(sb->events);
1142                 ev2 = le64_to_cpu(refsb->events);
1143
1144                 if (ev1 > ev2)
1145                         ret = 1;
1146                 else
1147                         ret = 0;
1148         }
1149         if (minor_version)
1150                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1151         else
1152                 rdev->size = rdev->sb_offset;
1153         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1154                 return -EINVAL;
1155         rdev->size = le64_to_cpu(sb->data_size)/2;
1156         if (le32_to_cpu(sb->chunksize))
1157                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1158
1159         if (le64_to_cpu(sb->size) > rdev->size*2)
1160                 return -EINVAL;
1161         return ret;
1162 }
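/*
 * Worked example (illustrative, not part of the original source) for the
 * minor_version 0 placement computed above: on a device of 1000000 sectors,
 *     sb_offset = 1000000 - 16 = 999984      (step back 8K from the end)
 *     999984 & ~7 = 999984                   (already 4K aligned)
 *     999984 / 2 = 499992                    (rdev->sb_offset is in 1K units)
 * so the superblock starts 8K before the end of the device, matching the
 * "at least 8K, but less than 12K, from end of device" rule.
 */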
1163
1164 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1165 {
1166         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1167         __u64 ev1 = le64_to_cpu(sb->events);
1168
1169         rdev->raid_disk = -1;
1170         clear_bit(Faulty, &rdev->flags);
1171         clear_bit(In_sync, &rdev->flags);
1172         clear_bit(WriteMostly, &rdev->flags);
1173         clear_bit(BarriersNotsupp, &rdev->flags);
1174
1175         if (mddev->raid_disks == 0) {
1176                 mddev->major_version = 1;
1177                 mddev->patch_version = 0;
1178                 mddev->external = 0;
1179                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1180                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1181                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1182                 mddev->level = le32_to_cpu(sb->level);
1183                 mddev->clevel[0] = 0;
1184                 mddev->layout = le32_to_cpu(sb->layout);
1185                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1186                 mddev->size = le64_to_cpu(sb->size)/2;
1187                 mddev->events = ev1;
1188                 mddev->bitmap_offset = 0;
1189                 mddev->default_bitmap_offset = 1024 >> 9;
1190                 
1191                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1192                 memcpy(mddev->uuid, sb->set_uuid, 16);
1193
1194                 mddev->max_disks =  (4096-256)/2;
1195
1196                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1197                     mddev->bitmap_file == NULL )
1198                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1199
1200                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1201                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1202                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1203                         mddev->new_level = le32_to_cpu(sb->new_level);
1204                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1205                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1206                 } else {
1207                         mddev->reshape_position = MaxSector;
1208                         mddev->delta_disks = 0;
1209                         mddev->new_level = mddev->level;
1210                         mddev->new_layout = mddev->layout;
1211                         mddev->new_chunk = mddev->chunk_size;
1212                 }
1213
1214         } else if (mddev->pers == NULL) {
1215                 /* Insist on good event counter while assembling */
1216                 ++ev1;
1217                 if (ev1 < mddev->events)
1218                         return -EINVAL;
1219         } else if (mddev->bitmap) {
1220                 /* If adding to array with a bitmap, then we can accept an
1221                  * older device, but not too old.
1222                  */
1223                 if (ev1 < mddev->bitmap->events_cleared)
1224                         return 0;
1225         } else {
1226                 if (ev1 < mddev->events)
1227                         /* just a hot-add of a new device, leave raid_disk at -1 */
1228                         return 0;
1229         }
1230         if (mddev->level != LEVEL_MULTIPATH) {
1231                 int role;
1232                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1233                 switch(role) {
1234                 case 0xffff: /* spare */
1235                         break;
1236                 case 0xfffe: /* faulty */
1237                         set_bit(Faulty, &rdev->flags);
1238                         break;
1239                 default:
1240                         if ((le32_to_cpu(sb->feature_map) &
1241                              MD_FEATURE_RECOVERY_OFFSET))
1242                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1243                         else
1244                                 set_bit(In_sync, &rdev->flags);
1245                         rdev->raid_disk = role;
1246                         break;
1247                 }
1248                 if (sb->devflags & WriteMostly1)
1249                         set_bit(WriteMostly, &rdev->flags);
1250         } else /* MULTIPATH are always insync */
1251                 set_bit(In_sync, &rdev->flags);
1252
1253         return 0;
1254 }
1255
1256 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1257 {
1258         struct mdp_superblock_1 *sb;
1259         struct list_head *tmp;
1260         mdk_rdev_t *rdev2;
1261         int max_dev, i;
1262         /* make rdev->sb match mddev and rdev data. */
1263
1264         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1265
1266         sb->feature_map = 0;
1267         sb->pad0 = 0;
1268         sb->recovery_offset = cpu_to_le64(0);
1269         memset(sb->pad1, 0, sizeof(sb->pad1));
1270         memset(sb->pad2, 0, sizeof(sb->pad2));
1271         memset(sb->pad3, 0, sizeof(sb->pad3));
1272
1273         sb->utime = cpu_to_le64((__u64)mddev->utime);
1274         sb->events = cpu_to_le64(mddev->events);
1275         if (mddev->in_sync)
1276                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1277         else
1278                 sb->resync_offset = cpu_to_le64(0);
1279
1280         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1281
1282         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1283         sb->size = cpu_to_le64(mddev->size<<1);
1284
1285         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1286                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1287                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1288         }
1289
1290         if (rdev->raid_disk >= 0 &&
1291             !test_bit(In_sync, &rdev->flags) &&
1292             rdev->recovery_offset > 0) {
1293                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1294                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1295         }
1296
1297         if (mddev->reshape_position != MaxSector) {
1298                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1299                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1300                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1301                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1302                 sb->new_level = cpu_to_le32(mddev->new_level);
1303                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1304         }
1305
1306         max_dev = 0;
1307         rdev_for_each(rdev2, tmp, mddev)
1308                 if (rdev2->desc_nr+1 > max_dev)
1309                         max_dev = rdev2->desc_nr+1;
1310
1311         if (max_dev > le32_to_cpu(sb->max_dev))
1312                 sb->max_dev = cpu_to_le32(max_dev);
1313         for (i=0; i<max_dev;i++)
1314                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1315         
1316         rdev_for_each(rdev2, tmp, mddev) {
1317                 i = rdev2->desc_nr;
1318                 if (test_bit(Faulty, &rdev2->flags))
1319                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1320                 else if (test_bit(In_sync, &rdev2->flags))
1321                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1322                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1323                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1324                 else
1325                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1326         }
1327
1328         sb->sb_csum = calc_sb_1_csum(sb);
1329 }
1330
1331
1332 static struct super_type super_types[] = {
1333         [0] = {
1334                 .name   = "0.90.0",
1335                 .owner  = THIS_MODULE,
1336                 .load_super     = super_90_load,
1337                 .validate_super = super_90_validate,
1338                 .sync_super     = super_90_sync,
1339         },
1340         [1] = {
1341                 .name   = "md-1",
1342                 .owner  = THIS_MODULE,
1343                 .load_super     = super_1_load,
1344                 .validate_super = super_1_validate,
1345                 .sync_super     = super_1_sync,
1346         },
1347 };
1348
1349 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1350 {
1351         struct list_head *tmp, *tmp2;
1352         mdk_rdev_t *rdev, *rdev2;
1353
1354         rdev_for_each(rdev, tmp, mddev1)
1355                 rdev_for_each(rdev2, tmp2, mddev2)
1356                         if (rdev->bdev->bd_contains ==
1357                             rdev2->bdev->bd_contains)
1358                                 return 1;
1359
1360         return 0;
1361 }
1362
1363 static LIST_HEAD(pending_raid_disks);
1364
1365 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1366 {
1367         char b[BDEVNAME_SIZE];
1368         struct kobject *ko;
1369         char *s;
1370         int err;
1371
1372         if (rdev->mddev) {
1373                 MD_BUG();
1374                 return -EINVAL;
1375         }
1376
1377         /* prevent duplicates */
1378         if (find_rdev(mddev, rdev->bdev->bd_dev))
1379                 return -EEXIST;
1380
1381         /* make sure rdev->size exceeds mddev->size */
1382         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1383                 if (mddev->pers) {
1384                         /* Cannot change size, so fail
1385                          * If mddev->level <= 0, then we don't care
1386                          * about aligning sizes (e.g. linear)
1387                          */
1388                         if (mddev->level > 0)
1389                                 return -ENOSPC;
1390                 } else
1391                         mddev->size = rdev->size;
1392         }
1393
1394         /* Verify rdev->desc_nr is unique.
1395          * If it is -1, assign a free number, else
1396          * check number is not in use
1397          */
1398         if (rdev->desc_nr < 0) {
1399                 int choice = 0;
1400                 if (mddev->pers) choice = mddev->raid_disks;
1401                 while (find_rdev_nr(mddev, choice))
1402                         choice++;
1403                 rdev->desc_nr = choice;
1404         } else {
1405                 if (find_rdev_nr(mddev, rdev->desc_nr))
1406                         return -EBUSY;
1407         }
1408         bdevname(rdev->bdev,b);
1409         while ( (s=strchr(b, '/')) != NULL)
1410                 *s = '!';
1411
1412         rdev->mddev = mddev;
1413         printk(KERN_INFO "md: bind<%s>\n", b);
1414
1415         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1416                 goto fail;
1417
1418         if (rdev->bdev->bd_part)
1419                 ko = &rdev->bdev->bd_part->dev.kobj;
1420         else
1421                 ko = &rdev->bdev->bd_disk->dev.kobj;
1422         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1423                 kobject_del(&rdev->kobj);
1424                 goto fail;
1425         }
1426         list_add(&rdev->same_set, &mddev->disks);
1427         bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1428         return 0;
1429
1430  fail:
1431         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1432                b, mdname(mddev));
1433         return err;
1434 }
1435
1436 static void md_delayed_delete(struct work_struct *ws)
1437 {
1438         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1439         kobject_del(&rdev->kobj);
1440         kobject_put(&rdev->kobj);
1441 }
1442
1443 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1444 {
1445         char b[BDEVNAME_SIZE];
1446         if (!rdev->mddev) {
1447                 MD_BUG();
1448                 return;
1449         }
1450         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1451         list_del_init(&rdev->same_set);
1452         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1453         rdev->mddev = NULL;
1454         sysfs_remove_link(&rdev->kobj, "block");
1455
1456         /* We need to delay this, otherwise we can deadlock when
1457          * writing 'remove' to "dev/state"
1458          */
1459         INIT_WORK(&rdev->del_work, md_delayed_delete);
1460         kobject_get(&rdev->kobj);
1461         schedule_work(&rdev->del_work);
1462 }
1463
1464 /*
1465  * prevent the device from being mounted, repartitioned or
1466  * otherwise reused by a RAID array (or any other kernel
1467  * subsystem), by bd_claiming the device.
1468  */
1469 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1470 {
1471         int err = 0;
1472         struct block_device *bdev;
1473         char b[BDEVNAME_SIZE];
1474
1475         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1476         if (IS_ERR(bdev)) {
1477                 printk(KERN_ERR "md: could not open %s.\n",
1478                         __bdevname(dev, b));
1479                 return PTR_ERR(bdev);
1480         }
1481         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1482         if (err) {
1483                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1484                         bdevname(bdev, b));
1485                 blkdev_put(bdev);
1486                 return err;
1487         }
1488         if (!shared)
1489                 set_bit(AllReserved, &rdev->flags);
1490         rdev->bdev = bdev;
1491         return err;
1492 }
1493
1494 static void unlock_rdev(mdk_rdev_t *rdev)
1495 {
1496         struct block_device *bdev = rdev->bdev;
1497         rdev->bdev = NULL;
1498         if (!bdev)
1499                 MD_BUG();
1500         bd_release(bdev);
1501         blkdev_put(bdev);
1502 }
1503
1504 void md_autodetect_dev(dev_t dev);
1505
1506 static void export_rdev(mdk_rdev_t * rdev)
1507 {
1508         char b[BDEVNAME_SIZE];
1509         printk(KERN_INFO "md: export_rdev(%s)\n",
1510                 bdevname(rdev->bdev,b));
1511         if (rdev->mddev)
1512                 MD_BUG();
1513         free_disk_sb(rdev);
1514         list_del_init(&rdev->same_set);
1515 #ifndef MODULE
1516         if (test_bit(AutoDetected, &rdev->flags))
1517                 md_autodetect_dev(rdev->bdev->bd_dev);
1518 #endif
1519         unlock_rdev(rdev);
1520         kobject_put(&rdev->kobj);
1521 }
1522
1523 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1524 {
1525         unbind_rdev_from_array(rdev);
1526         export_rdev(rdev);
1527 }
1528
1529 static void export_array(mddev_t *mddev)
1530 {
1531         struct list_head *tmp;
1532         mdk_rdev_t *rdev;
1533
1534         rdev_for_each(rdev, tmp, mddev) {
1535                 if (!rdev->mddev) {
1536                         MD_BUG();
1537                         continue;
1538                 }
1539                 kick_rdev_from_array(rdev);
1540         }
1541         if (!list_empty(&mddev->disks))
1542                 MD_BUG();
1543         mddev->raid_disks = 0;
1544         mddev->major_version = 0;
1545 }
1546
1547 static void print_desc(mdp_disk_t *desc)
1548 {
1549         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1550                 desc->major,desc->minor,desc->raid_disk,desc->state);
1551 }
1552
1553 static void print_sb(mdp_super_t *sb)
1554 {
1555         int i;
1556
1557         printk(KERN_INFO 
1558                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1559                 sb->major_version, sb->minor_version, sb->patch_version,
1560                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1561                 sb->ctime);
1562         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1563                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1564                 sb->md_minor, sb->layout, sb->chunk_size);
1565         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1566                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1567                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1568                 sb->failed_disks, sb->spare_disks,
1569                 sb->sb_csum, (unsigned long)sb->events_lo);
1570
1571         printk(KERN_INFO);
1572         for (i = 0; i < MD_SB_DISKS; i++) {
1573                 mdp_disk_t *desc;
1574
1575                 desc = sb->disks + i;
1576                 if (desc->number || desc->major || desc->minor ||
1577                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1578                         printk("     D %2d: ", i);
1579                         print_desc(desc);
1580                 }
1581         }
1582         printk(KERN_INFO "md:     THIS: ");
1583         print_desc(&sb->this_disk);
1584
1585 }
1586
1587 static void print_rdev(mdk_rdev_t *rdev)
1588 {
1589         char b[BDEVNAME_SIZE];
1590         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1591                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1592                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1593                 rdev->desc_nr);
1594         if (rdev->sb_loaded) {
1595                 printk(KERN_INFO "md: rdev superblock:\n");
1596                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1597         } else
1598                 printk(KERN_INFO "md: no rdev superblock!\n");
1599 }
1600
1601 static void md_print_devices(void)
1602 {
1603         struct list_head *tmp, *tmp2;
1604         mdk_rdev_t *rdev;
1605         mddev_t *mddev;
1606         char b[BDEVNAME_SIZE];
1607
1608         printk("\n");
1609         printk("md:     **********************************\n");
1610         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1611         printk("md:     **********************************\n");
1612         for_each_mddev(mddev, tmp) {
1613
1614                 if (mddev->bitmap)
1615                         bitmap_print_sb(mddev->bitmap);
1616                 else
1617                         printk("%s: ", mdname(mddev));
1618                 rdev_for_each(rdev, tmp2, mddev)
1619                         printk("<%s>", bdevname(rdev->bdev,b));
1620                 printk("\n");
1621
1622                 rdev_for_each(rdev, tmp2, mddev)
1623                         print_rdev(rdev);
1624         }
1625         printk("md:     **********************************\n");
1626         printk("\n");
1627 }
1628
1629
1630 static void sync_sbs(mddev_t * mddev, int nospares)
1631 {
1632         /* Update each superblock (in-memory image), but
1633          * if we are allowed to, skip spares which already
1634          * have the right event counter, or have one earlier
1635          * (which would mean they aren't being marked as dirty
1636          * with the rest of the array)
1637          */
1638         mdk_rdev_t *rdev;
1639         struct list_head *tmp;
1640
1641         rdev_for_each(rdev, tmp, mddev) {
1642                 if (rdev->sb_events == mddev->events ||
1643                     (nospares &&
1644                      rdev->raid_disk < 0 &&
1645                      (rdev->sb_events&1)==0 &&
1646                      rdev->sb_events+1 == mddev->events)) {
1647                         /* Don't update this superblock */
1648                         rdev->sb_loaded = 2;
1649                 } else {
1650                         super_types[mddev->major_version].
1651                                 sync_super(mddev, rdev);
1652                         rdev->sb_loaded = 1;
1653                 }
1654         }
1655 }
1656
1657 static void md_update_sb(mddev_t * mddev, int force_change)
1658 {
1659         struct list_head *tmp;
1660         mdk_rdev_t *rdev;
1661         int sync_req;
1662         int nospares = 0;
1663
1664         if (mddev->external)
1665                 return;
1666 repeat:
1667         spin_lock_irq(&mddev->write_lock);
1668
1669         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1670         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1671                 force_change = 1;
1672         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1673                 /* just a clean <-> dirty transition; possibly leave spares alone,
1674                  * though if 'events' doesn't end up with the right even/odd parity,
1675                  * we will have to update the spares after all
1676                  */
1677                 nospares = 1;
1678         if (force_change)
1679                 nospares = 0;
1680         if (mddev->degraded)
1681                 /* If the array is degraded, then skipping spares is both
1682                  * dangerous and fairly pointless.
1683                  * Dangerous because a device that was removed from the array
1684                  * might have an event_count that still looks up-to-date,
1685                  * so it can be re-added without a resync.
1686                  * Pointless because if there are any spares to skip,
1687                  * then a recovery will happen and soon that array won't
1688                  * be degraded any more, and the spares can go back to sleep.
1689                  */
1690                 nospares = 0;
1691
1692         sync_req = mddev->in_sync;
1693         mddev->utime = get_seconds();
1694
1695         /* If this is just a dirty<->clean transition, and the array is clean
1696          * and 'events' is odd, we can roll back to the previous clean state */
1697         if (nospares
1698             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1699             && (mddev->events & 1)
1700             && mddev->events != 1)
1701                 mddev->events--;
1702         else {
1703                 /* otherwise we have to go forward and ... */
1704                 mddev->events ++;
1705                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1706                         /* .. if the array isn't clean, insist on an odd 'events' */
1707                         if ((mddev->events&1)==0) {
1708                                 mddev->events++;
1709                                 nospares = 0;
1710                         }
1711                 } else {
1712                         /* otherwise insist on an even 'events' (for clean states) */
1713                         if ((mddev->events&1)) {
1714                                 mddev->events++;
1715                                 nospares = 0;
1716                         }
1717                 }
1718         }
1719
1720         if (!mddev->events) {
1721                 /*
1722                  * oops, this 64-bit counter should never wrap.
1723                  * Either we are somewhere around 1 trillion A.D., assuming
1724                  * 1 reboot per second, or we have a bug:
1725                  */
1726                 MD_BUG();
1727                 mddev->events --;
1728         }
1729
1730         /*
1731          * do not write anything to disk if using
1732          * nonpersistent superblocks
1733          */
1734         if (!mddev->persistent) {
1735                 if (!mddev->external)
1736                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1737
1738                 spin_unlock_irq(&mddev->write_lock);
1739                 wake_up(&mddev->sb_wait);
1740                 return;
1741         }
1742         sync_sbs(mddev, nospares);
1743         spin_unlock_irq(&mddev->write_lock);
1744
1745         dprintk(KERN_INFO 
1746                 "md: updating %s RAID superblock on device (in sync %d)\n",
1747                 mdname(mddev),mddev->in_sync);
1748
1749         bitmap_update_sb(mddev->bitmap);
1750         rdev_for_each(rdev, tmp, mddev) {
1751                 char b[BDEVNAME_SIZE];
1752                 dprintk(KERN_INFO "md: ");
1753                 if (rdev->sb_loaded != 1)
1754                         continue; /* no noise on spare devices */
1755                 if (test_bit(Faulty, &rdev->flags))
1756                         dprintk("(skipping faulty ");
1757
1758                 dprintk("%s ", bdevname(rdev->bdev,b));
1759                 if (!test_bit(Faulty, &rdev->flags)) {
1760                         md_super_write(mddev,rdev,
1761                                        rdev->sb_offset<<1, rdev->sb_size,
1762                                        rdev->sb_page);
1763                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1764                                 bdevname(rdev->bdev,b),
1765                                 (unsigned long long)rdev->sb_offset);
1766                         rdev->sb_events = mddev->events;
1767
1768                 } else
1769                         dprintk(")\n");
1770                 if (mddev->level == LEVEL_MULTIPATH)
1771                         /* only need to write one superblock... */
1772                         break;
1773         }
1774         md_super_wait(mddev);
1775         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1776
1777         spin_lock_irq(&mddev->write_lock);
1778         if (mddev->in_sync != sync_req ||
1779             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1780                 /* have to write it out again */
1781                 spin_unlock_irq(&mddev->write_lock);
1782                 goto repeat;
1783         }
1784         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1785         spin_unlock_irq(&mddev->write_lock);
1786         wake_up(&mddev->sb_wait);
1787
1788 }
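/*
 * Worked example of the even/odd 'events' handling above (editorial note,
 * not from the original source): an array sitting clean at events == 12 goes
 * dirty on the first write and bumps to 13 (odd == not clean).  When writes
 * drain and it goes clean again, the rollback path can take 13 back to 12
 * instead of advancing to 14, so spare superblocks still recorded at 12
 * remain current and need not be rewritten by sync_sbs().
 */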
1789
1790 /* words written to sysfs files may, or may not, be \n terminated.
1791  * We want to accept either case. For this we use cmd_match.
1792  */
1793 static int cmd_match(const char *cmd, const char *str)
1794 {
1795         /* See if cmd, written into a sysfs file, matches
1796          * str.  They must either be the same, or cmd can
1797          * have a trailing newline
1798          */
1799         while (*cmd && *str && *cmd == *str) {
1800                 cmd++;
1801                 str++;
1802         }
1803         if (*cmd == '\n')
1804                 cmd++;
1805         if (*str || *cmd)
1806                 return 0;
1807         return 1;
1808 }
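/*
 * Editorial sketch (not part of the original source): expected cmd_match()
 * behaviour for typical sysfs writes, where echo(1) appends a newline.  The
 * wrapper function below is hypothetical and kept under #if 0.
 */
#if 0
static void cmd_match_examples(void)
{
	cmd_match("check\n", "check");	/* -> 1: trailing newline ignored   */
	cmd_match("check",   "check");	/* -> 1: exact match                */
	cmd_match("checked", "check");	/* -> 0: extra characters rejected  */
	cmd_match("chec",    "check");	/* -> 0: a bare prefix is rejected  */
}
#endif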
1809
1810 struct rdev_sysfs_entry {
1811         struct attribute attr;
1812         ssize_t (*show)(mdk_rdev_t *, char *);
1813         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1814 };
1815
1816 static ssize_t
1817 state_show(mdk_rdev_t *rdev, char *page)
1818 {
1819         char *sep = "";
1820         size_t len = 0;
1821
1822         if (test_bit(Faulty, &rdev->flags)) {
1823                 len+= sprintf(page+len, "%sfaulty",sep);
1824                 sep = ",";
1825         }
1826         if (test_bit(In_sync, &rdev->flags)) {
1827                 len += sprintf(page+len, "%sin_sync",sep);
1828                 sep = ",";
1829         }
1830         if (test_bit(WriteMostly, &rdev->flags)) {
1831                 len += sprintf(page+len, "%swrite_mostly",sep);
1832                 sep = ",";
1833         }
1834         if (test_bit(Blocked, &rdev->flags)) {
1835                 len += sprintf(page+len, "%sblocked", sep);
1836                 sep = ",";
1837         }
1838         if (!test_bit(Faulty, &rdev->flags) &&
1839             !test_bit(In_sync, &rdev->flags)) {
1840                 len += sprintf(page+len, "%sspare", sep);
1841                 sep = ",";
1842         }
1843         return len+sprintf(page+len, "\n");
1844 }
1845
1846 static ssize_t
1847 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1848 {
1849         /* can write
1850          *  faulty  - simulates an error
1851          *  remove  - disconnects the device
1852          *  writemostly - sets write_mostly
1853          *  -writemostly - clears write_mostly
1854          *  blocked - sets the Blocked flag
1855          *  -blocked - clears the Blocked flag
1856          */
1857         int err = -EINVAL;
1858         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1859                 md_error(rdev->mddev, rdev);
1860                 err = 0;
1861         } else if (cmd_match(buf, "remove")) {
1862                 if (rdev->raid_disk >= 0)
1863                         err = -EBUSY;
1864                 else {
1865                         mddev_t *mddev = rdev->mddev;
1866                         kick_rdev_from_array(rdev);
1867                         if (mddev->pers)
1868                                 md_update_sb(mddev, 1);
1869                         md_new_event(mddev);
1870                         err = 0;
1871                 }
1872         } else if (cmd_match(buf, "writemostly")) {
1873                 set_bit(WriteMostly, &rdev->flags);
1874                 err = 0;
1875         } else if (cmd_match(buf, "-writemostly")) {
1876                 clear_bit(WriteMostly, &rdev->flags);
1877                 err = 0;
1878         } else if (cmd_match(buf, "blocked")) {
1879                 set_bit(Blocked, &rdev->flags);
1880                 err = 0;
1881         } else if (cmd_match(buf, "-blocked")) {
1882                 clear_bit(Blocked, &rdev->flags);
1883                 wake_up(&rdev->blocked_wait);
1884                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1885                 md_wakeup_thread(rdev->mddev->thread);
1886
1887                 err = 0;
1888         }
1889         return err ? err : len;
1890 }
1891 static struct rdev_sysfs_entry rdev_state =
1892 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
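/*
 * Illustrative usage (editorial example): the per-device kobject registered
 * by bind_rdev_to_array() exposes this attribute as, e.g.,
 * /sys/block/md0/md/dev-sdb1/state, so writing "faulty" there simulates a
 * failure and writing "remove" detaches a device that holds no slot.  The
 * md0/sdb1 names are examples only.
 */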
1893
1894 static ssize_t
1895 errors_show(mdk_rdev_t *rdev, char *page)
1896 {
1897         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1898 }
1899
1900 static ssize_t
1901 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1902 {
1903         char *e;
1904         unsigned long n = simple_strtoul(buf, &e, 10);
1905         if (*buf && (*e == 0 || *e == '\n')) {
1906                 atomic_set(&rdev->corrected_errors, n);
1907                 return len;
1908         }
1909         return -EINVAL;
1910 }
1911 static struct rdev_sysfs_entry rdev_errors =
1912 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1913
1914 static ssize_t
1915 slot_show(mdk_rdev_t *rdev, char *page)
1916 {
1917         if (rdev->raid_disk < 0)
1918                 return sprintf(page, "none\n");
1919         else
1920                 return sprintf(page, "%d\n", rdev->raid_disk);
1921 }
1922
1923 static ssize_t
1924 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1925 {
1926         char *e;
1927         int err;
1928         char nm[20];
1929         int slot = simple_strtoul(buf, &e, 10);
1930         if (strncmp(buf, "none", 4)==0)
1931                 slot = -1;
1932         else if (e==buf || (*e && *e!= '\n'))
1933                 return -EINVAL;
1934         if (rdev->mddev->pers && slot == -1) {
1935                 /* Setting 'slot' on an active array requires also
1936                  * updating the 'rd%d' link, and communicating
1937                  * with the personality with ->hot_*_disk.
1938                  * For now we only support removing
1939                  * failed/spare devices.  This normally happens automatically,
1940                  * but not when the metadata is externally managed.
1941                  */
1942                 if (rdev->raid_disk == -1)
1943                         return -EEXIST;
1944                 /* personality does all needed checks */
1945                 if (rdev->mddev->pers->hot_add_disk == NULL)
1946                         return -EINVAL;
1947                 err = rdev->mddev->pers->
1948                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
1949                 if (err)
1950                         return err;
1951                 sprintf(nm, "rd%d", rdev->raid_disk);
1952                 sysfs_remove_link(&rdev->mddev->kobj, nm);
1953                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1954                 md_wakeup_thread(rdev->mddev->thread);
1955         } else if (rdev->mddev->pers) {
1956                 mdk_rdev_t *rdev2;
1957                 struct list_head *tmp;
1958                 /* Activating a spare ... or possibly reactivating
1959                  * if we ever get bitmaps working here.
1960                  */
1961
1962                 if (rdev->raid_disk != -1)
1963                         return -EBUSY;
1964
1965                 if (rdev->mddev->pers->hot_add_disk == NULL)
1966                         return -EINVAL;
1967
1968                 rdev_for_each(rdev2, tmp, rdev->mddev)
1969                         if (rdev2->raid_disk == slot)
1970                                 return -EEXIST;
1971
1972                 rdev->raid_disk = slot;
1973                 if (test_bit(In_sync, &rdev->flags))
1974                         rdev->saved_raid_disk = slot;
1975                 else
1976                         rdev->saved_raid_disk = -1;
1977                 err = rdev->mddev->pers->
1978                         hot_add_disk(rdev->mddev, rdev);
1979                 if (err) {
1980                         rdev->raid_disk = -1;
1981                         return err;
1982                 }
1983                 sprintf(nm, "rd%d", rdev->raid_disk);
1984                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
1985                         printk(KERN_WARNING
1986                                "md: cannot register "
1987                                "%s for %s\n",
1988                                nm, mdname(rdev->mddev));
1989
1990                 /* don't wakeup anyone, leave that to userspace. */
1991         } else {
1992                 if (slot >= rdev->mddev->raid_disks)
1993                         return -ENOSPC;
1994                 rdev->raid_disk = slot;
1995                 /* assume it is working */
1996                 clear_bit(Faulty, &rdev->flags);
1997                 clear_bit(WriteMostly, &rdev->flags);
1998                 set_bit(In_sync, &rdev->flags);
1999         }
2000         return len;
2001 }
2002
2003
2004 static struct rdev_sysfs_entry rdev_slot =
2005 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
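/*
 * Illustrative usage (editorial example): on a running array with externally
 * managed metadata, writing "none" to a device's 'slot' releases the slot of
 * a failed or spare device, while writing a number such as "2" hot-adds the
 * device as raid disk 2.  The slot number is an example only.
 */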
2006
2007 static ssize_t
2008 offset_show(mdk_rdev_t *rdev, char *page)
2009 {
2010         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2011 }
2012
2013 static ssize_t
2014 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2015 {
2016         char *e;
2017         unsigned long long offset = simple_strtoull(buf, &e, 10);
2018         if (e==buf || (*e && *e != '\n'))
2019                 return -EINVAL;
2020         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2021                 return -EBUSY;
2022         if (rdev->size && rdev->mddev->external)
2023                 /* Must set offset before size, so overlap checks
2024                  * can be sane */
2025                 return -EBUSY;
2026         rdev->data_offset = offset;
2027         return len;
2028 }
2029
2030 static struct rdev_sysfs_entry rdev_offset =
2031 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2032
2033 static ssize_t
2034 rdev_size_show(mdk_rdev_t *rdev, char *page)
2035 {
2036         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
2037 }
2038
2039 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2040 {
2041         /* check if two start/length pairs overlap */
2042         if (s1+l1 <= s2)
2043                 return 0;
2044         if (s2+l2 <= s1)
2045                 return 0;
2046         return 1;
2047 }
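/*
 * Editorial sketch (not part of the original source): expected results of
 * the half-open interval test above.  The wrapper function is hypothetical
 * and kept under #if 0.
 */
#if 0
static void overlaps_examples(void)
{
	overlaps(0, 100, 100, 50);	/* -> 0: [0,100) and [100,150) only touch */
	overlaps(0, 100,  50, 50);	/* -> 1: [0,100) covers [50,100)          */
	overlaps(80, 40,  0, 100);	/* -> 1: [80,120) overlaps [0,100)        */
}
#endif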
2048
2049 static ssize_t
2050 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2051 {
2052         char *e;
2053         unsigned long long size = simple_strtoull(buf, &e, 10);
2054         unsigned long long oldsize = rdev->size;
2055         mddev_t *my_mddev = rdev->mddev;
2056
2057         if (e==buf || (*e && *e != '\n'))
2058                 return -EINVAL;
2059         if (my_mddev->pers && rdev->raid_disk >= 0)
2060                 return -EBUSY;
2061         rdev->size = size;
2062         if (size > oldsize && rdev->mddev->external) {
2063                 /* need to check that all other rdevs with the same ->bdev
2064                  * do not overlap.  We need to unlock the mddev to avoid
2065                  * a deadlock.  We have already changed rdev->size, and if
2066                  * we have to change it back, we will have the lock again.
2067                  */
2068                 mddev_t *mddev;
2069                 int overlap = 0;
2070                 struct list_head *tmp, *tmp2;
2071
2072                 mddev_unlock(my_mddev);
2073                 for_each_mddev(mddev, tmp) {
2074                         mdk_rdev_t *rdev2;
2075
2076                         mddev_lock(mddev);
2077                         rdev_for_each(rdev2, tmp2, mddev)
2078                                 if (test_bit(AllReserved, &rdev2->flags) ||
2079                                     (rdev->bdev == rdev2->bdev &&
2080                                      rdev != rdev2 &&
2081                                      overlaps(rdev->data_offset, rdev->size,
2082                                             rdev2->data_offset, rdev2->size))) {
2083                                         overlap = 1;
2084                                         break;
2085                                 }
2086                         mddev_unlock(mddev);
2087                         if (overlap) {
2088                                 mddev_put(mddev);
2089                                 break;
2090                         }
2091                 }
2092                 mddev_lock(my_mddev);
2093                 if (overlap) {
2094                         /* Someone else could have slipped in a size
2095                          * change here, but doing so is just silly.
2096                          * We put oldsize back because we *know* it is
2097                          * safe, and trust userspace not to race with
2098                          * itself
2099                          */
2100                         rdev->size = oldsize;
2101                         return -EBUSY;
2102                 }
2103         }
2104         if (size < my_mddev->size || my_mddev->size == 0)
2105                 my_mddev->size = size;
2106         return len;
2107 }
2108
2109 static struct rdev_sysfs_entry rdev_size =
2110 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2111
2112 static struct attribute *rdev_default_attrs[] = {
2113         &rdev_state.attr,
2114         &rdev_errors.attr,
2115         &rdev_slot.attr,
2116         &rdev_offset.attr,
2117         &rdev_size.attr,
2118         NULL,
2119 };
2120 static ssize_t
2121 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2122 {
2123         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2124         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2125         mddev_t *mddev = rdev->mddev;
2126         ssize_t rv;
2127
2128         if (!entry->show)
2129                 return -EIO;
2130
2131         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2132         if (!rv) {
2133                 if (rdev->mddev == NULL)
2134                         rv = -EBUSY;
2135                 else
2136                         rv = entry->show(rdev, page);
2137                 mddev_unlock(mddev);
2138         }
2139         return rv;
2140 }
2141
2142 static ssize_t
2143 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2144               const char *page, size_t length)
2145 {
2146         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2147         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2148         ssize_t rv;
2149         mddev_t *mddev = rdev->mddev;
2150
2151         if (!entry->store)
2152                 return -EIO;
2153         if (!capable(CAP_SYS_ADMIN))
2154                 return -EACCES;
2155         rv = mddev ? mddev_lock(mddev): -EBUSY;
2156         if (!rv) {
2157                 if (rdev->mddev == NULL)
2158                         rv = -EBUSY;
2159                 else
2160                         rv = entry->store(rdev, page, length);
2161                 mddev_unlock(mddev);
2162         }
2163         return rv;
2164 }
2165
2166 static void rdev_free(struct kobject *ko)
2167 {
2168         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2169         kfree(rdev);
2170 }
2171 static struct sysfs_ops rdev_sysfs_ops = {
2172         .show           = rdev_attr_show,
2173         .store          = rdev_attr_store,
2174 };
2175 static struct kobj_type rdev_ktype = {
2176         .release        = rdev_free,
2177         .sysfs_ops      = &rdev_sysfs_ops,
2178         .default_attrs  = rdev_default_attrs,
2179 };
2180
2181 /*
2182  * Import a device. If 'super_format' >= 0, then sanity check the superblock.
2183  *
2184  * Mark the device faulty if:
2185  *
2186  *   - the device is nonexistent (zero size)
2187  *   - the device has no valid superblock
2188  *
2189  * A faulty rdev _never_ has rdev->sb set.
2190  */
2191 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2192 {
2193         char b[BDEVNAME_SIZE];
2194         int err;
2195         mdk_rdev_t *rdev;
2196         sector_t size;
2197
2198         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2199         if (!rdev) {
2200                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2201                 return ERR_PTR(-ENOMEM);
2202         }
2203
2204         if ((err = alloc_disk_sb(rdev)))
2205                 goto abort_free;
2206
2207         err = lock_rdev(rdev, newdev, super_format == -2);
2208         if (err)
2209                 goto abort_free;
2210
2211         kobject_init(&rdev->kobj, &rdev_ktype);
2212
2213         rdev->desc_nr = -1;
2214         rdev->saved_raid_disk = -1;
2215         rdev->raid_disk = -1;
2216         rdev->flags = 0;
2217         rdev->data_offset = 0;
2218         rdev->sb_events = 0;
2219         atomic_set(&rdev->nr_pending, 0);
2220         atomic_set(&rdev->read_errors, 0);
2221         atomic_set(&rdev->corrected_errors, 0);
2222
2223         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2224         if (!size) {
2225                 printk(KERN_WARNING 
2226                         "md: %s has zero or unknown size, marking faulty!\n",
2227                         bdevname(rdev->bdev,b));
2228                 err = -EINVAL;
2229                 goto abort_free;
2230         }
2231
2232         if (super_format >= 0) {
2233                 err = super_types[super_format].
2234                         load_super(rdev, NULL, super_minor);
2235                 if (err == -EINVAL) {
2236                         printk(KERN_WARNING
2237                                 "md: %s does not have a valid v%d.%d "
2238                                "superblock, not importing!\n",
2239                                 bdevname(rdev->bdev,b),
2240                                super_format, super_minor);
2241                         goto abort_free;
2242                 }
2243                 if (err < 0) {
2244                         printk(KERN_WARNING 
2245                                 "md: could not read %s's sb, not importing!\n",
2246                                 bdevname(rdev->bdev,b));
2247                         goto abort_free;
2248                 }
2249         }
2250
2251         INIT_LIST_HEAD(&rdev->same_set);
2252         init_waitqueue_head(&rdev->blocked_wait);
2253
2254         return rdev;
2255
2256 abort_free:
2257         if (rdev->sb_page) {
2258                 if (rdev->bdev)
2259                         unlock_rdev(rdev);
2260                 free_disk_sb(rdev);
2261         }
2262         kfree(rdev);
2263         return ERR_PTR(err);
2264 }
2265
2266 /*
2267  * Check a full RAID array for plausibility
2268  */
2269
2270
2271 static void analyze_sbs(mddev_t * mddev)
2272 {
2273         int i;
2274         struct list_head *tmp;
2275         mdk_rdev_t *rdev, *freshest;
2276         char b[BDEVNAME_SIZE];
2277
2278         freshest = NULL;
2279         rdev_for_each(rdev, tmp, mddev)
2280                 switch (super_types[mddev->major_version].
2281                         load_super(rdev, freshest, mddev->minor_version)) {
2282                 case 1:
2283                         freshest = rdev;
2284                         break;
2285                 case 0:
2286                         break;
2287                 default:
2288                         printk( KERN_ERR \
2289                                 "md: fatal superblock inconsistency in %s"
2290                                 " -- removing from array\n", 
2291                                 bdevname(rdev->bdev,b));
2292                         kick_rdev_from_array(rdev);
2293                 }
2294
2295
2296         super_types[mddev->major_version].
2297                 validate_super(mddev, freshest);
2298
2299         i = 0;
2300         rdev_for_each(rdev, tmp, mddev) {
2301                 if (rdev != freshest)
2302                         if (super_types[mddev->major_version].
2303                             validate_super(mddev, rdev)) {
2304                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2305                                         " from array!\n",
2306                                         bdevname(rdev->bdev,b));
2307                                 kick_rdev_from_array(rdev);
2308                                 continue;
2309                         }
2310                 if (mddev->level == LEVEL_MULTIPATH) {
2311                         rdev->desc_nr = i++;
2312                         rdev->raid_disk = rdev->desc_nr;
2313                         set_bit(In_sync, &rdev->flags);
2314                 } else if (rdev->raid_disk >= mddev->raid_disks) {
2315                         rdev->raid_disk = -1;
2316                         clear_bit(In_sync, &rdev->flags);
2317                 }
2318         }
2319
2320
2321
2322         if (mddev->recovery_cp != MaxSector &&
2323             mddev->level >= 1)
2324                 printk(KERN_ERR "md: %s: raid array is not clean"
2325                        " -- starting background reconstruction\n",
2326                        mdname(mddev));
2327
2328 }
2329
2330 static ssize_t
2331 safe_delay_show(mddev_t *mddev, char *page)
2332 {
2333         int msec = (mddev->safemode_delay*1000)/HZ;
2334         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2335 }
2336 static ssize_t
2337 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2338 {
2339         int scale=1;
2340         int dot=0;
2341         int i;
2342         unsigned long msec;
2343         char buf[30];
2344         char *e;
2345         /* remove a period, and count digits after it */
2346         if (len >= sizeof(buf))
2347                 return -EINVAL;
2348         strlcpy(buf, cbuf, len);
2349         buf[len] = 0;
2350         for (i=0; i<len; i++) {
2351                 if (dot) {
2352                         if (isdigit(buf[i])) {
2353                                 buf[i-1] = buf[i];
2354                                 scale *= 10;
2355                         }
2356                         buf[i] = 0;
2357                 } else if (buf[i] == '.') {
2358                         dot=1;
2359                         buf[i] = 0;
2360                 }
2361         }
2362         msec = simple_strtoul(buf, &e, 10);
2363         if (e == buf || (*e && *e != '\n'))
2364                 return -EINVAL;
2365         msec = (msec * 1000) / scale;
2366         if (msec == 0)
2367                 mddev->safemode_delay = 0;
2368         else {
2369                 mddev->safemode_delay = (msec*HZ)/1000;
2370                 if (mddev->safemode_delay == 0)
2371                         mddev->safemode_delay = 1;
2372         }
2373         return len;
2374 }
2375 static struct md_sysfs_entry md_safe_delay =
2376 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
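/*
 * Worked example (editorial note, assuming HZ == 1000): for an echo-style
 * input of "0.200\n" the loop above strips the period, yielding the digits
 * "0200" with scale == 1000, so msec == (200 * 1000) / 1000 == 200 and
 * safemode_delay becomes (200 * HZ) / 1000 == 200 jiffies, i.e. 0.2 seconds;
 * writing "0" disables the delay entirely.
 */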
2377
2378 static ssize_t
2379 level_show(mddev_t *mddev, char *page)
2380 {
2381         struct mdk_personality *p = mddev->pers;
2382         if (p)
2383                 return sprintf(page, "%s\n", p->name);
2384         else if (mddev->clevel[0])
2385                 return sprintf(page, "%s\n", mddev->clevel);
2386         else if (mddev->level != LEVEL_NONE)
2387                 return sprintf(page, "%d\n", mddev->level);
2388         else
2389                 return 0;
2390 }
2391
2392 static ssize_t
2393 level_store(mddev_t *mddev, const char *buf, size_t len)
2394 {
2395         ssize_t rv = len;
2396         if (mddev->pers)
2397                 return -EBUSY;
2398         if (len == 0)
2399                 return 0;
2400         if (len >= sizeof(mddev->clevel))
2401                 return -ENOSPC;
2402         strncpy(mddev->clevel, buf, len);
2403         if (mddev->clevel[len-1] == '\n')
2404                 len--;
2405         mddev->clevel[len] = 0;
2406         mddev->level = LEVEL_NONE;
2407         return rv;
2408 }
2409
2410 static struct md_sysfs_entry md_level =
2411 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2412
2413
2414 static ssize_t
2415 layout_show(mddev_t *mddev, char *page)
2416 {
2417         /* just a number, not meaningful for all levels */
2418         if (mddev->reshape_position != MaxSector &&
2419             mddev->layout != mddev->new_layout)
2420                 return sprintf(page, "%d (%d)\n",
2421                                mddev->new_layout, mddev->layout);
2422         return sprintf(page, "%d\n", mddev->layout);
2423 }
2424
2425 static ssize_t
2426 layout_store(mddev_t *mddev, const char *buf, size_t len)
2427 {
2428         char *e;
2429         unsigned long n = simple_strtoul(buf, &e, 10);
2430
2431         if (!*buf || (*e && *e != '\n'))
2432                 return -EINVAL;
2433
2434         if (mddev->pers)
2435                 return -EBUSY;
2436         if (mddev->reshape_position != MaxSector)
2437                 mddev->new_layout = n;
2438         else
2439                 mddev->layout = n;
2440         return len;
2441 }
2442 static struct md_sysfs_entry md_layout =
2443 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2444
2445
2446 static ssize_t
2447 raid_disks_show(mddev_t *mddev, char *page)
2448 {
2449         if (mddev->raid_disks == 0)
2450                 return 0;
2451         if (mddev->reshape_position != MaxSector &&
2452             mddev->delta_disks != 0)
2453                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2454                                mddev->raid_disks - mddev->delta_disks);
2455         return sprintf(page, "%d\n", mddev->raid_disks);
2456 }
2457
2458 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2459
2460 static ssize_t
2461 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2462 {
2463         char *e;
2464         int rv = 0;
2465         unsigned long n = simple_strtoul(buf, &e, 10);
2466
2467         if (!*buf || (*e && *e != '\n'))
2468                 return -EINVAL;
2469
2470         if (mddev->pers)
2471                 rv = update_raid_disks(mddev, n);
2472         else if (mddev->reshape_position != MaxSector) {
2473                 int olddisks = mddev->raid_disks - mddev->delta_disks;
2474                 mddev->delta_disks = n - olddisks;
2475                 mddev->raid_disks = n;
2476         } else
2477                 mddev->raid_disks = n;
2478         return rv ? rv : len;
2479 }
2480 static struct md_sysfs_entry md_raid_disks =
2481 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2482
2483 static ssize_t
2484 chunk_size_show(mddev_t *mddev, char *page)
2485 {
2486         if (mddev->reshape_position != MaxSector &&
2487             mddev->chunk_size != mddev->new_chunk)
2488                 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2489                                mddev->chunk_size);
2490         return sprintf(page, "%d\n", mddev->chunk_size);
2491 }
2492
2493 static ssize_t
2494 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2495 {
2496         /* can only set chunk_size if array is not yet active */
2497         char *e;
2498         unsigned long n = simple_strtoul(buf, &e, 10);
2499
2500         if (!*buf || (*e && *e != '\n'))
2501                 return -EINVAL;
2502
2503         if (mddev->pers)
2504                 return -EBUSY;
2505         else if (mddev->reshape_position != MaxSector)
2506                 mddev->new_chunk = n;
2507         else
2508                 mddev->chunk_size = n;
2509         return len;
2510 }
2511 static struct md_sysfs_entry md_chunk_size =
2512 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
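/*
 * Illustrative usage (editorial example): chunk_size is given in bytes and
 * can only be set while the array is inactive, so e.g. writing "65536"
 * selects 64 KiB chunks before the array is started (the value is only an
 * example).
 */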
2513
2514 static ssize_t
2515 resync_start_show(mddev_t *mddev, char *page)
2516 {
2517         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2518 }
2519
2520 static ssize_t
2521 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2522 {
2523         char *e;
2524         unsigned long long n = simple_strtoull(buf, &e, 10);
2525
2526         if (mddev->pers)
2527                 return -EBUSY;
2528         if (!*buf || (*e && *e != '\n'))
2529                 return -EINVAL;
2530
2531         mddev->recovery_cp = n;
2532         return len;
2533 }
2534 static struct md_sysfs_entry md_resync_start =
2535 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2536
2537 /*
2538  * The array state can be:
2539  *
2540  * clear
2541  *     No devices, no size, no level
2542  *     Equivalent to STOP_ARRAY ioctl
2543  * inactive
2544  *     May have some settings, but array is not active
2545  *        all IO results in error
2546  *     When written, doesn't tear down array, but just stops it
2547  * suspended (not supported yet)
2548  *     All IO requests will block. The array can be reconfigured.
2549  *     Writing this, if accepted, will block until the array is quiescent
2550  * readonly
2551  *     no resync can happen.  no superblocks get written.
2552  *     write requests fail
2553  * read-auto
2554  *     like readonly, but behaves like 'clean' on a write request.
2555  *
2556  * clean - no pending writes, but otherwise active.
2557  *     When written to inactive array, starts without resync
2558  *     If a write request arrives then
2559  *       if metadata is known, mark 'dirty' and switch to 'active'.
2560  *       if not known, block and switch to write-pending
2561  *     If written to an active array that has pending writes, then fails.
2562  * active
2563  *     fully active: IO and resync can be happening.
2564  *     When written to inactive array, starts with resync
2565  *
2566  * write-pending
2567  *     clean, but writes are blocked waiting for 'active' to be written.
2568  *
2569  * active-idle
2570  *     like active, but no writes have been seen for a while (100msec).
2571  *
2572  */
2573 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2574                    write_pending, active_idle, bad_word};
2575 static char *array_states[] = {
2576         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2577         "write-pending", "active-idle", NULL };
2578
2579 static int match_word(const char *word, char **list)
2580 {
2581         int n;
2582         for (n=0; list[n]; n++)
2583                 if (cmd_match(word, list[n]))
2584                         break;
2585         return n;
2586 }
2587
2588 static ssize_t
2589 array_state_show(mddev_t *mddev, char *page)
2590 {
2591         enum array_state st = inactive;
2592
2593         if (mddev->pers)
2594                 switch(mddev->ro) {
2595                 case 1:
2596                         st = readonly;
2597                         break;
2598                 case 2:
2599                         st = read_auto;
2600                         break;
2601                 case 0:
2602                         if (mddev->in_sync)
2603                                 st = clean;
2604                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
2605                                 st = write_pending;
2606                         else if (mddev->safemode)
2607                                 st = active_idle;
2608                         else
2609                                 st = active;
2610                 }
2611         else {
2612                 if (list_empty(&mddev->disks) &&
2613                     mddev->raid_disks == 0 &&
2614                     mddev->size == 0)
2615                         st = clear;
2616                 else
2617                         st = inactive;
2618         }
2619         return sprintf(page, "%s\n", array_states[st]);
2620 }
2621
2622 static int do_md_stop(mddev_t * mddev, int ro);
2623 static int do_md_run(mddev_t * mddev);
2624 static int restart_array(mddev_t *mddev);
2625
2626 static ssize_t
2627 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2628 {
2629         int err = -EINVAL;
2630         enum array_state st = match_word(buf, array_states);
2631         switch(st) {
2632         case bad_word:
2633                 break;
2634         case clear:
2635                 /* stopping an active array */
2636                 if (atomic_read(&mddev->active) > 1)
2637                         return -EBUSY;
2638                 err = do_md_stop(mddev, 0);
2639                 break;
2640         case inactive:
2641                 /* stopping an active array */
2642                 if (mddev->pers) {
2643                         if (atomic_read(&mddev->active) > 1)
2644                                 return -EBUSY;
2645                         err = do_md_stop(mddev, 2);
2646                 } else
2647                         err = 0; /* already inactive */
2648                 break;
2649         case suspended:
2650                 break; /* not supported yet */
2651         case readonly:
2652                 if (mddev->pers)
2653                         err = do_md_stop(mddev, 1);
2654                 else {
2655                         mddev->ro = 1;
2656                         set_disk_ro(mddev->gendisk, 1);
2657                         err = do_md_run(mddev);
2658                 }
2659                 break;
2660         case read_auto:
2661                 if (mddev->pers) {
2662                         if (mddev->ro != 1)
2663                                 err = do_md_stop(mddev, 1);
2664                         else
2665                                 err = restart_array(mddev);
2666                         if (err == 0) {
2667                                 mddev->ro = 2;
2668                                 set_disk_ro(mddev->gendisk, 0);
2669                         }
2670                 } else {
2671                         mddev->ro = 2;
2672                         err = do_md_run(mddev);
2673                 }
2674                 break;
2675         case clean:
2676                 if (mddev->pers) {
2677                         restart_array(mddev);
2678                         spin_lock_irq(&mddev->write_lock);
2679                         if (atomic_read(&mddev->writes_pending) == 0) {
2680                                 if (mddev->in_sync == 0) {
2681                                         mddev->in_sync = 1;
2682                                         if (mddev->safemode == 1)
2683                                                 mddev->safemode = 0;
2684                                         if (mddev->persistent)
2685                                                 set_bit(MD_CHANGE_CLEAN,
2686                                                         &mddev->flags);
2687                                 }
2688                                 err = 0;
2689                         } else
2690                                 err = -EBUSY;
2691                         spin_unlock_irq(&mddev->write_lock);
2692                 } else {
2693                         mddev->ro = 0;
2694                         mddev->recovery_cp = MaxSector;
2695                         err = do_md_run(mddev);
2696                 }
2697                 break;
2698         case active:
2699                 if (mddev->pers) {
2700                         restart_array(mddev);
2701                         if (mddev->external)
2702                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2703                         wake_up(&mddev->sb_wait);
2704                         err = 0;
2705                 } else {
2706                         mddev->ro = 0;
2707                         set_disk_ro(mddev->gendisk, 0);
2708                         err = do_md_run(mddev);
2709                 }
2710                 break;
2711         case write_pending:
2712         case active_idle:
2713                 /* these cannot be set */
2714                 break;
2715         }
2716         if (err)
2717                 return err;
2718         else {
2719                 sysfs_notify(&mddev->kobj, NULL, "array_state");
2720                 return len;
2721         }
2722 }
2723 static struct md_sysfs_entry md_array_state =
2724 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
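/*
 * Illustrative transitions (editorial example, device name assumed): reading
 * /sys/block/md0/md/array_state on a started, idle array reports "clean";
 * writing "readonly" switches a running array to read-only mode, and writing
 * "clear" stops it and releases its devices.
 */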
2725
2726 static ssize_t
2727 null_show(mddev_t *mddev, char *page)
2728 {
2729         return -EINVAL;
2730 }
2731
2732 static ssize_t
2733 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2734 {
2735         /* buf must be %d:%d\n? giving major and minor numbers */
2736         /* The new device is added to the array.
2737          * If the array has a persistent superblock, we read the
2738          * superblock to initialise info and check validity.
2739          * Otherwise, the only checking done is that in bind_rdev_to_array,
2740          * which mainly checks size.
2741          */
2742         char *e;
2743         int major = simple_strtoul(buf, &e, 10);
2744         int minor;
2745         dev_t dev;
2746         mdk_rdev_t *rdev;
2747         int err;
2748
2749         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2750                 return -EINVAL;
2751         minor = simple_strtoul(e+1, &e, 10);
2752         if (*e && *e != '\n')
2753                 return -EINVAL;
2754         dev = MKDEV(major, minor);
2755         if (major != MAJOR(dev) ||
2756             minor != MINOR(dev))
2757                 return -EOVERFLOW;
2758
2759
2760         if (mddev->persistent) {
2761                 rdev = md_import_device(dev, mddev->major_version,
2762                                         mddev->minor_version);
2763                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2764                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2765                                                        mdk_rdev_t, same_set);
2766                         err = super_types[mddev->major_version]
2767                                 .load_super(rdev, rdev0, mddev->minor_version);
2768                         if (err < 0)
2769                                 goto out;
2770                 }
2771         } else if (mddev->external)
2772                 rdev = md_import_device(dev, -2, -1);
2773         else
2774                 rdev = md_import_device(dev, -1, -1);
2775
2776         if (IS_ERR(rdev))
2777                 return PTR_ERR(rdev);
2778         err = bind_rdev_to_array(rdev, mddev);
2779  out:
2780         if (err)
2781                 export_rdev(rdev);
2782         return err ? err : len;
2783 }
2784
2785 static struct md_sysfs_entry md_new_device =
2786 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
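/*
 * Illustrative usage (editorial example): new_dev takes "major:minor", so
 * writing "8:17" would try to bind block device 8:17 (conventionally
 * /dev/sdb1) into the array; the numbers are an example only.
 */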
2787
2788 static ssize_t
2789 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2790 {
2791         char *end;
2792         unsigned long chunk, end_chunk;
2793
2794         if (!mddev->bitmap)
2795                 goto out;
2796         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2797         while (*buf) {
2798                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2799                 if (buf == end) break;
2800                 if (*end == '-') { /* range */
2801                         buf = end + 1;
2802                         end_chunk = simple_strtoul(buf, &end, 0);
2803                         if (buf == end) break;
2804                 }
2805                 if (*end && !isspace(*end)) break;
2806                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2807                 buf = end;
2808                 while (isspace(*buf)) buf++;
2809         }
2810         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2811 out:
2812         return len;
2813 }
2814
2815 static struct md_sysfs_entry md_bitmap =
2816 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
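/*
 * Illustrative usage (editorial example): bitmap_set_bits accepts chunk
 * numbers and inclusive ranges, so writing "0-15 64" would dirty bitmap
 * chunks 0 through 15 plus chunk 64, forcing them to be resynced; the chunk
 * numbers are examples only.
 */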
2817
2818 static ssize_t
2819 size_show(mddev_t *mddev, char *page)
2820 {
2821         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2822 }
2823
2824 static int update_size(mddev_t *mddev, unsigned long size);
2825
2826 static ssize_t
2827 size_store(mddev_t *mddev, const char *buf, size_t len)
2828 {
2829         /* If array is inactive, we can reduce the component size, but
2830          * not increase it (except from 0).
2831          * If array is active, we can try an on-line resize
2832          */
2833         char *e;
2834         int err = 0;
2835         unsigned long long size = simple_strtoull(buf, &e, 10);
2836         if (!*buf || *buf == '\n' ||
2837             (*e && *e != '\n'))
2838                 return -EINVAL;
2839
2840         if (mddev->pers) {
2841                 err = update_size(mddev, size);
2842                 md_update_sb(mddev, 1);
2843         } else {
2844                 if (mddev->size == 0 ||
2845                     mddev->size > size)
2846                         mddev->size = size;
2847                 else
2848                         err = -ENOSPC;
2849         }
2850         return err ? err : len;
2851 }
2852
2853 static struct md_sysfs_entry md_size =
2854 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
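/*
 * Illustrative usage (editorial example): component_size is the per-device
 * size in 1K blocks, so writing "1048576" to an inactive array would cap
 * each member at 1 GiB, while on a running array the same write attempts an
 * online resize via update_size(); the size shown is an example only.
 */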
2855
2856
2857 /* Metadata version.
2858  * This is one of
2859  *   'none' for arrays with no metadata (good luck...)
2860  *   'external' for arrays with externally managed metadata,
2861  * or N.M for internally known formats
2862  */
2863 static ssize_t
2864 metadata_show(mddev_t *mddev, char *page)
2865 {
2866         if (mddev->persistent)
2867                 return sprintf(page, "%d.%d\n",
2868                                mddev->major_version, mddev->minor_version);
2869         else if (mddev->external)
2870                 return sprintf(page, "external:%s\n", mddev->metadata_type);
2871         else
2872                 return sprintf(page, "none\n");
2873 }
2874
2875 static ssize_t
2876 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2877 {
2878         int major, minor;
2879         char *e;
2880         if (!list_empty(&mddev->disks))
2881                 return -EBUSY;
2882
2883         if (cmd_match(buf, "none")) {
2884                 mddev->persistent = 0;
2885                 mddev->external = 0;
2886                 mddev->major_version = 0;
2887                 mddev->minor_version = 90;
2888                 return len;
2889         }
2890         if (strncmp(buf, "external:", 9) == 0) {
2891                 size_t namelen = len-9;
2892                 if (namelen >= sizeof(mddev->metadata_type))
2893                         namelen = sizeof(mddev->metadata_type)-1;
2894                 strncpy(mddev->metadata_type, buf+9, namelen);
2895                 mddev->metadata_type[namelen] = 0;
2896                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
2897                         mddev->metadata_type[--namelen] = 0;
2898                 mddev->persistent = 0;
2899                 mddev->external = 1;
2900                 mddev->major_version = 0;
2901                 mddev->minor_version = 90;
2902                 return len;
2903         }
2904         major = simple_strtoul(buf, &e, 10);
2905         if (e==buf || *e != '.')
2906                 return -EINVAL;
2907         buf = e+1;
2908         minor = simple_strtoul(buf, &e, 10);
2909         if (e==buf || (*e && *e != '\n') )
2910                 return -EINVAL;
2911         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2912                 return -ENOENT;
2913         mddev->major_version = major;
2914         mddev->minor_version = minor;
2915         mddev->persistent = 1;
2916         mddev->external = 0;
2917         return len;
2918 }
2919
2920 static struct md_sysfs_entry md_metadata =
2921 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
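/*
 * Illustrative usage (editorial example): before any devices are added,
 * writing "0.90" or "1.2" selects an in-kernel superblock format, writing
 * "external:imsm" (tag assumed for illustration) hands metadata management
 * to userspace, and writing "none" selects no persistent metadata at all.
 */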
2922
2923 static ssize_t
2924 action_show(mddev_t *mddev, char *page)
2925 {
2926         char *type = "idle";
2927         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2928             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
2929                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2930                         type = "reshape";
2931                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2932                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2933                                 type = "resync";
2934                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2935                                 type = "check";
2936                         else
2937                                 type = "repair";
2938                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
2939                         type = "recover";
2940         }
2941         return sprintf(page, "%s\n", type);
2942 }
2943
2944 static ssize_t
2945 action_store(mddev_t *mddev, const char *page, size_t len)
2946 {
2947         if (!mddev->pers || !mddev->pers->sync_request)
2948                 return -EINVAL;
2949
2950         if (cmd_match(page, "idle")) {
2951                 if (mddev->sync_thread) {
2952                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2953                         md_unregister_thread(mddev->sync_thread);
2954                         mddev->sync_thread = NULL;
2955                         mddev->recovery = 0;
2956                 }
2957         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2958                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2959                 return -EBUSY;
2960         else if (cmd_match(page, "resync"))
2961                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2962         else if (cmd_match(page, "recover")) {
2963                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2964                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2965         } else if (cmd_match(page, "reshape")) {
2966                 int err;
2967                 if (mddev->pers->start_reshape == NULL)
2968                         return -EINVAL;
2969                 err = mddev->pers->start_reshape(mddev);
2970                 if (err)
2971                         return err;
2972         } else {
2973                 if (cmd_match(page, "check"))
2974                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2975                 else if (!cmd_match(page, "repair"))
2976                         return -EINVAL;
2977                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2978                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2979         }
2980         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2981         md_wakeup_thread(mddev->thread);
2982         sysfs_notify(&mddev->kobj, NULL, "sync_action");
2983         return len;
2984 }
2985
2986 static ssize_t
2987 mismatch_cnt_show(mddev_t *mddev, char *page)
2988 {
2989         return sprintf(page, "%llu\n",
2990                        (unsigned long long) mddev->resync_mismatches);
2991 }
2992
2993 static struct md_sysfs_entry md_scan_mode =
2994 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2995
2996
2997 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2998
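/*
 * sync_speed_min: per-array minimum resync speed.  Writing "system"
 * clears the local value so the system-wide default is used again;
 * sync_speed_max below behaves the same way for the upper limit.
 */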
2999 static ssize_t
3000 sync_min_show(mddev_t *mddev, char *page)
3001 {
3002         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3003                        mddev->sync_speed_min ? "local": "system");
3004 }
3005
3006 static ssize_t
3007 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3008 {
3009         int min;
3010         char *e;
3011         if (strncmp(buf, "system", 6)==0) {
3012                 mddev->sync_speed_min = 0;
3013                 return len;
3014         }
3015         min = simple_strtoul(buf, &e, 10);
3016         if (buf == e || (*e && *e != '\n') || min <= 0)
3017                 return -EINVAL;
3018         mddev->sync_speed_min = min;
3019         return len;
3020 }
3021
3022 static struct md_sysfs_entry md_sync_min =
3023 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3024
3025 static ssize_t
3026 sync_max_show(mddev_t *mddev, char *page)
3027 {
3028         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3029                        mddev->sync_speed_max ? "local": "system");
3030 }
3031
3032 static ssize_t
3033 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3034 {
3035         int max;
3036         char *e;
3037         if (strncmp(buf, "system", 6)==0) {
3038                 mddev->sync_speed_max = 0;
3039                 return len;
3040         }
3041         max = simple_strtoul(buf, &e, 10);
3042         if (buf == e || (*e && *e != '\n') || max <= 0)
3043                 return -EINVAL;
3044         mddev->sync_speed_max = max;
3045         return len;
3046 }
3047
3048 static struct md_sysfs_entry md_sync_max =
3049 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3050
3051 static ssize_t
3052 degraded_show(mddev_t *mddev, char *page)
3053 {
3054         return sprintf(page, "%d\n", mddev->degraded);
3055 }
3056 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3057
3058 static ssize_t
3059 sync_force_parallel_show(mddev_t *mddev, char *page)
3060 {
3061         return sprintf(page, "%d\n", mddev->parallel_resync);
3062 }
3063
3064 static ssize_t
3065 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3066 {
3067         long n;
3068
3069         if (strict_strtol(buf, 10, &n))
3070                 return -EINVAL;
3071
3072         if (n != 0 && n != 1)
3073                 return -EINVAL;
3074
3075         mddev->parallel_resync = n;
3076
3077         if (mddev->sync_thread)
3078                 wake_up(&resync_wait);
3079
3080         return len;
3081 }
3082
3083 /* force parallel resync, even with shared block devices */
3084 static struct md_sysfs_entry md_sync_force_parallel =
3085 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3086        sync_force_parallel_show, sync_force_parallel_store);
3087
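/*
 * sync_speed: current resync rate, computed from the sectors completed
 * since the last rate mark divided by the elapsed time, and converted
 * to K/sec (two 512-byte sectors per K).
 */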
3088 static ssize_t
3089 sync_speed_show(mddev_t *mddev, char *page)
3090 {
3091         unsigned long resync, dt, db;
3092         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
3093         dt = ((jiffies - mddev->resync_mark) / HZ);
3094         if (!dt) dt++;
3095         db = resync - (mddev->resync_mark_cnt);
3096         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
3097 }
3098
3099 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3100
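/* sync_completed: "done / total" for the current resync, in sectors. */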
3101 static ssize_t
3102 sync_completed_show(mddev_t *mddev, char *page)
3103 {
3104         unsigned long max_blocks, resync;
3105
3106         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3107                 max_blocks = mddev->resync_max_sectors;
3108         else
3109                 max_blocks = mddev->size << 1;
3110
3111         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
3112         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
3113 }
3114
3115 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3116
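/*
 * sync_min: lower bound (in sectors) for the resync window.  It cannot
 * exceed sync_max, cannot be changed while a resync is running, and
 * must be a multiple of the chunk size when the array has one.
 */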
3117 static ssize_t
3118 min_sync_show(mddev_t *mddev, char *page)
3119 {
3120         return sprintf(page, "%llu\n",
3121                        (unsigned long long)mddev->resync_min);
3122 }
3123 static ssize_t
3124 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3125 {
3126         unsigned long long min;
3127         if (strict_strtoull(buf, 10, &min))
3128                 return -EINVAL;
3129         if (min > mddev->resync_max)
3130                 return -EINVAL;
3131         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3132                 return -EBUSY;
3133
3134         /* Must be a multiple of chunk_size */
3135         if (mddev->chunk_size) {
3136                 if (min & (sector_t)((mddev->chunk_size>>9)-1))
3137                         return -EINVAL;
3138         }
3139         mddev->resync_min = min;
3140
3141         return len;
3142 }
3143
3144 static struct md_sysfs_entry md_min_sync =
3145 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3146
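/*
 * sync_max: upper bound (in sectors) for the resync window, or "max"
 * for no limit.  The same chunk-alignment rule as sync_min applies,
 * and it cannot be lowered while a resync is running.
 */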
3147 static ssize_t
3148 max_sync_show(mddev_t *mddev, char *page)
3149 {
3150         if (mddev->resync_max == MaxSector)
3151                 return sprintf(page, "max\n");
3152         else
3153                 return sprintf(page, "%llu\n",
3154                                (unsigned long long)mddev->resync_max);
3155 }
3156 static ssize_t
3157 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3158 {
3159         if (strncmp(buf, "max", 3) == 0)
3160                 mddev->resync_max = MaxSector;
3161         else {
3162                 unsigned long long max;
3163                 if (strict_strtoull(buf, 10, &max))
3164                         return -EINVAL;
3165                 if (max < mddev->resync_min)
3166                         return -EINVAL;
3167                 if (max < mddev->resync_max &&
3168                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3169                         return -EBUSY;
3170
3171                 /* Must be a multiple of chunk_size */
3172                 if (mddev->chunk_size) {
3173                         if (max & (sector_t)((mddev->chunk_size>>9)-1))
3174                                 return -EINVAL;
3175                 }
3176                 mddev->resync_max = max;
3177         }
3178         wake_up(&mddev->recovery_wait);
3179         return len;
3180 }
3181
3182 static struct md_sysfs_entry md_max_sync =
3183 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3184
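/*
 * suspend_lo/suspend_hi delimit a sector range in which the
 * personality suspends I/O via its quiesce method; only writes that
 * keep the window consistent are accepted.
 */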
3185 static ssize_t
3186 suspend_lo_show(mddev_t *mddev, char *page)
3187 {
3188         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3189 }
3190
3191 static ssize_t
3192 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3193 {
3194         char *e;
3195         unsigned long long new = simple_strtoull(buf, &e, 10);
3196
3197         if (mddev->pers->quiesce == NULL)
3198                 return -EINVAL;
3199         if (buf == e || (*e && *e != '\n'))
3200                 return -EINVAL;
3201         if (new >= mddev->suspend_hi ||
3202             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3203                 mddev->suspend_lo = new;
3204                 mddev->pers->quiesce(mddev, 2);
3205                 return len;
3206         } else
3207                 return -EINVAL;
3208 }
3209 static struct md_sysfs_entry md_suspend_lo =
3210 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3211
3212
3213 static ssize_t
3214 suspend_hi_show(mddev_t *mddev, char *page)
3215 {
3216         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3217 }
3218
3219 static ssize_t
3220 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3221 {
3222         char *e;
3223         unsigned long long new = simple_strtoull(buf, &e, 10);
3224
3225         if (mddev->pers->quiesce == NULL)
3226                 return -EINVAL;
3227         if (buf == e || (*e && *e != '\n'))
3228                 return -EINVAL;
3229         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3230             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3231                 mddev->suspend_hi = new;
3232                 mddev->pers->quiesce(mddev, 1);
3233                 mddev->pers->quiesce(mddev, 0);
3234                 return len;
3235         } else
3236                 return -EINVAL;
3237 }
3238 static struct md_sysfs_entry md_suspend_hi =
3239 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3240
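/*
 * reshape_position: sector at which an interrupted reshape should
 * resume, or "none".  It can only be set while the array is not
 * active, and setting it resets the pending geometry (delta_disks,
 * new_level, new_layout, new_chunk) to the current values.
 */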
3241 static ssize_t
3242 reshape_position_show(mddev_t *mddev, char *page)
3243 {
3244         if (mddev->reshape_position != MaxSector)
3245                 return sprintf(page, "%llu\n",
3246                                (unsigned long long)mddev->reshape_position);
3247         strcpy(page, "none\n");
3248         return 5;
3249 }
3250
3251 static ssize_t
3252 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3253 {
3254         char *e;
3255         unsigned long long new = simple_strtoull(buf, &e, 10);
3256         if (mddev->pers)
3257                 return -EBUSY;
3258         if (buf == e || (*e && *e != '\n'))
3259                 return -EINVAL;
3260         mddev->reshape_position = new;
3261         mddev->delta_disks = 0;
3262         mddev->new_level = mddev->level;
3263         mddev->new_layout = mddev->layout;
3264         mddev->new_chunk = mddev->chunk_size;
3265         return len;
3266 }
3267
3268 static struct md_sysfs_entry md_reshape_position =
3269 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3270        reshape_position_store);
3271
3272
3273 static struct attribute *md_default_attrs[] = {
3274         &md_level.attr,
3275         &md_layout.attr,
3276         &md_raid_disks.attr,
3277         &md_chunk_size.attr,
3278         &md_size.attr,
3279         &md_resync_start.attr,
3280         &md_metadata.attr,
3281         &md_new_device.attr,
3282         &md_safe_delay.attr,
3283         &md_array_state.attr,
3284         &md_reshape_position.attr,
3285         NULL,
3286 };
3287
3288 static struct attribute *md_redundancy_attrs[] = {
3289         &md_scan_mode.attr,
3290         &md_mismatches.attr,
3291         &md_sync_min.attr,
3292         &md_sync_max.attr,
3293         &md_sync_speed.attr,
3294         &md_sync_force_parallel.attr,
3295         &md_sync_completed.attr,
3296         &md_min_sync.attr,
3297         &md_max_sync.attr,
3298         &md_suspend_lo.attr,
3299         &md_suspend_hi.attr,
3300         &md_bitmap.attr,
3301         &md_degraded.attr,
3302         NULL,
3303 };
3304 static struct attribute_group md_redundancy_group = {
3305         .name = NULL,
3306         .attrs = md_redundancy_attrs,
3307 };
3308
3309
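/*
 * sysfs glue: both handlers take the mddev lock around the
 * per-attribute show/store routine; stores additionally require
 * CAP_SYS_ADMIN.
 */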
3310 static ssize_t
3311 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3312 {
3313         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3314         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3315         ssize_t rv;
3316
3317         if (!entry->show)
3318                 return -EIO;
3319         rv = mddev_lock(mddev);
3320         if (!rv) {
3321                 rv = entry->show(mddev, page);
3322                 mddev_unlock(mddev);
3323         }
3324         return rv;
3325 }
3326
3327 static ssize_t
3328 md_attr_store(struct kobject *kobj, struct attribute *attr,
3329               const char *page, size_t length)
3330 {
3331         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3332         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3333         ssize_t rv;
3334
3335         if (!entry->store)
3336                 return -EIO;
3337         if (!capable(CAP_SYS_ADMIN))
3338                 return -EACCES;
3339         rv = mddev_lock(mddev);
3340         if (!rv) {
3341                 rv = entry->store(mddev, page, length);
3342                 mddev_unlock(mddev);
3343         }
3344         return rv;
3345 }
3346
3347 static void md_free(struct kobject *ko)
3348 {
3349         mddev_t *mddev = container_of(ko, mddev_t, kobj);
3350         kfree(mddev);
3351 }
3352
3353 static struct sysfs_ops md_sysfs_ops = {
3354         .show   = md_attr_show,
3355         .store  = md_attr_store,
3356 };
3357 static struct kobj_type md_ktype = {
3358         .release        = md_free,
3359         .sysfs_ops      = &md_sysfs_ops,
3360         .default_attrs  = md_default_attrs,
3361 };
3362
3363 int mdp_major = 0;
3364
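/*
 * md_probe: allocate the gendisk for an md device (partitioned "md_dN"
 * or plain "mdN"), add it to the block layer and register the "md"
 * kobject beneath it in sysfs.
 */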
3365 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3366 {
3367         static DEFINE_MUTEX(disks_mutex);
3368         mddev_t *mddev = mddev_find(dev);
3369         struct gendisk *disk;
3370         int partitioned = (MAJOR(dev) != MD_MAJOR);
3371         int shift = partitioned ? MdpMinorShift : 0;
3372         int unit = MINOR(dev) >> shift;
3373         int error;
3374
3375         if (!mddev)
3376                 return NULL;
3377
3378         mutex_lock(&disks_mutex);
3379         if (mddev->gendisk) {
3380                 mutex_unlock(&disks_mutex);
3381                 mddev_put(mddev);
3382                 return NULL;
3383         }
3384         disk = alloc_disk(1 << shift);
3385         if (!disk) {
3386                 mutex_unlock(&disks_mutex);
3387                 mddev_put(mddev);
3388                 return NULL;
3389         }
3390         disk->major = MAJOR(dev);
3391         disk->first_minor = unit << shift;
3392         if (partitioned)
3393                 sprintf(disk->disk_name, "md_d%d", unit);
3394         else
3395                 sprintf(disk->disk_name, "md%d", unit);
3396         disk->fops = &md_fops;
3397         disk->private_data = mddev;
3398         disk->queue = mddev->queue;
3399         add_disk(disk);
3400         mddev->gendisk = disk;
3401         error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
3402                                      "%s", "md");
3403         mutex_unlock(&disks_mutex);
3404         if (error)
3405                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3406                        disk->disk_name);
3407         else
3408                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3409         return NULL;
3410 }
3411
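/*
 * Safemode timer: once outstanding writes have drained, flag the array
 * as safe and wake the md thread; arrays with externally managed
 * metadata also get an array_state notification.
 */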
3412 static void md_safemode_timeout(unsigned long data)
3413 {
3414         mddev_t *mddev = (mddev_t *) data;
3415
3416         if (!atomic_read(&mddev->writes_pending)) {
3417                 mddev->safemode = 1;
3418                 if (mddev->external)
3419                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3420         }
3421         md_wakeup_thread(mddev->thread);
3422 }
3423
3424 static int start_dirty_degraded;
3425
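/*
 * do_md_run: bring an assembled array on line.  This validates the
 * chunk size and component devices, loads and binds the personality,
 * creates the bitmap if needed, wires up the request queue, and kicks
 * off any recovery that was interrupted.
 */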
3426 static int do_md_run(mddev_t * mddev)
3427 {
3428         int err;
3429         int chunk_size;
3430         struct list_head *tmp;
3431         mdk_rdev_t *rdev;
3432         struct gendisk *disk;
3433         struct mdk_personality *pers;
3434         char b[BDEVNAME_SIZE];
3435
3436         if (list_empty(&mddev->disks))
3437                 /* cannot run an array with no devices.. */
3438                 return -EINVAL;
3439
3440         if (mddev->pers)
3441                 return -EBUSY;
3442
3443         /*
3444          * Analyze all RAID superblock(s)
3445          */
3446         if (!mddev->raid_disks) {
3447                 if (!mddev->persistent)
3448                         return -EINVAL;
3449                 analyze_sbs(mddev);
3450         }
3451
3452         chunk_size = mddev->chunk_size;
3453
3454         if (chunk_size) {
3455                 if (chunk_size > MAX_CHUNK_SIZE) {
3456                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3457                                 chunk_size, MAX_CHUNK_SIZE);
3458                         return -EINVAL;
3459                 }
3460                 /*
3461                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3462                  */
3463                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3464                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3465                         return -EINVAL;
3466                 }
3467                 if (chunk_size < PAGE_SIZE) {
3468                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3469                                 chunk_size, PAGE_SIZE);
3470                         return -EINVAL;
3471                 }
3472
3473                 /* devices must have minimum size of one chunk */
3474                 rdev_for_each(rdev, tmp, mddev) {
3475                         if (test_bit(Faulty, &rdev->flags))
3476                                 continue;
3477                         if (rdev->size < chunk_size / 1024) {
3478                                 printk(KERN_WARNING
3479                                         "md: Dev %s smaller than chunk_size:"
3480                                         " %lluk < %dk\n",
3481                                         bdevname(rdev->bdev,b),
3482                                         (unsigned long long)rdev->size,
3483                                         chunk_size / 1024);
3484                                 return -EINVAL;
3485                         }
3486                 }
3487         }
3488
3489 #ifdef CONFIG_KMOD
3490         if (mddev->level != LEVEL_NONE)
3491                 request_module("md-level-%d", mddev->level);
3492         else if (mddev->clevel[0])
3493                 request_module("md-%s", mddev->clevel);
3494 #endif
3495
3496         /*
3497          * Drop all container device buffers, from now on
3498          * the only valid external interface is through the md
3499          * device.
3500          */
3501         rdev_for_each(rdev, tmp, mddev) {
3502                 if (test_bit(Faulty, &rdev->flags))
3503                         continue;
3504                 sync_blockdev(rdev->bdev);
3505                 invalidate_bdev(rdev->bdev);
3506
3507                 /* perform some consistency tests on the device.
3508                  * We don't want the data to overlap the metadata.
3509                  * Internal-bitmap issues are handled elsewhere.
3510                  */
3511                 if (rdev->data_offset < rdev->sb_offset) {
3512                         if (mddev->size &&
3513                             rdev->data_offset + mddev->size*2
3514                             > rdev->sb_offset*2) {
3515                                 printk("md: %s: data overlaps metadata\n",
3516                                        mdname(mddev));
3517                                 return -EINVAL;
3518                         }
3519                 } else {
3520                         if (rdev->sb_offset*2 + rdev->sb_size/512
3521                             > rdev->data_offset) {
3522                                 printk("md: %s: metadata overlaps data\n",
3523                                        mdname(mddev));
3524                                 return -EINVAL;
3525                         }
3526                 }
3527         }
3528
3529         md_probe(mddev->unit, NULL, NULL);
3530         disk = mddev->gendisk;
3531         if (!disk)
3532                 return -ENOMEM;
3533
3534         spin_lock(&pers_lock);
3535         pers = find_pers(mddev->level, mddev->clevel);
3536         if (!pers || !try_module_get(pers->owner)) {
3537                 spin_unlock(&pers_lock);
3538                 if (mddev->level != LEVEL_NONE)
3539                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3540                                mddev->level);
3541                 else
3542                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3543                                mddev->clevel);
3544                 return -EINVAL;
3545         }
3546         mddev->pers = pers;
3547         spin_unlock(&pers_lock);
3548         mddev->level = pers->level;
3549         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3550
3551         if (mddev->reshape_position != MaxSector &&
3552             pers->start_reshape == NULL) {
3553                 /* This personality cannot handle reshaping... */
3554                 mddev->pers = NULL;
3555                 module_put(pers->owner);
3556                 return -EINVAL;
3557         }
3558
3559         if (pers->sync_request) {
3560                 /* Warn if this is a potentially silly
3561                  * configuration.
3562                  */
3563                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3564                 mdk_rdev_t *rdev2;
3565                 struct list_head *tmp2;
3566                 int warned = 0;
3567                 rdev_for_each(rdev, tmp, mddev) {
3568                         rdev_for_each(rdev2, tmp2, mddev) {
3569                                 if (rdev < rdev2 &&
3570                                     rdev->bdev->bd_contains ==
3571                                     rdev2->bdev->bd_contains) {
3572                                         printk(KERN_WARNING
3573                                                "%s: WARNING: %s appears to be"
3574                                                " on the same physical disk as"
3575                                                " %s.\n",
3576                                                mdname(mddev),
3577                                                bdevname(rdev->bdev,b),
3578                                                bdevname(rdev2->bdev,b2));
3579                                         warned = 1;
3580                                 }
3581                         }
3582                 }
3583                 if (warned)
3584                         printk(KERN_WARNING
3585                                "True protection against single-disk"
3586                                " failure might be compromised.\n");
3587         }
3588
3589         mddev->recovery = 0;
3590         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3591         mddev->barriers_work = 1;
3592         mddev->ok_start_degraded = start_dirty_degraded;
3593
3594         if (start_readonly)
3595                 mddev->ro = 2; /* read-only, but switch on first write */
3596
3597         err = mddev->pers->run(mddev);
3598         if (!err && mddev->pers->sync_request) {
3599                 err = bitmap_create(mddev);
3600                 if (err) {
3601                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3602                                mdname(mddev), err);
3603                         mddev->pers->stop(mddev);
3604                 }
3605         }
3606         if (err) {
3607                 printk(KERN_ERR "md: pers->run() failed ...\n");
3608                 module_put(mddev->pers->owner);
3609                 mddev->pers = NULL;
3610                 bitmap_destroy(mddev);
3611                 return err;
3612         }
3613         if (mddev->pers->sync_request) {
3614                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3615                         printk(KERN_WARNING
3616                                "md: cannot register extra attributes for %s\n",
3617                                mdname(mddev));
3618         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3619                 mddev->ro = 0;
3620
3621         atomic_set(&mddev->writes_pending,0);
3622         mddev->safemode = 0;
3623         mddev->safemode_timer.function = md_safemode_timeout;
3624         mddev->safemode_timer.data = (unsigned long) mddev;
3625         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3626         mddev->in_sync = 1;
3627
3628         rdev_for_each(rdev, tmp, mddev)
3629                 if (rdev->raid_disk >= 0) {
3630                         char nm[20];
3631                         sprintf(nm, "rd%d", rdev->raid_disk);
3632                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3633                                 printk("md: cannot register %s for %s\n",
3634                                        nm, mdname(mddev));
3635                 }
3636         
3637         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3638         
3639         if (mddev->flags)
3640                 md_update_sb(mddev, 0);
3641
3642         set_capacity(disk, mddev->array_size<<1);
3643
3644         /* If we call blk_queue_make_request here, it will
3645          * re-initialise max_sectors etc which may have been
3646          * refined inside ->run.  So just set the bits we need to set.
3647          * Most initialisation happened when we called
3648          * blk_queue_make_request(..., md_fail_request)
3649          * earlier.
3650          */
3651         mddev->queue->queuedata = mddev;
3652         mddev->queue->make_request_fn = mddev->pers->make_request;
3653
3654         /* If there is a partially-recovered drive we need to
3655          * start recovery here.  If we leave it to md_check_recovery,
3656          * it will remove the drives and not do the right thing
3657          */
3658         if (mddev->degraded && !mddev->sync_thread) {
3659                 struct list_head *rtmp;
3660                 int spares = 0;
3661                 rdev_for_each(rdev, rtmp, mddev)
3662                         if (rdev->raid_disk >= 0 &&
3663                             !test_bit(In_sync, &rdev->flags) &&
3664                             !test_bit(Faulty, &rdev->flags))
3665                                 /* complete an interrupted recovery */
3666                                 spares++;
3667                 if (spares && mddev->pers->sync_request) {
3668                         mddev->recovery = 0;
3669                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3670                         mddev->sync_thread = md_register_thread(md_do_sync,
3671                                                                 mddev,
3672                                                                 "%s_resync");
3673                         if (!mddev->sync_thread) {
3674                                 printk(KERN_ERR "%s: could not start resync"
3675                                        " thread...\n",
3676                                        mdname(mddev));
3677                                 /* leave the spares where they are, it shouldn't hurt */
3678                                 mddev->recovery = 0;
3679                         }
3680                 }
3681         }
3682         md_wakeup_thread(mddev->thread);
3683         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3684
3685         mddev->changed = 1;
3686         md_new_event(mddev);
3687         sysfs_notify(&mddev->kobj, NULL, "array_state");
3688         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3689         kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
3690         return 0;
3691 }
3692
3693 static int restart_array(mddev_t *mddev)
3694 {
3695         struct gendisk *disk = mddev->gendisk;
3696         int err;
3697
3698         /*
3699          * Complain if it has no devices
3700          */
3701         err = -ENXIO;
3702         if (list_empty(&mddev->disks))
3703                 goto out;
3704
3705         if (mddev->pers) {
3706                 err = -EBUSY;
3707                 if (!mddev->ro)
3708                         goto out;
3709
3710                 mddev->safemode = 0;
3711                 mddev->ro = 0;
3712                 set_disk_ro(disk, 0);
3713
3714                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3715                         mdname(mddev));
3716                 /*
3717                  * Kick recovery or resync if necessary
3718                  */
3719                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3720                 md_wakeup_thread(mddev->thread);
3721                 md_wakeup_thread(mddev->sync_thread);
3722                 err = 0;
3723                 sysfs_notify(&mddev->kobj, NULL, "array_state");
3724
3725         } else
3726                 err = -EINVAL;
3727
3728 out:
3729         return err;
3730 }
3731
3732 /* similar to deny_write_access, but accounts for our holding a reference
3733  * to the file ourselves */
3734 static int deny_bitmap_write_access(struct file * file)
3735 {
3736         struct inode *inode = file->f_mapping->host;
3737
3738         spin_lock(&inode->i_lock);
3739         if (atomic_read(&inode->i_writecount) > 1) {
3740                 spin_unlock(&inode->i_lock);
3741                 return -ETXTBSY;
3742         }
3743         atomic_set(&inode->i_writecount, -1);
3744         spin_unlock(&inode->i_lock);
3745
3746         return 0;
3747 }
3748
3749 static void restore_bitmap_write_access(struct file *file)
3750 {
3751         struct inode *inode = file->f_mapping->host;
3752
3753         spin_lock(&inode->i_lock);
3754         atomic_set(&inode->i_writecount, 1);
3755         spin_unlock(&inode->i_lock);
3756 }
3757
3758 /* mode:
3759  *   0 - completely stop and dis-assemble array
3760  *   1 - switch to readonly
3761  *   2 - stop but do not disassemble array
3762  */
3763 static int do_md_stop(mddev_t * mddev, int mode)
3764 {
3765         int err = 0;
3766         struct gendisk *disk = mddev->gendisk;
3767
3768         if (mddev->pers) {
3769                 if (atomic_read(&mddev->active)>2) {
3770                         printk("md: %s still in use.\n",mdname(mddev));
3771                         return -EBUSY;
3772                 }
3773
3774                 if (mddev->sync_thread) {
3775                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3776                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3777                         md_unregister_thread(mddev->sync_thread);
3778                         mddev->sync_thread = NULL;
3779                 }
3780
3781                 del_timer_sync(&mddev->safemode_timer);
3782
3783                 invalidate_partition(disk, 0);
3784
3785                 switch(mode) {
3786                 case 1: /* readonly */
3787                         err  = -ENXIO;
3788                         if (mddev->ro==1)
3789                                 goto out;
3790                         mddev->ro = 1;
3791                         break;
3792                 case 0: /* disassemble */
3793                 case 2: /* stop */
3794                         bitmap_flush(mddev);
3795                         md_super_wait(mddev);
3796                         if (mddev->ro)
3797                                 set_disk_ro(disk, 0);
3798                         blk_queue_make_request(mddev->queue, md_fail_request);
3799                         mddev->pers->stop(mddev);
3800                         mddev->queue->merge_bvec_fn = NULL;
3801                         mddev->queue->unplug_fn = NULL;
3802                         mddev->queue->backing_dev_info.congested_fn = NULL;
3803                         if (mddev->pers->sync_request)
3804                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3805
3806                         module_put(mddev->pers->owner);
3807                         mddev->pers = NULL;
3808                         /* tell userspace to handle 'inactive' */
3809                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3810
3811                         set_capacity(disk, 0);
3812                         mddev->changed = 1;
3813
3814                         if (mddev->ro)
3815                                 mddev->ro = 0;
3816                 }
3817                 if (!mddev->in_sync || mddev->flags) {
3818                         /* mark array as shutdown cleanly */
3819                         mddev->in_sync = 1;
3820                         md_update_sb(mddev, 1);
3821                 }
3822                 if (mode == 1)
3823                         set_disk_ro(disk, 1);
3824                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3825         }
3826
3827         /*
3828          * Free resources if final stop
3829          */
3830         if (mode == 0) {
3831                 mdk_rdev_t *rdev;
3832                 struct list_head *tmp;
3833
3834                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3835
3836                 bitmap_destroy(mddev);
3837                 if (mddev->bitmap_file) {
3838                         restore_bitmap_write_access(mddev->bitmap_file);
3839                         fput(mddev->bitmap_file);
3840                         mddev->bitmap_file = NULL;
3841                 }
3842                 mddev->bitmap_offset = 0;
3843
3844                 rdev_for_each(rdev, tmp, mddev)
3845                         if (rdev->raid_disk >= 0) {
3846                                 char nm[20];
3847                                 sprintf(nm, "rd%d", rdev->raid_disk);
3848                                 sysfs_remove_link(&mddev->kobj, nm);
3849                         }
3850
3851                 /* make sure all md_delayed_delete calls have finished */
3852                 flush_scheduled_work();
3853
3854                 export_array(mddev);
3855
3856                 mddev->array_size = 0;
3857                 mddev->size = 0;
3858                 mddev->raid_disks = 0;
3859                 mddev->recovery_cp = 0;
3860                 mddev->resync_min = 0;
3861                 mddev->resync_max = MaxSector;
3862                 mddev->reshape_position = MaxSector;
3863                 mddev->external = 0;
3864                 mddev->persistent = 0;
3865                 mddev->level = LEVEL_NONE;
3866                 mddev->clevel[0] = 0;
3867                 mddev->flags = 0;
3868                 mddev->ro = 0;
3869                 mddev->metadata_type[0] = 0;
3870                 mddev->chunk_size = 0;
3871                 mddev->ctime = mddev->utime = 0;
3872                 mddev->layout = 0;
3873                 mddev->max_disks = 0;
3874                 mddev->events = 0;
3875                 mddev->delta_disks = 0;
3876                 mddev->new_level = LEVEL_NONE;
3877                 mddev->new_layout = 0;
3878                 mddev->new_chunk = 0;
3879                 mddev->curr_resync = 0;
3880                 mddev->resync_mismatches = 0;
3881                 mddev->suspend_lo = mddev->suspend_hi = 0;
3882                 mddev->sync_speed_min = mddev->sync_speed_max = 0;
3883                 mddev->recovery = 0;
3884                 mddev->in_sync = 0;
3885                 mddev->changed = 0;
3886                 mddev->degraded = 0;
3887                 mddev->barriers_work = 0;
3888                 mddev->safemode = 0;
3889
3890         } else if (mddev->pers)
3891                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3892                         mdname(mddev));
3893         err = 0;
3894         md_new_event(mddev);
3895         sysfs_notify(&mddev->kobj, NULL, "array_state");
3896 out:
3897         return err;
3898 }
3899
3900 #ifndef MODULE
3901 static void autorun_array(mddev_t *mddev)
3902 {
3903         mdk_rdev_t *rdev;
3904         struct list_head *tmp;
3905         int err;
3906
3907         if (list_empty(&mddev->disks))
3908                 return;
3909
3910         printk(KERN_INFO "md: running: ");
3911
3912         rdev_for_each(rdev, tmp, mddev) {
3913                 char b[BDEVNAME_SIZE];
3914                 printk("<%s>", bdevname(rdev->bdev,b));
3915         }
3916         printk("\n");
3917
3918         err = do_md_run (mddev);
3919         if (err) {
3920                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3921                 do_md_stop (mddev, 0);
3922         }
3923 }
3924
3925 /*
3926  * let's try to run arrays based on all disks that have arrived
3927  * until now. (those are in pending_raid_disks)
3928  *
3929  * the method: pick the first pending disk, collect all disks with
3930  * the same UUID, remove all from the pending list and put them into
3931  * the 'same_array' list. Then order this list based on superblock
3932  * update time (freshest comes first), kick out 'old' disks and
3933  * compare superblocks. If everything's fine then run it.
3934  *
3935  * If "unit" is allocated, then bump its reference count
3936  */
3937 static void autorun_devices(int part)
3938 {
3939         struct list_head *tmp;
3940         mdk_rdev_t *rdev0, *rdev;
3941         mddev_t *mddev;
3942         char b[BDEVNAME_SIZE];
3943
3944         printk(KERN_INFO "md: autorun ...\n");
3945         while (!list_empty(&pending_raid_disks)) {
3946                 int unit;
3947                 dev_t dev;
3948                 LIST_HEAD(candidates);
3949                 rdev0 = list_entry(pending_raid_disks.next,
3950                                          mdk_rdev_t, same_set);
3951
3952                 printk(KERN_INFO "md: considering %s ...\n",
3953                         bdevname(rdev0->bdev,b));
3954                 INIT_LIST_HEAD(&candidates);
3955                 rdev_for_each_list(rdev, tmp, pending_raid_disks)
3956                         if (super_90_load(rdev, rdev0, 0) >= 0) {
3957                                 printk(KERN_INFO "md:  adding %s ...\n",
3958                                         bdevname(rdev->bdev,b));
3959                                 list_move(&rdev->same_set, &candidates);
3960                         }
3961                 /*
3962                  * now we have a set of devices, with all of them having
3963                  * mostly sane superblocks. It's time to allocate the
3964                  * mddev.
3965                  */
3966                 if (part) {
3967                         dev = MKDEV(mdp_major,
3968                                     rdev0->preferred_minor << MdpMinorShift);
3969                         unit = MINOR(dev) >> MdpMinorShift;
3970                 } else {
3971                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3972                         unit = MINOR(dev);
3973                 }
3974                 if (rdev0->preferred_minor != unit) {
3975                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3976                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3977                         break;
3978                 }
3979
3980                 md_probe(dev, NULL, NULL);
3981                 mddev = mddev_find(dev);
3982                 if (!mddev || !mddev->gendisk) {
3983                         if (mddev)
3984                                 mddev_put(mddev);
3985                         printk(KERN_ERR
3986                                 "md: cannot allocate memory for md drive.\n");
3987                         break;
3988                 }
3989                 if (mddev_lock(mddev)) 
3990                         printk(KERN_WARNING "md: %s locked, cannot run\n",
3991                                mdname(mddev));
3992                 else if (mddev->raid_disks || mddev->major_version
3993                          || !list_empty(&mddev->disks)) {
3994                         printk(KERN_WARNING 
3995                                 "md: %s already running, cannot run %s\n",
3996                                 mdname(mddev), bdevname(rdev0->bdev,b));
3997                         mddev_unlock(mddev);
3998                 } else {
3999                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4000                         mddev->persistent = 1;
4001                         rdev_for_each_list(rdev, tmp, candidates) {
4002                                 list_del_init(&rdev->same_set);
4003                                 if (bind_rdev_to_array(rdev, mddev))
4004                                         export_rdev(rdev);
4005                         }
4006                         autorun_array(mddev);
4007                         mddev_unlock(mddev);
4008                 }
4009                 /* on success, candidates will be empty, on error
4010                  * it won't be...
4011                  */
4012                 rdev_for_each_list(rdev, tmp, candidates)
4013                         export_rdev(rdev);
4014                 mddev_put(mddev);
4015         }
4016         printk(KERN_INFO "md: ... autorun DONE.\n");
4017 }
4018 #endif /* !MODULE */
4019
4020 static int get_version(void __user * arg)
4021 {
4022         mdu_version_t ver;
4023
4024         ver.major = MD_MAJOR_VERSION;
4025         ver.minor = MD_MINOR_VERSION;
4026         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4027
4028         if (copy_to_user(arg, &ver, sizeof(ver)))
4029                 return -EFAULT;
4030
4031         return 0;
4032 }
4033
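/* GET_ARRAY_INFO ioctl: copy a summary of the array state to user space. */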
4034 static int get_array_info(mddev_t * mddev, void __user * arg)
4035 {
4036         mdu_array_info_t info;
4037         int nr,working,active,failed,spare;
4038         mdk_rdev_t *rdev;
4039         struct list_head *tmp;
4040
4041         nr=working=active=failed=spare=0;
4042         rdev_for_each(rdev, tmp, mddev) {
4043                 nr++;
4044                 if (test_bit(Faulty, &rdev->flags))
4045                         failed++;
4046                 else {
4047                         working++;
4048                         if (test_bit(In_sync, &rdev->flags))
4049                                 active++;       
4050                         else
4051                                 spare++;
4052                 }
4053         }
4054
4055         info.major_version = mddev->major_version;
4056         info.minor_version = mddev->minor_version;
4057         info.patch_version = MD_PATCHLEVEL_VERSION;
4058         info.ctime         = mddev->ctime;
4059         info.level         = mddev->level;
4060         info.size          = mddev->size;
4061         if (info.size != mddev->size) /* overflow */
4062                 info.size = -1;
4063         info.nr_disks      = nr;
4064         info.raid_disks    = mddev->raid_disks;
4065         info.md_minor      = mddev->md_minor;
4066         info.not_persistent= !mddev->persistent;
4067
4068         info.utime         = mddev->utime;
4069         info.state         = 0;
4070         if (mddev->in_sync)
4071                 info.state = (1<<MD_SB_CLEAN);
4072         if (mddev->bitmap && mddev->bitmap_offset)
4073                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4074         info.active_disks  = active;
4075         info.working_disks = working;
4076         info.failed_disks  = failed;
4077         info.spare_disks   = spare;
4078
4079         info.layout        = mddev->layout;
4080         info.chunk_size    = mddev->chunk_size;
4081
4082         if (copy_to_user(arg, &info, sizeof(info)))
4083                 return -EFAULT;
4084
4085         return 0;
4086 }
4087
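/*
 * GET_BITMAP_FILE ioctl: return the path of the external bitmap file,
 * or an empty string when no file-backed bitmap is in use.
 */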
4088 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4089 {
4090         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4091         char *ptr, *buf = NULL;
4092         int err = -ENOMEM;
4093
4094         md_allow_write(mddev);
4095
4096         file = kmalloc(sizeof(*file), GFP_KERNEL);
4097         if (!file)
4098                 goto out;
4099
4100         /* bitmap disabled, zero the first byte and copy out */
4101         if (!mddev->bitmap || !mddev->bitmap->file) {
4102                 file->pathname[0] = '\0';
4103                 goto copy_out;
4104         }
4105
4106         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4107         if (!buf)
4108                 goto out;
4109
4110         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4111         if (IS_ERR(ptr))
4112                 goto out;
4113
4114         strcpy(file->pathname, ptr);
4115
4116 copy_out:
4117         err = 0;
4118         if (copy_to_user(arg, file, sizeof(*file)))
4119                 err = -EFAULT;
4120 out:
4121         kfree(buf);
4122         kfree(file);
4123         return err;
4124 }
4125
4126 static int get_disk_info(mddev_t * mddev, void __user * arg)
4127 {
4128         mdu_disk_info_t info;
4129         unsigned int nr;
4130         mdk_rdev_t *rdev;
4131
4132         if (copy_from_user(&info, arg, sizeof(info)))
4133                 return -EFAULT;
4134
4135         nr = info.number;
4136
4137         rdev = find_rdev_nr(mddev, nr);
4138         if (rdev) {
4139                 info.major = MAJOR(rdev->bdev->bd_dev);
4140                 info.minor = MINOR(rdev->bdev->bd_dev);
4141                 info.raid_disk = rdev->raid_disk;
4142                 info.state = 0;
4143                 if (test_bit(Faulty, &rdev->flags))
4144                         info.state |= (1<<MD_DISK_FAULTY);
4145                 else if (test_bit(In_sync, &rdev->flags)) {
4146                         info.state |= (1<<MD_DISK_ACTIVE);
4147                         info.state |= (1<<MD_DISK_SYNC);
4148                 }
4149                 if (test_bit(WriteMostly, &rdev->flags))
4150                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
4151         } else {
4152                 info.major = info.minor = 0;
4153                 info.raid_disk = -1;
4154                 info.state = (1<<MD_DISK_REMOVED);
4155         }
4156
4157         if (copy_to_user(arg, &info, sizeof(info)))
4158                 return -EFAULT;
4159
4160         return 0;
4161 }
4162
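/*
 * ADD_NEW_DISK handling covers three cases: assembling an inactive
 * array (load the device's superblock and bind it), hot-adding a disk
 * to a running array, and building a non-persistent or version-0.90
 * array from the geometry supplied by the caller.
 */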
4163 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4164 {
4165         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4166         mdk_rdev_t *rdev;
4167         dev_t dev = MKDEV(info->major,info->minor);
4168
4169         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4170                 return -EOVERFLOW;
4171
4172         if (!mddev->raid_disks) {
4173                 int err;
4174                 /* expecting a device which has a superblock */
4175                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4176                 if (IS_ERR(rdev)) {
4177                         printk(KERN_WARNING 
4178                                 "md: md_import_device returned %ld\n",
4179                                 PTR_ERR(rdev));
4180                         return PTR_ERR(rdev);
4181                 }
4182                 if (!list_empty(&mddev->disks)) {
4183                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4184                                                         mdk_rdev_t, same_set);
4185                         int err = super_types[mddev->major_version]
4186                                 .load_super(rdev, rdev0, mddev->minor_version);
4187                         if (err < 0) {
4188                                 printk(KERN_WARNING 
4189                                         "md: %s has different UUID to %s\n",
4190                                         bdevname(rdev->bdev,b), 
4191                                         bdevname(rdev0->bdev,b2));
4192                                 export_rdev(rdev);
4193                                 return -EINVAL;
4194                         }
4195                 }
4196                 err = bind_rdev_to_array(rdev, mddev);
4197                 if (err)
4198                         export_rdev(rdev);
4199                 return err;
4200         }
4201
4202         /*
4203          * add_new_disk can be used once the array is assembled
4204          * to add "hot spares".  They must already have a superblock
4205          * written
4206          */
4207         if (mddev->pers) {
4208                 int err;
4209                 if (!mddev->pers->hot_add_disk) {
4210                         printk(KERN_WARNING 
4211                                 "%s: personality does not support diskops!\n",
4212                                mdname(mddev));
4213                         return -EINVAL;
4214                 }
4215                 if (mddev->persistent)
4216                         rdev = md_import_device(dev, mddev->major_version,
4217                                                 mddev->minor_version);
4218                 else
4219                         rdev = md_import_device(dev, -1, -1);
4220                 if (IS_ERR(rdev)) {
4221                         printk(KERN_WARNING 
4222                                 "md: md_import_device returned %ld\n",
4223                                 PTR_ERR(rdev));
4224                         return PTR_ERR(rdev);
4225                 }
4226                 /* set save_raid_disk if appropriate */
4227                 if (!mddev->persistent) {
4228                         if (info->state & (1<<MD_DISK_SYNC)  &&
4229                             info->raid_disk < mddev->raid_disks)
4230                                 rdev->raid_disk = info->raid_disk;
4231                         else
4232                                 rdev->raid_disk = -1;
4233                 } else
4234                         super_types[mddev->major_version].
4235                                 validate_super(mddev, rdev);
4236                 rdev->saved_raid_disk = rdev->raid_disk;
4237
4238                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4239                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4240                         set_bit(WriteMostly, &rdev->flags);
4241
4242                 rdev->raid_disk = -1;
4243                 err = bind_rdev_to_array(rdev, mddev);
4244                 if (!err && !mddev->pers->hot_remove_disk) {
4245                         /* If there is hot_add_disk but no hot_remove_disk
4246                          * then added disks are for geometry changes,
4247                          * and should be added immediately.
4248                          */
4249                         super_types[mddev->major_version].
4250                                 validate_super(mddev, rdev);
4251                         err = mddev->pers->hot_add_disk(mddev, rdev);
4252                         if (err)
4253                                 unbind_rdev_from_array(rdev);
4254                 }
4255                 if (err)
4256                         export_rdev(rdev);
4257
4258                 md_update_sb(mddev, 1);
4259                 if (mddev->degraded)
4260                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4261                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4262                 md_wakeup_thread(mddev->thread);
4263                 return err;
4264         }
4265
4266         /* otherwise, add_new_disk is only allowed
4267          * for major_version==0 superblocks
4268          */
4269         if (mddev->major_version != 0) {
4270                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4271                        mdname(mddev));
4272                 return -EINVAL;
4273         }
4274
4275         if (!(info->state & (1<<MD_DISK_FAULTY))) {
4276                 int err;
4277                 rdev = md_import_device (dev, -1, 0);
4278                 if (IS_ERR(rdev)) {
4279                         printk(KERN_WARNING 
4280                                 "md: error, md_import_device() returned %ld\n",
4281                                 PTR_ERR(rdev));
4282                         return PTR_ERR(rdev);
4283                 }
4284                 rdev->desc_nr = info->number;
4285                 if (info->raid_disk < mddev->raid_disks)
4286                         rdev->raid_disk = info->raid_disk;
4287                 else
4288                         rdev->raid_disk = -1;
4289
4290                 if (rdev->raid_disk < mddev->raid_disks)
4291                         if (info->state & (1<<MD_DISK_SYNC))
4292                                 set_bit(In_sync, &rdev->flags);
4293
4294                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4295                         set_bit(WriteMostly, &rdev->flags);
4296
4297                 if (!mddev->persistent) {
4298                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
4299                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4300                 } else 
4301                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4302                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
4303
4304                 err = bind_rdev_to_array(rdev, mddev);
4305                 if (err) {
4306                         export_rdev(rdev);
4307                         return err;
4308                 }
4309         }
4310
4311         return 0;
4312 }
4313
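/* HOT_REMOVE_DISK ioctl: detach a device that is no longer active in the array. */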
4314 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4315 {
4316         char b[BDEVNAME_SIZE];
4317         mdk_rdev_t *rdev;
4318
4319         rdev = find_rdev(mddev, dev);
4320         if (!rdev)
4321                 return -ENXIO;
4322
4323         if (rdev->raid_disk >= 0)
4324                 goto busy;
4325
4326         kick_rdev_from_array(rdev);
4327         md_update_sb(mddev, 1);
4328         md_new_event(mddev);
4329
4330         return 0;
4331 busy:
4332         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4333                 bdevname(rdev->bdev,b), mdname(mddev));
4334         return -EBUSY;
4335 }
4336
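/*
 * HOT_ADD_DISK ioctl: add a device as a spare to a running array with
 * version-0 superblocks and kick recovery so it can be used at once.
 */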
4337 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4338 {
4339         char b[BDEVNAME_SIZE];
4340         int err;
4341         unsigned int size;
4342         mdk_rdev_t *rdev;
4343
4344         if (!mddev->pers)
4345                 return -ENODEV;
4346
4347         if (mddev->major_version != 0) {
4348                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4349                         " version-0 superblocks.\n",
4350                         mdname(mddev));
4351                 return -EINVAL;
4352         }
4353         if (!mddev->pers->hot_add_disk) {
4354                 printk(KERN_WARNING 
4355                         "%s: personality does not support diskops!\n",
4356                         mdname(mddev));
4357                 return -EINVAL;
4358         }
4359
4360         rdev = md_import_device (dev, -1, 0);
4361         if (IS_ERR(rdev)) {
4362                 printk(KERN_WARNING 
4363                         "md: error, md_import_device() returned %ld\n",
4364                         PTR_ERR(rdev));
4365                 return -EINVAL;
4366         }
4367
4368         if (mddev->persistent)
4369                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4370         else
4371                 rdev->sb_offset =
4372                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4373
4374         size = calc_dev_size(rdev, mddev->chunk_size);
4375         rdev->size = size;
4376
4377         if (test_bit(Faulty, &rdev->flags)) {
4378                 printk(KERN_WARNING 
4379                         "md: can not hot-add faulty %s disk to %s!\n",
4380                         bdevname(rdev->bdev,b), mdname(mddev));
4381                 err = -EINVAL;
4382                 goto abort_export;
4383         }
4384         clear_bit(In_sync, &rdev->flags);
4385         rdev->desc_nr = -1;
4386         rdev->saved_raid_disk = -1;
4387         err = bind_rdev_to_array(rdev, mddev);
4388         if (err)
4389                 goto abort_export;
4390
4391         /*
4392          * The rest had better be atomic: we can have disk failures
4393          * noticed in interrupt contexts ...
4394          */
4395
4396         if (rdev->desc_nr == mddev->max_disks) {
4397                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
4398                         mdname(mddev));
4399                 err = -EBUSY;
4400                 goto abort_unbind_export;
4401         }
4402
4403         rdev->raid_disk = -1;
4404
4405         md_update_sb(mddev, 1);
4406
4407         /*
4408          * Kick recovery, maybe this spare has to be added to the
4409          * array immediately.
4410          */
4411         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4412         md_wakeup_thread(mddev->thread);
4413         md_new_event(mddev);
4414         return 0;
4415
4416 abort_unbind_export:
4417         unbind_rdev_from_array(rdev);
4418
4419 abort_export:
4420         export_rdev(rdev);
4421         return err;
4422 }
4423
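/*
 * set_bitmap_file() backs the SET_BITMAP_FILE ioctl handled further down:
 * a non-negative fd attaches the given file as the array's external bitmap
 * (only when no bitmap exists yet and no resync is running), while a
 * negative fd tears down and releases the current bitmap file.
 */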
4424 static int set_bitmap_file(mddev_t *mddev, int fd)
4425 {
4426         int err;
4427
4428         if (mddev->pers) {
4429                 if (!mddev->pers->quiesce)
4430                         return -EBUSY;
4431                 if (mddev->recovery || mddev->sync_thread)
4432                         return -EBUSY;
4433                 /* we should be able to change the bitmap.. */
4434         }
4435
4436
4437         if (fd >= 0) {
4438                 if (mddev->bitmap)
4439                         return -EEXIST; /* cannot add when bitmap is present */
4440                 mddev->bitmap_file = fget(fd);
4441
4442                 if (mddev->bitmap_file == NULL) {
4443                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4444                                mdname(mddev));
4445                         return -EBADF;
4446                 }
4447
4448                 err = deny_bitmap_write_access(mddev->bitmap_file);
4449                 if (err) {
4450                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4451                                mdname(mddev));
4452                         fput(mddev->bitmap_file);
4453                         mddev->bitmap_file = NULL;
4454                         return err;
4455                 }
4456                 mddev->bitmap_offset = 0; /* file overrides offset */
4457         } else if (mddev->bitmap == NULL)
4458                 return -ENOENT; /* cannot remove what isn't there */
4459         err = 0;
4460         if (mddev->pers) {
4461                 mddev->pers->quiesce(mddev, 1);
4462                 if (fd >= 0)
4463                         err = bitmap_create(mddev);
4464                 if (fd < 0 || err) {
4465                         bitmap_destroy(mddev);
4466                         fd = -1; /* make sure to put the file */
4467                 }
4468                 mddev->pers->quiesce(mddev, 0);
4469         }
4470         if (fd < 0) {
4471                 if (mddev->bitmap_file) {
4472                         restore_bitmap_write_access(mddev->bitmap_file);
4473                         fput(mddev->bitmap_file);
4474                 }
4475                 mddev->bitmap_file = NULL;
4476         }
4477
4478         return err;
4479 }
4480
4481 /*
4482  * set_array_info is used two different ways
4483  * The original usage is when creating a new array.
4484  * In this usage, raid_disks is > 0 and it together with
4485  *  level, size, not_persistent, layout, chunksize determine the
4486  *  shape of the array.
4487  *  This will always create an array with a type-0.90.0 superblock.
4488  * The newer usage is when assembling an array.
4489  *  In this case raid_disks will be 0, and the major_version field is
4490  *  used to determine which style super-blocks are to be found on the devices.
4491  *  The minor and patch _version numbers are also kept in case the
4492  *  super_block handler wishes to interpret them.
4493  */
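/*
 * Illustration only, not part of this file: from user space the two usages
 * above would look roughly like
 *
 *	mdu_array_info_t info = { 0 };
 *	info.major_version = 1;			<- assemble: find v1.x superblocks
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);	<- raid_disks == 0
 *
 * versus filling in raid_disks, level, size, layout and chunk_size before
 * the same ioctl to create a fresh 0.90 array.  md_fd is an assumed open
 * descriptor on the relevant /dev/mdX node; the values are examples only.
 */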
4494 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4495 {
4496
4497         if (info->raid_disks == 0) {
4498                 /* just setting version number for superblock loading */
4499                 if (info->major_version < 0 ||
4500                     info->major_version >= ARRAY_SIZE(super_types) ||
4501                     super_types[info->major_version].name == NULL) {
4502                         /* maybe try to auto-load a module? */
4503                         printk(KERN_INFO 
4504                                 "md: superblock version %d not known\n",
4505                                 info->major_version);
4506                         return -EINVAL;
4507                 }
4508                 mddev->major_version = info->major_version;
4509                 mddev->minor_version = info->minor_version;
4510                 mddev->patch_version = info->patch_version;
4511                 mddev->persistent = !info->not_persistent;
4512                 return 0;
4513         }
4514         mddev->major_version = MD_MAJOR_VERSION;
4515         mddev->minor_version = MD_MINOR_VERSION;
4516         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4517         mddev->ctime         = get_seconds();
4518
4519         mddev->level         = info->level;
4520         mddev->clevel[0]     = 0;
4521         mddev->size          = info->size;
4522         mddev->raid_disks    = info->raid_disks;
4523         /* don't set md_minor, it is determined by which /dev/md* was
4524          * opened
4525          */
4526         if (info->state & (1<<MD_SB_CLEAN))
4527                 mddev->recovery_cp = MaxSector;
4528         else
4529                 mddev->recovery_cp = 0;
4530         mddev->persistent    = ! info->not_persistent;
4531         mddev->external      = 0;
4532
4533         mddev->layout        = info->layout;
4534         mddev->chunk_size    = info->chunk_size;
4535
4536         mddev->max_disks     = MD_SB_DISKS;
4537
4538         if (mddev->persistent)
4539                 mddev->flags         = 0;
4540         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4541
4542         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4543         mddev->bitmap_offset = 0;
4544
4545         mddev->reshape_position = MaxSector;
4546
4547         /*
4548          * Generate a 128 bit UUID
4549          */
4550         get_random_bytes(mddev->uuid, 16);
4551
4552         mddev->new_level = mddev->level;
4553         mddev->new_chunk = mddev->chunk_size;
4554         mddev->new_layout = mddev->layout;
4555         mddev->delta_disks = 0;
4556
4557         return 0;
4558 }
4559
4560 static int update_size(mddev_t *mddev, unsigned long size)
4561 {
4562         mdk_rdev_t * rdev;
4563         int rv;
4564         struct list_head *tmp;
4565         int fit = (size == 0);
4566
4567         if (mddev->pers->resize == NULL)
4568                 return -EINVAL;
4569         /* The "size" is the amount of each device that is used.
4570          * This can only make sense for arrays with redundancy.
4571          * linear and raid0 always use whatever space is available.
4572          * We can only consider changing the size if no resync
4573          * or reconstruction is happening, and if the new size
4574          * is acceptable. It must fit before the sb_offset or,
4575          * if that is <data_offset, it must fit before the
4576          * size of each device.
4577          * If size is zero, we find the largest size that fits.
4578          */
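        /* Note on units: "size" is kilobytes per device; the avail/resize
         * arithmetic below converts to 512-byte sectors via the *2.
         */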
4579         if (mddev->sync_thread)
4580                 return -EBUSY;
4581         rdev_for_each(rdev, tmp, mddev) {
4582                 sector_t avail;
4583                 avail = rdev->size * 2;
4584
4585                 if (fit && (size == 0 || size > avail/2))
4586                         size = avail/2;
4587                 if (avail < ((sector_t)size << 1))
4588                         return -ENOSPC;
4589         }
4590         rv = mddev->pers->resize(mddev, (sector_t)size *2);
4591         if (!rv) {
4592                 struct block_device *bdev;
4593
4594                 bdev = bdget_disk(mddev->gendisk, 0);
4595                 if (bdev) {
4596                         mutex_lock(&bdev->bd_inode->i_mutex);
4597                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4598                         mutex_unlock(&bdev->bd_inode->i_mutex);
4599                         bdput(bdev);
4600                 }
4601         }
4602         return rv;
4603 }
4604
4605 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4606 {
4607         int rv;
4608         /* change the number of raid disks */
4609         if (mddev->pers->check_reshape == NULL)
4610                 return -EINVAL;
4611         if (raid_disks <= 0 ||
4612             raid_disks >= mddev->max_disks)
4613                 return -EINVAL;
4614         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4615                 return -EBUSY;
4616         mddev->delta_disks = raid_disks - mddev->raid_disks;
4617
4618         rv = mddev->pers->check_reshape(mddev);
4619         return rv;
4620 }
4621
4622
4623 /*
4624  * update_array_info is used to change the configuration of an
4625  * on-line array.
4626  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
4627  * fields in the info are checked against the array.
4628  * Any differences that cannot be handled will cause an error.
4629  * Normally, only one change can be managed at a time.
4630  */
4631 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4632 {
4633         int rv = 0;
4634         int cnt = 0;
4635         int state = 0;
4636
4637         /* calculate expected state, ignoring low bits */
4638         if (mddev->bitmap && mddev->bitmap_offset)
4639                 state |= (1 << MD_SB_BITMAP_PRESENT);
4640
4641         if (mddev->major_version != info->major_version ||
4642             mddev->minor_version != info->minor_version ||
4643 /*          mddev->patch_version != info->patch_version || */
4644             mddev->ctime         != info->ctime         ||
4645             mddev->level         != info->level         ||
4646 /*          mddev->layout        != info->layout        || */
4647             !mddev->persistent   != info->not_persistent||
4648             mddev->chunk_size    != info->chunk_size    ||
4649             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4650             ((state^info->state) & 0xfffffe00)
4651                 )
4652                 return -EINVAL;
4653         /* Check there is only one change */
4654         if (info->size >= 0 && mddev->size != info->size) cnt++;
4655         if (mddev->raid_disks != info->raid_disks) cnt++;
4656         if (mddev->layout != info->layout) cnt++;
4657         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4658         if (cnt == 0) return 0;
4659         if (cnt > 1) return -EINVAL;
4660
4661         if (mddev->layout != info->layout) {
4662                 /* Change layout
4663                  * we don't need to do anything at the md level, the
4664                  * personality will take care of it all.
4665                  */
4666                 if (mddev->pers->reconfig == NULL)
4667                         return -EINVAL;
4668                 else
4669                         return mddev->pers->reconfig(mddev, info->layout, -1);
4670         }
4671         if (info->size >= 0 && mddev->size != info->size)
4672                 rv = update_size(mddev, info->size);
4673
4674         if (mddev->raid_disks    != info->raid_disks)
4675                 rv = update_raid_disks(mddev, info->raid_disks);
4676
4677         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4678                 if (mddev->pers->quiesce == NULL)
4679                         return -EINVAL;
4680                 if (mddev->recovery || mddev->sync_thread)
4681                         return -EBUSY;
4682                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4683                         /* add the bitmap */
4684                         if (mddev->bitmap)
4685                                 return -EEXIST;
4686                         if (mddev->default_bitmap_offset == 0)
4687                                 return -EINVAL;
4688                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4689                         mddev->pers->quiesce(mddev, 1);
4690                         rv = bitmap_create(mddev);
4691                         if (rv)
4692                                 bitmap_destroy(mddev);
4693                         mddev->pers->quiesce(mddev, 0);
4694                 } else {
4695                         /* remove the bitmap */
4696                         if (!mddev->bitmap)
4697                                 return -ENOENT;
4698                         if (mddev->bitmap->file)
4699                                 return -EINVAL;
4700                         mddev->pers->quiesce(mddev, 1);
4701                         bitmap_destroy(mddev);
4702                         mddev->pers->quiesce(mddev, 0);
4703                         mddev->bitmap_offset = 0;
4704                 }
4705         }
4706         md_update_sb(mddev, 1);
4707         return rv;
4708 }
4709
4710 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4711 {
4712         mdk_rdev_t *rdev;
4713
4714         if (mddev->pers == NULL)
4715                 return -ENODEV;
4716
4717         rdev = find_rdev(mddev, dev);
4718         if (!rdev)
4719                 return -ENODEV;
4720
4721         md_error(mddev, rdev);
4722         return 0;
4723 }
4724
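/*
 * Fake a CHS geometry of 2 heads and 4 sectors per track, so each
 * "cylinder" is 8 sectors (4KB) and the cylinder count is simply the
 * capacity in 512-byte sectors divided by 8.  See the longer note in
 * md_ioctl() below.
 */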
4725 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4726 {
4727         mddev_t *mddev = bdev->bd_disk->private_data;
4728
4729         geo->heads = 2;
4730         geo->sectors = 4;
4731         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4732         return 0;
4733 }
4734
4735 static int md_ioctl(struct inode *inode, struct file *file,
4736                         unsigned int cmd, unsigned long arg)
4737 {
4738         int err = 0;
4739         void __user *argp = (void __user *)arg;
4740         mddev_t *mddev = NULL;
4741
4742         if (!capable(CAP_SYS_ADMIN))
4743                 return -EACCES;
4744
4745         /*
4746          * Commands dealing with the RAID driver but not any
4747          * particular array:
4748          */
4749         switch (cmd)
4750         {
4751                 case RAID_VERSION:
4752                         err = get_version(argp);
4753                         goto done;
4754
4755                 case PRINT_RAID_DEBUG:
4756                         err = 0;
4757                         md_print_devices();
4758                         goto done;
4759
4760 #ifndef MODULE
4761                 case RAID_AUTORUN:
4762                         err = 0;
4763                         autostart_arrays(arg);
4764                         goto done;
4765 #endif
4766                 default:;
4767         }
4768
4769         /*
4770          * Commands creating/starting a new array:
4771          */
4772
4773         mddev = inode->i_bdev->bd_disk->private_data;
4774
4775         if (!mddev) {
4776                 BUG();
4777                 goto abort;
4778         }
4779
4780         err = mddev_lock(mddev);
4781         if (err) {
4782                 printk(KERN_INFO 
4783                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4784                         err, cmd);
4785                 goto abort;
4786         }
4787
4788         switch (cmd)
4789         {
4790                 case SET_ARRAY_INFO:
4791                         {
4792                                 mdu_array_info_t info;
4793                                 if (!arg)
4794                                         memset(&info, 0, sizeof(info));
4795                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4796                                         err = -EFAULT;
4797                                         goto abort_unlock;
4798                                 }
4799                                 if (mddev->pers) {
4800                                         err = update_array_info(mddev, &info);
4801                                         if (err) {
4802                                                 printk(KERN_WARNING "md: couldn't update"
4803                                                        " array info. %d\n", err);
4804                                                 goto abort_unlock;
4805                                         }
4806                                         goto done_unlock;
4807                                 }
4808                                 if (!list_empty(&mddev->disks)) {
4809                                         printk(KERN_WARNING
4810                                                "md: array %s already has disks!\n",
4811                                                mdname(mddev));
4812                                         err = -EBUSY;
4813                                         goto abort_unlock;
4814                                 }
4815                                 if (mddev->raid_disks) {
4816                                         printk(KERN_WARNING
4817                                                "md: array %s already initialised!\n",
4818                                                mdname(mddev));
4819                                         err = -EBUSY;
4820                                         goto abort_unlock;
4821                                 }
4822                                 err = set_array_info(mddev, &info);
4823                                 if (err) {
4824                                         printk(KERN_WARNING "md: couldn't set"
4825                                                " array info. %d\n", err);
4826                                         goto abort_unlock;
4827                                 }
4828                         }
4829                         goto done_unlock;
4830
4831                 default:;
4832         }
4833
4834         /*
4835          * Commands querying/configuring an existing array:
4836          */
4837         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4838          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4839         if ((!mddev->raid_disks && !mddev->external)
4840             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4841             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4842             && cmd != GET_BITMAP_FILE) {
4843                 err = -ENODEV;
4844                 goto abort_unlock;
4845         }
4846
4847         /*
4848          * Commands even a read-only array can execute:
4849          */
4850         switch (cmd)
4851         {
4852                 case GET_ARRAY_INFO:
4853                         err = get_array_info(mddev, argp);
4854                         goto done_unlock;
4855
4856                 case GET_BITMAP_FILE:
4857                         err = get_bitmap_file(mddev, argp);
4858                         goto done_unlock;
4859
4860                 case GET_DISK_INFO:
4861                         err = get_disk_info(mddev, argp);
4862                         goto done_unlock;
4863
4864                 case RESTART_ARRAY_RW:
4865                         err = restart_array(mddev);
4866                         goto done_unlock;
4867
4868                 case STOP_ARRAY:
4869                         err = do_md_stop (mddev, 0);
4870                         goto done_unlock;
4871
4872                 case STOP_ARRAY_RO:
4873                         err = do_md_stop (mddev, 1);
4874                         goto done_unlock;
4875
4876         /*
4877          * We have a problem here : there is no easy way to give a CHS
4878          * virtual geometry. We currently pretend that we have a 2 heads
4879          * 4 sectors (with a BIG number of cylinders...). This drives
4880          * dosfs just mad... ;-)
4881          */
4882         }
4883
4884         /*
4885          * The remaining ioctls are changing the state of the
4886          * superblock, so we do not allow them on read-only arrays.
4887          * However non-MD ioctls (e.g. get-size) will still come through
4888          * here and hit the 'default' below, so only disallow
4889          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4890          */
4891         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4892             mddev->ro && mddev->pers) {
4893                 if (mddev->ro == 2) {
4894                         mddev->ro = 0;
4895                         sysfs_notify(&mddev->kobj, NULL, "array_state");
4896                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4897                         md_wakeup_thread(mddev->thread);
4898
4899                 } else {
4900                         err = -EROFS;
4901                         goto abort_unlock;
4902                 }
4903         }
4904
4905         switch (cmd)
4906         {
4907                 case ADD_NEW_DISK:
4908                 {
4909                         mdu_disk_info_t info;
4910                         if (copy_from_user(&info, argp, sizeof(info)))
4911                                 err = -EFAULT;
4912                         else
4913                                 err = add_new_disk(mddev, &info);
4914                         goto done_unlock;
4915                 }
4916
4917                 case HOT_REMOVE_DISK:
4918                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4919                         goto done_unlock;
4920
4921                 case HOT_ADD_DISK:
4922                         err = hot_add_disk(mddev, new_decode_dev(arg));
4923                         goto done_unlock;
4924
4925                 case SET_DISK_FAULTY:
4926                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4927                         goto done_unlock;
4928
4929                 case RUN_ARRAY:
4930                         err = do_md_run (mddev);
4931                         goto done_unlock;
4932
4933                 case SET_BITMAP_FILE:
4934                         err = set_bitmap_file(mddev, (int)arg);
4935                         goto done_unlock;
4936
4937                 default:
4938                         err = -EINVAL;
4939                         goto abort_unlock;
4940         }
4941
4942 done_unlock:
4943 abort_unlock:
4944         mddev_unlock(mddev);
4945
4946         return err;
4947 done:
4948         if (err)
4949                 MD_BUG();
4950 abort:
4951         return err;
4952 }
4953
4954 static int md_open(struct inode *inode, struct file *file)
4955 {
4956         /*
4957          * Succeed if we can lock the mddev, which confirms that
4958          * it isn't being stopped right now.
4959          */
4960         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4961         int err;
4962
4963         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4964                 goto out;
4965
4966         err = 0;
4967         mddev_get(mddev);
4968         mddev_unlock(mddev);
4969
4970         check_disk_change(inode->i_bdev);
4971  out:
4972         return err;
4973 }
4974
4975 static int md_release(struct inode *inode, struct file * file)
4976 {
4977         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4978
4979         BUG_ON(!mddev);
4980         mddev_put(mddev);
4981
4982         return 0;
4983 }
4984
4985 static int md_media_changed(struct gendisk *disk)
4986 {
4987         mddev_t *mddev = disk->private_data;
4988
4989         return mddev->changed;
4990 }
4991
4992 static int md_revalidate(struct gendisk *disk)
4993 {
4994         mddev_t *mddev = disk->private_data;
4995
4996         mddev->changed = 0;
4997         return 0;
4998 }
4999 static struct block_device_operations md_fops =
5000 {
5001         .owner          = THIS_MODULE,
5002         .open           = md_open,
5003         .release        = md_release,
5004         .ioctl          = md_ioctl,
5005         .getgeo         = md_getgeo,
5006         .media_changed  = md_media_changed,
5007         .revalidate_disk= md_revalidate,
5008 };
5009
5010 static int md_thread(void * arg)
5011 {
5012         mdk_thread_t *thread = arg;
5013
5014         /*
5015          * md_thread is a 'system-thread', its priority should be very
5016          * high. We avoid resource deadlocks individually in each
5017          * raid personality. (RAID5 does preallocation) We also use RR and
5018          * the very same RT priority as kswapd, thus we will never get
5019          * into a priority inversion deadlock.
5020          *
5021          * we definitely have to have equal or higher priority than
5022          * bdflush, otherwise bdflush will deadlock if there are too
5023          * many dirty RAID5 blocks.
5024          */
5025
5026         allow_signal(SIGKILL);
5027         while (!kthread_should_stop()) {
5028
5029                 /* We need to wait INTERRUPTIBLE so that
5030                  * we don't add to the load-average.
5031                  * That means we need to be sure no signals are
5032                  * pending
5033                  */
5034                 if (signal_pending(current))
5035                         flush_signals(current);
5036
5037                 wait_event_interruptible_timeout
5038                         (thread->wqueue,
5039                          test_bit(THREAD_WAKEUP, &thread->flags)
5040                          || kthread_should_stop(),
5041                          thread->timeout);
5042
5043                 clear_bit(THREAD_WAKEUP, &thread->flags);
5044
5045                 thread->run(thread->mddev);
5046         }
5047
5048         return 0;
5049 }
5050
5051 void md_wakeup_thread(mdk_thread_t *thread)
5052 {
5053         if (thread) {
5054                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5055                 set_bit(THREAD_WAKEUP, &thread->flags);
5056                 wake_up(&thread->wqueue);
5057         }
5058 }
5059
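/*
 * Note for callers: 'name' is used as a printk-style format for the
 * kthread's name with mdname(mddev) as its argument, so a personality
 * would pass e.g. "%s_raid5" to get a task named like "md0_raid5".
 */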
5060 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5061                                  const char *name)
5062 {
5063         mdk_thread_t *thread;
5064
5065         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5066         if (!thread)
5067                 return NULL;
5068
5069         init_waitqueue_head(&thread->wqueue);
5070
5071         thread->run = run;
5072         thread->mddev = mddev;
5073         thread->timeout = MAX_SCHEDULE_TIMEOUT;
5074         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
5075         if (IS_ERR(thread->tsk)) {
5076                 kfree(thread);
5077                 return NULL;
5078         }
5079         return thread;
5080 }
5081
5082 void md_unregister_thread(mdk_thread_t *thread)
5083 {
5084         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5085
5086         kthread_stop(thread->tsk);
5087         kfree(thread);
5088 }
5089
5090 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5091 {
5092         if (!mddev) {
5093                 MD_BUG();
5094                 return;
5095         }
5096
5097         if (!rdev || test_bit(Faulty, &rdev->flags))
5098                 return;
5099
5100         if (mddev->external)
5101                 set_bit(Blocked, &rdev->flags);
5102 /*
5103         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5104                 mdname(mddev),
5105                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5106                 __builtin_return_address(0),__builtin_return_address(1),
5107                 __builtin_return_address(2),__builtin_return_address(3));
5108 */
5109         if (!mddev->pers)
5110                 return;
5111         if (!mddev->pers->error_handler)
5112                 return;
5113         mddev->pers->error_handler(mddev,rdev);
5114         if (mddev->degraded)
5115                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5116         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5117         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5118         md_wakeup_thread(mddev->thread);
5119         md_new_event_inintr(mddev);
5120 }
5121
5122 /* seq_file implementation /proc/mdstat */
5123
5124 static void status_unused(struct seq_file *seq)
5125 {
5126         int i = 0;
5127         mdk_rdev_t *rdev;
5128         struct list_head *tmp;
5129
5130         seq_printf(seq, "unused devices: ");
5131
5132         rdev_for_each_list(rdev, tmp, pending_raid_disks) {
5133                 char b[BDEVNAME_SIZE];
5134                 i++;
5135                 seq_printf(seq, "%s ",
5136                               bdevname(rdev->bdev,b));
5137         }
5138         if (!i)
5139                 seq_printf(seq, "<none>");
5140
5141         seq_printf(seq, "\n");
5142 }
5143
5144
5145 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5146 {
5147         sector_t max_blocks, resync, res;
5148         unsigned long dt, db, rt;
5149         int scale;
5150         unsigned int per_milli;
5151
5152         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
5153
5154         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5155                 max_blocks = mddev->resync_max_sectors >> 1;
5156         else
5157                 max_blocks = mddev->size;
5158
5159         /*
5160          * Should not happen.
5161          */
5162         if (!max_blocks) {
5163                 MD_BUG();
5164                 return;
5165         }
5166         /* Pick 'scale' such that (resync>>scale)*1000 will fit
5167          * in a sector_t, and (max_blocks>>scale) will fit in a
5168          * u32, as those are the requirements for sector_div.
5169          * Thus 'scale' must be at least 10
5170          */
5171         scale = 10;
5172         if (sizeof(sector_t) > sizeof(unsigned long)) {
5173                 while ( max_blocks/2 > (1ULL<<(scale+32)))
5174                         scale++;
5175         }
5176         res = (resync>>scale)*1000;
5177         sector_div(res, (u32)((max_blocks>>scale)+1));
5178
5179         per_milli = res;
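        /*
         * per_milli is progress in tenths of a percent: an array about a
         * quarter of the way through gets per_milli of roughly 250, drawn
         * below as 5 of the 20 '=' positions and printed as "25.0%".
         */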
5180         {
5181                 int i, x = per_milli/50, y = 20-x;
5182                 seq_printf(seq, "[");
5183                 for (i = 0; i < x; i++)
5184                         seq_printf(seq, "=");
5185                 seq_printf(seq, ">");
5186                 for (i = 0; i < y; i++)
5187                         seq_printf(seq, ".");
5188                 seq_printf(seq, "] ");
5189         }
5190         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5191                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5192                     "reshape" :
5193                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5194                      "check" :
5195                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5196                       "resync" : "recovery"))),
5197                    per_milli/10, per_milli % 10,
5198                    (unsigned long long) resync,
5199                    (unsigned long long) max_blocks);
5200
5201         /*
5202          * We do not want to overflow, so the order of operands and
5203          * the * 100 / 100 trick are important. We do a +1 to be
5204          * safe against division by zero. We only estimate anyway.
5205          *
5206          * dt: time from mark until now
5207          * db: blocks written from mark until now
5208          * rt: remaining time
5209          */
5210         dt = ((jiffies - mddev->resync_mark) / HZ);
5211         if (!dt) dt++;
5212         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5213                 - mddev->resync_mark_cnt;
5214         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
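        /*
         * dt is seconds since the current mark and db is 512-byte sectors
         * written since then, so db/2/dt is the recent rate in KB/sec and
         * rt is remaining kilobytes over that rate.  E.g. 30MB written in
         * 30 seconds (1000 KB/sec) with 1000000KB still to go gives an rt
         * of roughly 1000 seconds, printed as about 16.6min.
         */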
5215
5216         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
5217
5218         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5219 }
5220
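/*
 * seq_file iterator for /proc/mdstat: position 0 is the "Personalities"
 * header (magic pointer 1), then every mddev on all_mddevs in turn, and
 * finally the "unused devices" tail (magic pointer 2).
 */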
5221 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5222 {
5223         struct list_head *tmp;
5224         loff_t l = *pos;
5225         mddev_t *mddev;
5226
5227         if (l >= 0x10000)
5228                 return NULL;
5229         if (!l--)
5230                 /* header */
5231                 return (void*)1;
5232
5233         spin_lock(&all_mddevs_lock);
5234         list_for_each(tmp,&all_mddevs)
5235                 if (!l--) {
5236                         mddev = list_entry(tmp, mddev_t, all_mddevs);
5237                         mddev_get(mddev);
5238                         spin_unlock(&all_mddevs_lock);
5239                         return mddev;
5240                 }
5241         spin_unlock(&all_mddevs_lock);
5242         if (!l--)
5243                 return (void*)2;/* tail */
5244         return NULL;
5245 }
5246
5247 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5248 {
5249         struct list_head *tmp;
5250         mddev_t *next_mddev, *mddev = v;
5251         
5252         ++*pos;
5253         if (v == (void*)2)
5254                 return NULL;
5255
5256         spin_lock(&all_mddevs_lock);
5257         if (v == (void*)1)
5258                 tmp = all_mddevs.next;
5259         else
5260                 tmp = mddev->all_mddevs.next;
5261         if (tmp != &all_mddevs)
5262                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5263         else {
5264                 next_mddev = (void*)2;
5265                 *pos = 0x10000;
5266         }               
5267         spin_unlock(&all_mddevs_lock);
5268
5269         if (v != (void*)1)
5270                 mddev_put(mddev);
5271         return next_mddev;
5272
5273 }
5274
5275 static void md_seq_stop(struct seq_file *seq, void *v)
5276 {
5277         mddev_t *mddev = v;
5278
5279         if (mddev && v != (void*)1 && v != (void*)2)
5280                 mddev_put(mddev);
5281 }
5282
5283 struct mdstat_info {
5284         int event;
5285 };
5286
5287 static int md_seq_show(struct seq_file *seq, void *v)
5288 {
5289         mddev_t *mddev = v;
5290         sector_t size;
5291         struct list_head *tmp2;
5292         mdk_rdev_t *rdev;
5293         struct mdstat_info *mi = seq->private;
5294         struct bitmap *bitmap;
5295
5296         if (v == (void*)1) {
5297                 struct mdk_personality *pers;
5298                 seq_printf(seq, "Personalities : ");
5299                 spin_lock(&pers_lock);
5300                 list_for_each_entry(pers, &pers_list, list)
5301                         seq_printf(seq, "[%s] ", pers->name);
5302
5303                 spin_unlock(&pers_lock);
5304                 seq_printf(seq, "\n");
5305                 mi->event = atomic_read(&md_event_count);
5306                 return 0;
5307         }
5308         if (v == (void*)2) {
5309                 status_unused(seq);
5310                 return 0;
5311         }
5312
5313         if (mddev_lock(mddev) < 0)
5314                 return -EINTR;
5315
5316         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5317                 seq_printf(seq, "%s : %sactive", mdname(mddev),
5318                                                 mddev->pers ? "" : "in");
5319                 if (mddev->pers) {
5320                         if (mddev->ro==1)
5321                                 seq_printf(seq, " (read-only)");
5322                         if (mddev->ro==2)
5323                                 seq_printf(seq, " (auto-read-only)");
5324                         seq_printf(seq, " %s", mddev->pers->name);
5325                 }
5326
5327                 size = 0;
5328                 rdev_for_each(rdev, tmp2, mddev) {
5329                         char b[BDEVNAME_SIZE];
5330                         seq_printf(seq, " %s[%d]",
5331                                 bdevname(rdev->bdev,b), rdev->desc_nr);
5332                         if (test_bit(WriteMostly, &rdev->flags))
5333                                 seq_printf(seq, "(W)");
5334                         if (test_bit(Faulty, &rdev->flags)) {
5335                                 seq_printf(seq, "(F)");
5336                                 continue;
5337                         } else if (rdev->raid_disk < 0)
5338                                 seq_printf(seq, "(S)"); /* spare */
5339                         size += rdev->size;
5340                 }
5341
5342                 if (!list_empty(&mddev->disks)) {
5343                         if (mddev->pers)
5344                                 seq_printf(seq, "\n      %llu blocks",
5345                                         (unsigned long long)mddev->array_size);
5346                         else
5347                                 seq_printf(seq, "\n      %llu blocks",
5348                                         (unsigned long long)size);
5349                 }
5350                 if (mddev->persistent) {
5351                         if (mddev->major_version != 0 ||
5352                             mddev->minor_version != 90) {
5353                                 seq_printf(seq," super %d.%d",
5354                                            mddev->major_version,
5355                                            mddev->minor_version);
5356                         }
5357                 } else if (mddev->external)
5358                         seq_printf(seq, " super external:%s",
5359                                    mddev->metadata_type);
5360                 else
5361                         seq_printf(seq, " super non-persistent");
5362
5363                 if (mddev->pers) {
5364                         mddev->pers->status (seq, mddev);
5365                         seq_printf(seq, "\n      ");
5366                         if (mddev->pers->sync_request) {
5367                                 if (mddev->curr_resync > 2) {
5368                                         status_resync (seq, mddev);
5369                                         seq_printf(seq, "\n      ");
5370                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5371                                         seq_printf(seq, "\tresync=DELAYED\n      ");
5372                                 else if (mddev->recovery_cp < MaxSector)
5373                                         seq_printf(seq, "\tresync=PENDING\n      ");
5374                         }
5375                 } else
5376                         seq_printf(seq, "\n       ");
5377
5378                 if ((bitmap = mddev->bitmap)) {
5379                         unsigned long chunk_kb;
5380                         unsigned long flags;
5381                         spin_lock_irqsave(&bitmap->lock, flags);
5382                         chunk_kb = bitmap->chunksize >> 10;
5383                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5384                                 "%lu%s chunk",
5385                                 bitmap->pages - bitmap->missing_pages,
5386                                 bitmap->pages,
5387                                 (bitmap->pages - bitmap->missing_pages)
5388                                         << (PAGE_SHIFT - 10),
5389                                 chunk_kb ? chunk_kb : bitmap->chunksize,
5390                                 chunk_kb ? "KB" : "B");
5391                         if (bitmap->file) {
5392                                 seq_printf(seq, ", file: ");
5393                                 seq_path(seq, &bitmap->file->f_path, " \t\n");
5394                         }
5395
5396                         seq_printf(seq, "\n");
5397                         spin_unlock_irqrestore(&bitmap->lock, flags);
5398                 }
5399
5400                 seq_printf(seq, "\n");
5401         }
5402         mddev_unlock(mddev);
5403         
5404         return 0;
5405 }
5406
5407 static struct seq_operations md_seq_ops = {
5408         .start  = md_seq_start,
5409         .next   = md_seq_next,
5410         .stop   = md_seq_stop,
5411         .show   = md_seq_show,
5412 };
5413
5414 static int md_seq_open(struct inode *inode, struct file *file)
5415 {
5416         int error;
5417         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
5418         if (mi == NULL)
5419                 return -ENOMEM;
5420
5421         error = seq_open(file, &md_seq_ops);
5422         if (error)
5423                 kfree(mi);
5424         else {
5425                 struct seq_file *p = file->private_data;
5426                 p->private = mi;
5427                 mi->event = atomic_read(&md_event_count);
5428         }
5429         return error;
5430 }
5431
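/*
 * Illustration only (user space): a monitor such as mdadm can open
 * /proc/mdstat, read it, and then poll() the descriptor; the
 * POLLERR|POLLPRI bits returned below signal that md_event_count has
 * moved on and the file should be re-read.  A sketch:
 *
 *	struct pollfd pfd = { .fd = mdstat_fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);	waits for the next md event
 */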
5432 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
5433 {
5434         struct seq_file *m = filp->private_data;
5435         struct mdstat_info *mi = m->private;
5436         int mask;
5437
5438         poll_wait(filp, &md_event_waiters, wait);
5439
5440         /* always allow read */
5441         mask = POLLIN | POLLRDNORM;
5442
5443         if (mi->event != atomic_read(&md_event_count))
5444                 mask |= POLLERR | POLLPRI;
5445         return mask;
5446 }
5447
5448 static const struct file_operations md_seq_fops = {
5449         .owner          = THIS_MODULE,
5450         .open           = md_seq_open,
5451         .read           = seq_read,
5452         .llseek         = seq_lseek,
5453         .release        = seq_release_private,
5454         .poll           = mdstat_poll,
5455 };
5456
5457 int register_md_personality(struct mdk_personality *p)
5458 {
5459         spin_lock(&pers_lock);
5460         list_add_tail(&p->list, &pers_list);
5461         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
5462         spin_unlock(&pers_lock);
5463         return 0;
5464 }
5465
5466 int unregister_md_personality(struct mdk_personality *p)
5467 {
5468         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
5469         spin_lock(&pers_lock);
5470         list_del_init(&p->list);
5471         spin_unlock(&pers_lock);
5472         return 0;
5473 }
5474
5475 static int is_mddev_idle(mddev_t *mddev)
5476 {
5477         mdk_rdev_t * rdev;
5478         struct list_head *tmp;
5479         int idle;
5480         long curr_events;
5481
5482         idle = 1;
5483         rdev_for_each(rdev, tmp, mddev) {
5484                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5485                 curr_events = disk_stat_read(disk, sectors[0]) + 
5486                                 disk_stat_read(disk, sectors[1]) - 
5487                                 atomic_read(&disk->sync_io);
5488                 /* sync IO will cause sync_io to increase before the disk_stats
5489                  * as sync_io is counted when a request starts, and
5490                  * disk_stats is counted when it completes.
5491                  * So resync activity will cause curr_events to be smaller than
5492                  * when there was no such activity.
5493                  * non-sync IO will cause disk_stat to increase without
5494                  * increasing sync_io so curr_events will (eventually)
5495                  * be larger than it was before.  Once it becomes
5496                  * substantially larger, the test below will cause
5497                  * the array to appear non-idle, and resync will slow
5498                  * down.
5499                  * If there is a lot of outstanding resync activity when
5500                  * we set last_event to curr_events, then all that activity
5501                  * completing might cause the array to appear non-idle
5502                  * and resync will be slowed down even though there might
5503                  * not have been non-resync activity.  This will only
5504                  * happen once though.  'last_events' will soon reflect
5505                  * the state where there is little or no outstanding
5506                  * resync requests, and further resync activity will
5507                  * always make curr_events less than last_events.
5508                  *
5509                  */
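                /* more than 4096 sectors (2MB) of apparently non-resync IO
                 * since the last check: treat the array as non-idle
                 */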
5510                 if (curr_events - rdev->last_events > 4096) {
5511                         rdev->last_events = curr_events;
5512                         idle = 0;
5513                 }
5514         }
5515         return idle;
5516 }
5517
5518 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5519 {
5520         /* another "blocks" (512-byte) blocks have been synced */
5521         atomic_sub(blocks, &mddev->recovery_active);
5522         wake_up(&mddev->recovery_wait);
5523         if (!ok) {
5524                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5525                 md_wakeup_thread(mddev->thread);
5526                 // stop recovery, signal do_sync ....
5527         }
5528 }
5529
5530
5531 /* md_write_start(mddev, bi)
5532  * If we need to update some array metadata (e.g. 'active' flag
5533  * in superblock) before writing, schedule a superblock update
5534  * and wait for it to complete.
5535  */
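/*
 * Personalities call md_write_start() before submitting a write and
 * md_write_end() when that write completes, keeping writes_pending
 * balanced so the safemode timer can mark the array clean again once
 * writes stop.
 */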
5536 void md_write_start(mddev_t *mddev, struct bio *bi)
5537 {
5538         int did_change = 0;
5539         if (bio_data_dir(bi) != WRITE)
5540                 return;
5541
5542         BUG_ON(mddev->ro == 1);
5543         if (mddev->ro == 2) {
5544                 /* need to switch to read/write */
5545                 mddev->ro = 0;
5546                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5547                 md_wakeup_thread(mddev->thread);
5548                 md_wakeup_thread(mddev->sync_thread);
5549                 did_change = 1;
5550         }
5551         atomic_inc(&mddev->writes_pending);
5552         if (mddev->safemode == 1)
5553                 mddev->safemode = 0;
5554         if (mddev->in_sync) {
5555                 spin_lock_irq(&mddev->write_lock);
5556                 if (mddev->in_sync) {
5557                         mddev->in_sync = 0;
5558                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5559                         md_wakeup_thread(mddev->thread);
5560                         did_change = 1;
5561                 }
5562                 spin_unlock_irq(&mddev->write_lock);
5563         }
5564         if (did_change)
5565                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5566         wait_event(mddev->sb_wait,
5567                    !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
5568                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5569 }
5570
5571 void md_write_end(mddev_t *mddev)
5572 {
5573         if (atomic_dec_and_test(&mddev->writes_pending)) {
5574                 if (mddev->safemode == 2)
5575                         md_wakeup_thread(mddev->thread);
5576                 else if (mddev->safemode_delay)
5577                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5578         }
5579 }
5580
5581 /* md_allow_write(mddev)
5582  * Calling this ensures that the array is marked 'active' so that writes
5583  * may proceed without blocking.  It is important to call this before
5584  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5585  * Must be called with mddev_lock held.
5586  */
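/*
 * For example, a personality wanting to enlarge an internal cache with
 * a GFP_KERNEL allocation while holding the mddev lock would call this
 * first, so that any writes triggered by memory reclaim during the
 * allocation are not left waiting on the metadata update noted above.
 */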
5587 void md_allow_write(mddev_t *mddev)
5588 {
5589         if (!mddev->pers)
5590                 return;
5591         if (mddev->ro)
5592                 return;
5593         if (!mddev->pers->sync_request)
5594                 return;
5595
5596         spin_lock_irq(&mddev->write_lock);
5597         if (mddev->in_sync) {
5598                 mddev->in_sync = 0;
5599                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5600                 if (mddev->safemode_delay &&
5601                     mddev->safemode == 0)
5602                         mddev->safemode = 1;
5603                 spin_unlock_irq(&mddev->write_lock);
5604                 md_update_sb(mddev, 0);
5605
5606                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5607                 /* wait for the dirty state to be recorded in the metadata */
5608                 wait_event(mddev->sb_wait,
5609                            !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
5610                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5611         } else
5612                 spin_unlock_irq(&mddev->write_lock);
5613 }
5614 EXPORT_SYMBOL_GPL(md_allow_write);
5615
5616 #define SYNC_MARKS      10
5617 #define SYNC_MARK_STEP  (3*HZ)
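/* ten marks stepped every three seconds: the speed reported in
 * /proc/mdstat is averaged over a sliding window of roughly 30 seconds
 */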
5618 void md_do_sync(mddev_t *mddev)
5619 {
5620         mddev_t *mddev2;
5621         unsigned int currspeed = 0,
5622                  window;
5623         sector_t max_sectors,j, io_sectors;
5624         unsigned long mark[SYNC_MARKS];
5625         sector_t mark_cnt[SYNC_MARKS];
5626         int last_mark,m;
5627         struct list_head *tmp;
5628         sector_t last_check;
5629         int skipped = 0;
5630         struct list_head *rtmp;
5631         mdk_rdev_t *rdev;
5632         char *desc;
5633
5634         /* just in case the thread restarts... */
5635         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5636                 return;
5637         if (mddev->ro) /* never try to sync a read-only array */
5638                 return;
5639
5640         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5641                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5642                         desc = "data-check";
5643                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5644                         desc = "requested-resync";
5645                 else
5646                         desc = "resync";
5647         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5648                 desc = "reshape";
5649         else
5650                 desc = "recovery";
5651
5652         /* we overload curr_resync somewhat here.
5653          * 0 == not engaged in resync at all
5654          * 2 == checking that there is no conflict with another sync
5655          * 1 == like 2, but have yielded to allow conflicting resync to
5656          *              commence
5657          * other == active in resync - this many blocks
5658          *
5659          * Before starting a resync we must have set curr_resync to
5660          * 2, and then checked that every "conflicting" array has curr_resync
5661          * less than ours.  When we find one that is the same or higher
5662          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5663          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5664          * This will mean we have to start checking from the beginning again.
5665          *
5666          */
5667
5668         do {
5669                 mddev->curr_resync = 2;
5670
5671         try_again:
5672                 if (kthread_should_stop()) {
5673                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5674                         goto skip;
5675                 }
5676                 for_each_mddev(mddev2, tmp) {
5677                         if (mddev2 == mddev)
5678                                 continue;
5679                         if (!mddev->parallel_resync
5680                         &&  mddev2->curr_resync
5681                         &&  match_mddev_units(mddev, mddev2)) {
5682                                 DEFINE_WAIT(wq);
5683                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5684                                         /* arbitrarily yield */
5685                                         mddev->curr_resync = 1;
5686                                         wake_up(&resync_wait);
5687                                 }
5688                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5689                                         /* no need to wait here, we can wait the next
5690                                          * time 'round when curr_resync == 2
5691                                          */
5692                                         continue;
5693                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5694                                 if (!kthread_should_stop() &&
5695                                     mddev2->curr_resync >= mddev->curr_resync) {
5696                                         printk(KERN_INFO "md: delaying %s of %s"
5697                                                " until %s has finished (they"
5698                                                " share one or more physical units)\n",
5699                                                desc, mdname(mddev), mdname(mddev2));
5700                                         mddev_put(mddev2);
5701                                         schedule();
5702                                         finish_wait(&resync_wait, &wq);
5703                                         goto try_again;
5704                                 }
5705                                 finish_wait(&resync_wait, &wq);
5706                         }
5707                 }
5708         } while (mddev->curr_resync < 2);
5709
5710         j = 0;
5711         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5712                 /* resync follows the size requested by the personality,
5713                  * which defaults to physical size, but can be virtual size
5714                  */
5715                 max_sectors = mddev->resync_max_sectors;
5716                 mddev->resync_mismatches = 0;
5717                 /* we don't use the checkpoint if there's a bitmap */
5718                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5719                         j = mddev->resync_min;
5720                 else if (!mddev->bitmap)
5721                         j = mddev->recovery_cp;
5722
5723         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5724                 max_sectors = mddev->size << 1;
5725         else {
5726                 /* recovery follows the physical size of devices */
5727                 max_sectors = mddev->size << 1;
5728                 j = MaxSector;
5729                 rdev_for_each(rdev, rtmp, mddev)
5730                         if (rdev->raid_disk >= 0 &&
5731                             !test_bit(Faulty, &rdev->flags) &&
5732                             !test_bit(In_sync, &rdev->flags) &&
5733                             rdev->recovery_offset < j)
5734                                 j = rdev->recovery_offset;
5735         }
5736
5737         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5738         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5739                 " %d KB/sec/disk.\n", speed_min(mddev));
5740         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5741                "(but not more than %d KB/sec) for %s.\n",
5742                speed_max(mddev), desc);
5743
5744         is_mddev_idle(mddev); /* this also initializes IO event counters */
5745
5746         io_sectors = 0;
5747         for (m = 0; m < SYNC_MARKS; m++) {
5748                 mark[m] = jiffies;
5749                 mark_cnt[m] = io_sectors;
5750         }
5751         last_mark = 0;
5752         mddev->resync_mark = mark[last_mark];
5753         mddev->resync_mark_cnt = mark_cnt[last_mark];
5754
5755         /*
5756          * Tune reconstruction:
5757          */
5758         window = 32*(PAGE_SIZE/512);
5759         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5760                 window/2,(unsigned long long) max_sectors/2);
5761
5762         atomic_set(&mddev->recovery_active, 0);
5763         last_check = 0;
5764
5765         if (j>2) {
5766                 printk(KERN_INFO 
5767                        "md: resuming %s of %s from checkpoint.\n",
5768                        desc, mdname(mddev));
5769                 mddev->curr_resync = j;
5770         }
5771
5772         while (j < max_sectors) {
5773                 sector_t sectors;
5774
5775                 skipped = 0;
5776                 if (j >= mddev->resync_max) {
5777                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5778                         wait_event(mddev->recovery_wait,
5779                                    mddev->resync_max > j
5780                                    || kthread_should_stop());
5781                 }
5782                 if (kthread_should_stop())
5783                         goto interrupted;
5784                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5785                                                   currspeed < speed_min(mddev));
5786                 if (sectors == 0) {
5787                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5788                         goto out;
5789                 }
5790
5791                 if (!skipped) { /* actual IO requested */
5792                         io_sectors += sectors;
5793                         atomic_add(sectors, &mddev->recovery_active);
5794                 }
5795
5796                 j += sectors;
5797                 if (j>1) mddev->curr_resync = j;
5798                 mddev->curr_mark_cnt = io_sectors;
5799                 if (last_check == 0)
5800                         /* this is the earliest that the rebuild will be
5801                          * visible in /proc/mdstat
5802                          */
5803                         md_new_event(mddev);
5804
5805                 if (last_check + window > io_sectors || j == max_sectors)
5806                         continue;
5807
5808                 last_check = io_sectors;
5809
5810                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5811                         break;
5812
5813         repeat:
5814                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5815                         /* step marks */
5816                         int next = (last_mark+1) % SYNC_MARKS;
5817
5818                         mddev->resync_mark = mark[next];
5819                         mddev->resync_mark_cnt = mark_cnt[next];
5820                         mark[next] = jiffies;
5821                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5822                         last_mark = next;
5823                 }
5824
5825
5826                 if (kthread_should_stop())
5827                         goto interrupted;
5828
5829
5830                 /*
5831                  * this loop exits only if we are slower than the 'hard'
5832                  * speed limit, or if the system was IO-idle for
5833                  * a jiffy.
5834                  * the system might be non-idle CPU-wise, but we only care
5835                  * about not overloading the IO subsystem. (things like an
5836                  * e2fsck being done on the RAID array should execute fast)
5837                  */
5838                 blk_unplug(mddev->queue);
5839                 cond_resched();
5840
5841                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5842                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
5843
5844                 if (currspeed > speed_min(mddev)) {
5845                         if ((currspeed > speed_max(mddev)) ||
5846                                         !is_mddev_idle(mddev)) {
5847                                 msleep(500);
5848                                 goto repeat;
5849                         }
5850                 }
5851         }
5852         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5853         /*
5854          * this also signals 'finished resyncing' to md_stop
5855          */
5856  out:
5857         blk_unplug(mddev->queue);
5858
5859         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5860
5861         /* tell personality that we are finished */
5862         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5863
5864         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5865             mddev->curr_resync > 2) {
5866                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5867                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5868                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5869                                         printk(KERN_INFO
5870                                                "md: checkpointing %s of %s.\n",
5871                                                desc, mdname(mddev));
5872                                         mddev->recovery_cp = mddev->curr_resync;
5873                                 }
5874                         } else
5875                                 mddev->recovery_cp = MaxSector;
5876                 } else {
5877                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5878                                 mddev->curr_resync = MaxSector;
5879                         rdev_for_each(rdev, rtmp, mddev)
5880                                 if (rdev->raid_disk >= 0 &&
5881                                     !test_bit(Faulty, &rdev->flags) &&
5882                                     !test_bit(In_sync, &rdev->flags) &&
5883                                     rdev->recovery_offset < mddev->curr_resync)
5884                                         rdev->recovery_offset = mddev->curr_resync;
5885                 }
5886         }
5887         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5888
5889  skip:
5890         mddev->curr_resync = 0;
5891         mddev->resync_min = 0;
5892         mddev->resync_max = MaxSector;
5893         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5894         wake_up(&resync_wait);
5895         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5896         md_wakeup_thread(mddev->thread);
5897         return;
5898
5899  interrupted:
5900         /*
5901          * the thread was asked to stop; flag the interruption and exit.
5902          */
5903         printk(KERN_INFO
5904                "md: md_do_sync() got signal ... exiting\n");
5905         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5906         goto out;
5907
5908 }
5909 EXPORT_SYMBOL_GPL(md_do_sync);
5910
5911
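/*
 * Hot-remove any failed or out-of-sync device that has no pending IO,
 * then, if the array is degraded, hot-add suitable spares.  Returns the
 * number of devices that still need to be recovered.
 */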
5912 static int remove_and_add_spares(mddev_t *mddev)
5913 {
5914         mdk_rdev_t *rdev;
5915         struct list_head *rtmp;
5916         int spares = 0;
5917
5918         rdev_for_each(rdev, rtmp, mddev)
5919                 if (rdev->raid_disk >= 0 &&
5920                     !test_bit(Blocked, &rdev->flags) &&
5921                     (test_bit(Faulty, &rdev->flags) ||
5922                      ! test_bit(In_sync, &rdev->flags)) &&
5923                     atomic_read(&rdev->nr_pending)==0) {
5924                         if (mddev->pers->hot_remove_disk(
5925                                     mddev, rdev->raid_disk)==0) {
5926                                 char nm[20];
5927                                 sprintf(nm,"rd%d", rdev->raid_disk);
5928                                 sysfs_remove_link(&mddev->kobj, nm);
5929                                 rdev->raid_disk = -1;
5930                         }
5931                 }
5932
5933         if (mddev->degraded) {
5934                 rdev_for_each(rdev, rtmp, mddev) {
5935                         if (rdev->raid_disk >= 0 &&
5936                             !test_bit(In_sync, &rdev->flags))
5937                                 spares++;
5938                         if (rdev->raid_disk < 0
5939                             && !test_bit(Faulty, &rdev->flags)) {
5940                                 rdev->recovery_offset = 0;
5941                                 if (mddev->pers->
5942                                     hot_add_disk(mddev, rdev) == 0) {
5943                                         char nm[20];
5944                                         sprintf(nm, "rd%d", rdev->raid_disk);
5945                                         if (sysfs_create_link(&mddev->kobj,
5946                                                               &rdev->kobj, nm))
5947                                                 printk(KERN_WARNING
5948                                                        "md: cannot register "
5949                                                        "%s for %s\n",
5950                                                        nm, mdname(mddev));
5951                                         spares++;
5952                                         md_new_event(mddev);
5953                                 } else
5954                                         break;
5955                         }
5956                 }
5957         }
5958         return spares;
5959 }
5960 /*
5961  * This routine is regularly called by all per-raid-array threads to
5962  * deal with generic issues like resync and super-block update.
5963  * Raid personalities that don't have a thread (linear/raid0) do not
5964  * need this as they never do any recovery or update the superblock.
5965  *
5966  * It does not do any resync itself, but rather "forks" off other threads
5967  * to do that as needed.
5968  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5969  * "->recovery" and create a thread at ->sync_thread.
5970  * When the thread finishes it sets MD_RECOVERY_DONE
5971  * and wakes up this thread, which will reap it and finish up.
5972  * This thread also removes any faulty devices (with nr_pending == 0).
5973  *
5974  * The overall approach is:
5975  *  1/ If the superblock needs updating, update it.
5976  *  2/ If a recovery thread is running, don't do anything else.
5977  *  3/ If recovery has finished, clean up, possibly marking spares active.
5978  *  4/ If there are any faulty devices, remove them.
5979  *  5/ If the array is degraded, try to add spare devices.
5980  *  6/ If array has spares or is not in-sync, start a resync thread.
5981  */
5982 void md_check_recovery(mddev_t *mddev)
5983 {
5984         mdk_rdev_t *rdev;
5985         struct list_head *rtmp;
5986
5987
5988         if (mddev->bitmap)
5989                 bitmap_daemon_work(mddev->bitmap);
5990
5991         if (mddev->ro)
5992                 return;
5993
5994         if (signal_pending(current)) {
5995                 if (mddev->pers->sync_request && !mddev->external) {
5996                         printk(KERN_INFO "md: %s in immediate safe mode\n",
5997                                mdname(mddev));
5998                         mddev->safemode = 2;
5999                 }
6000                 flush_signals(current);
6001         }
6002
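        /*
         * Cheap checks first: only take the mutex and do real work if the
         * superblock is dirty (for internally managed metadata), recovery
         * needs starting or reaping, or a safemode transition is pending.
         */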
6003         if ( ! (
6004                 (mddev->flags && !mddev->external) ||
6005                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6006                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6007                 (mddev->external == 0 && mddev->safemode == 1) ||
6008                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6009                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6010                 ))
6011                 return;
6012
6013         if (mddev_trylock(mddev)) {
6014                 int spares = 0;
6015
6016                 if (!mddev->external) {
6017                         int did_change = 0;
6018                         spin_lock_irq(&mddev->write_lock);
6019                         if (mddev->safemode &&
6020                             !atomic_read(&mddev->writes_pending) &&
6021                             !mddev->in_sync &&
6022                             mddev->recovery_cp == MaxSector) {
6023                                 mddev->in_sync = 1;
6024                                 did_change = 1;
6025                                 if (mddev->persistent)
6026                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6027                         }
6028                         if (mddev->safemode == 1)
6029                                 mddev->safemode = 0;
6030                         spin_unlock_irq(&mddev->write_lock);
6031                         if (did_change)
6032                                 sysfs_notify(&mddev->kobj, NULL, "array_state");
6033                 }
6034
6035                 if (mddev->flags)
6036                         md_update_sb(mddev, 0);
6037
6038
6039                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6040                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6041                         /* resync/recovery still happening */
6042                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6043                         goto unlock;
6044                 }
6045                 if (mddev->sync_thread) {
6046                         /* resync has finished, collect result */
6047                         md_unregister_thread(mddev->sync_thread);
6048                         mddev->sync_thread = NULL;
6049                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6050                                 /* success...*/
6051                                 /* activate any spares */
6052                                 mddev->pers->spare_active(mddev);
6053                         }
6054                         md_update_sb(mddev, 1);
6055
6056                         /* if array is no longer degraded, then any saved_raid_disk
6057                          * information must be scrapped
6058                          */
6059                         if (!mddev->degraded)
6060                                 rdev_for_each(rdev, rtmp, mddev)
6061                                         rdev->saved_raid_disk = -1;
6062
6063                         mddev->recovery = 0;
6064                         /* flag recovery needed just to double check */
6065                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6066                         sysfs_notify(&mddev->kobj, NULL, "sync_action");
6067                         md_new_event(mddev);
6068                         goto unlock;
6069                 }
6070                 /* Set RUNNING before clearing NEEDED to avoid
6071                  * any transients in the value of "sync_action".
6072                  */
6073                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6074                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6075                 /* Clear some bits that don't mean anything, but
6076                  * might be left set
6077                  */
6078                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6079                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6080
6081                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6082                         goto unlock;
6083                 /* no recovery is running.
6084                  * remove any failed drives, then
6085                  * add spares if possible.
6086                  * Spares are also removed and re-added, to allow
6087                  * the personality to fail the re-add.
6088                  */
6089
6090                 if (mddev->reshape_position != MaxSector) {
6091                         if (mddev->pers->check_reshape(mddev) != 0)
6092                                 /* Cannot proceed */
6093                                 goto unlock;
6094                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6095                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6096                 } else if ((spares = remove_and_add_spares(mddev))) {
6097                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6098                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6099                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6100                 } else if (mddev->recovery_cp < MaxSector) {
6101                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6102                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6103                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6104                         /* nothing to be done ... */
6105                         goto unlock;
6106
6107                 if (mddev->pers->sync_request) {
6108                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6109                                 /* We are adding a device or devices to an array
6110                                  * which has the bitmap stored on all devices.
6111                                  * So make sure all bitmap pages get written
6112                                  */
6113                                 bitmap_write_all(mddev->bitmap);
6114                         }
6115                         mddev->sync_thread = md_register_thread(md_do_sync,
6116                                                                 mddev,
6117                                                                 "%s_resync");
6118                         if (!mddev->sync_thread) {
6119                                 printk(KERN_ERR "%s: could not start resync"
6120                                         " thread...\n", 
6121                                         mdname(mddev));
6122                                 /* leave the spares where they are, it shouldn't hurt */
6123                                 mddev->recovery = 0;
6124                         } else
6125                                 md_wakeup_thread(mddev->sync_thread);
6126                         sysfs_notify(&mddev->kobj, NULL, "sync_action");
6127                         md_new_event(mddev);
6128                 }
6129         unlock:
6130                 if (!mddev->sync_thread) {
6131                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6132                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6133                                                &mddev->recovery))
6134                                 sysfs_notify(&mddev->kobj, NULL, "sync_action");
6135                 }
6136                 mddev_unlock(mddev);
6137         }
6138 }
6139
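/*
 * Notify user-space (via the rdev 'state' attribute) that the device is
 * blocked, wait up to five seconds for the Blocked flag to clear, then
 * drop the pending reference.
 */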
6140 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6141 {
6142         sysfs_notify(&rdev->kobj, NULL, "state");
6143         wait_event_timeout(rdev->blocked_wait,
6144                            !test_bit(Blocked, &rdev->flags),
6145                            msecs_to_jiffies(5000));
6146         rdev_dec_pending(rdev, mddev);
6147 }
6148 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
6149
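/*
 * Reboot notifier: on shutdown, halt or power-off, stop every md device
 * that can be locked so the arrays are quiesced before the machine goes
 * down.
 */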
6150 static int md_notify_reboot(struct notifier_block *this,
6151                             unsigned long code, void *x)
6152 {
6153         struct list_head *tmp;
6154         mddev_t *mddev;
6155
6156         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6157
6158                 printk(KERN_INFO "md: stopping all md devices.\n");
6159
6160                 for_each_mddev(mddev, tmp)
6161                         if (mddev_trylock(mddev)) {
6162                                 do_md_stop (mddev, 1);
6163                                 mddev_unlock(mddev);
6164                         }
6165                 /*
6166                  * certain more exotic SCSI devices are known to be
6167                  * volatile wrt too early system reboots. While the
6168                  * right place to handle this issue is the given
6169                  * driver, we do want to have a safe RAID driver ...
6170                  */
6171                 mdelay(1000*1);
6172         }
6173         return NOTIFY_DONE;
6174 }
6175
6176 static struct notifier_block md_notifier = {
6177         .notifier_call  = md_notify_reboot,
6178         .next           = NULL,
6179         .priority       = INT_MAX, /* before any real devices */
6180 };
6181
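/* Create the /proc/mdstat status file. */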
6182 static void md_geninit(void)
6183 {
6184         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6185
6186         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6187 }
6188
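/*
 * Module initialisation: claim the "md" and "mdp" block majors, register
 * the regions that create devices on demand via md_probe(), hook up the
 * reboot notifier and the raid sysctl table, and create /proc/mdstat.
 */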
6189 static int __init md_init(void)
6190 {
6191         if (register_blkdev(MAJOR_NR, "md"))
6192                 return -1;
6193         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6194                 unregister_blkdev(MAJOR_NR, "md");
6195                 return -1;
6196         }
6197         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
6198                             md_probe, NULL, NULL);
6199         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
6200                             md_probe, NULL, NULL);
6201
6202         register_reboot_notifier(&md_notifier);
6203         raid_table_header = register_sysctl_table(raid_root_table);
6204
6205         md_geninit();
6206         return (0);
6207 }
6208
6209
6210 #ifndef MODULE
6211
6212 /*
6213  * Searches all registered partitions for autorun RAID arrays
6214  * at boot time.
6215  */
6216
6217 static LIST_HEAD(all_detected_devices);
6218 struct detected_devices_node {
6219         struct list_head list;
6220         dev_t dev;
6221 };
6222
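/*
 * Queue a device detected at boot so that autostart_arrays() can examine
 * it once partition scanning is complete.
 */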
6223 void md_autodetect_dev(dev_t dev)
6224 {
6225         struct detected_devices_node *node_detected_dev;
6226
6227         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
6228         if (node_detected_dev) {
6229                 node_detected_dev->dev = dev;
6230                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
6231         } else {
6232                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
6233                         ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
6234         }
6235 }
6236
6237
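/*
 * Import every device queued by md_autodetect_dev(), skip anything that
 * looks faulty, and then try to assemble arrays from what remains.
 */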
6238 static void autostart_arrays(int part)
6239 {
6240         mdk_rdev_t *rdev;
6241         struct detected_devices_node *node_detected_dev;
6242         dev_t dev;
6243         int i_scanned, i_passed;
6244
6245         i_scanned = 0;
6246         i_passed = 0;
6247
6248         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
6249
6250         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
6251                 i_scanned++;
6252                 node_detected_dev = list_entry(all_detected_devices.next,
6253                                         struct detected_devices_node, list);
6254                 list_del(&node_detected_dev->list);
6255                 dev = node_detected_dev->dev;
6256                 kfree(node_detected_dev);
6257                 rdev = md_import_device(dev,0, 90);
6258                 if (IS_ERR(rdev))
6259                         continue;
6260
6261                 if (test_bit(Faulty, &rdev->flags)) {
6262                         MD_BUG();
6263                         continue;
6264                 }
6265                 set_bit(AutoDetected, &rdev->flags);
6266                 list_add(&rdev->same_set, &pending_raid_disks);
6267                 i_passed++;
6268         }
6269
6270         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
6271                                                 i_scanned, i_passed);
6272
6273         autorun_devices(part);
6274 }
6275
6276 #endif /* !MODULE */
6277
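/*
 * Module teardown: unregister everything md_init() set up and release
 * any arrays that are still around.
 */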
6278 static __exit void md_exit(void)
6279 {
6280         mddev_t *mddev;
6281         struct list_head *tmp;
6282
6283         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
6284         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
6285
6286         unregister_blkdev(MAJOR_NR,"md");
6287         unregister_blkdev(mdp_major, "mdp");
6288         unregister_reboot_notifier(&md_notifier);
6289         unregister_sysctl_table(raid_table_header);
6290         remove_proc_entry("mdstat", NULL);
6291         for_each_mddev(mddev, tmp) {
6292                 struct gendisk *disk = mddev->gendisk;
6293                 if (!disk)
6294                         continue;
6295                 export_array(mddev);
6296                 del_gendisk(disk);
6297                 put_disk(disk);
6298                 mddev->gendisk = NULL;
6299                 mddev_put(mddev);
6300         }
6301 }
6302
6303 subsys_initcall(md_init);
6304 module_exit(md_exit)
6305
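/*
 * Handlers for the "start_ro" module parameter, which controls whether
 * newly assembled arrays start read-only.
 */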
6306 static int get_ro(char *buffer, struct kernel_param *kp)
6307 {
6308         return sprintf(buffer, "%d", start_readonly);
6309 }
6310 static int set_ro(const char *val, struct kernel_param *kp)
6311 {
6312         char *e;
6313         int num = simple_strtoul(val, &e, 10);
6314         if (*val && (*e == '\0' || *e == '\n')) {
6315                 start_readonly = num;
6316                 return 0;
6317         }
6318         return -EINVAL;
6319 }
6320
6321 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
6322 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
6323
6324
6325 EXPORT_SYMBOL(register_md_personality);
6326 EXPORT_SYMBOL(unregister_md_personality);
6327 EXPORT_SYMBOL(md_error);
6328 EXPORT_SYMBOL(md_done_sync);
6329 EXPORT_SYMBOL(md_write_start);
6330 EXPORT_SYMBOL(md_write_end);
6331 EXPORT_SYMBOL(md_register_thread);
6332 EXPORT_SYMBOL(md_unregister_thread);
6333 EXPORT_SYMBOL(md_wakeup_thread);
6334 EXPORT_SYMBOL(md_check_recovery);
6335 MODULE_LICENSE("GPL");
6336 MODULE_ALIAS("md");
6337 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);