/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};
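/*
 * A mirror_set and its trailing mirror[] array are allocated together in a
 * single kzalloc() (see alloc_context() below), so the flexible array member
 * must stay last.
 */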
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
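/*
 * Delayed wakeup of kmirrord: the timer_pending bit ensures at most one
 * timer is outstanding; delayed_wake_fn() clears it and kicks the workqueue.
 */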
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}
static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}
static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}
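/*
 * queue_bio() adds a bio to the per-set read or write list and wakes
 * kmirrord only when the list goes from empty to non-empty, so a stream of
 * queued bios triggers a single wakeup.
 */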
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0
/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}
static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}
/*
 * @m: mirror device to fail
 * @error_type: one of the DM_RAID1_*_ERROR enum values
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
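/*
 * mirror_flush() sends an empty barrier to every leg via dm-io; any leg
 * whose bit comes back set in error_bits is failed with
 * DM_RAID1_FLUSH_ERROR.
 */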
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
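/*
 * recover() asks kcopyd to copy one region from the default mirror to every
 * other leg.  The final region may be shorter than the region size, and
 * copy errors are ignored unless the handle_errors feature is enabled.
 */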
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}
static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}
/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;

	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}
static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}
static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
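/*
 * Reads are submitted through dm-io with read_callback() as the completion
 * handler.  The chosen mirror is stashed in the bio with bio_set_m() so the
 * callback can fail that leg and, if another in-sync leg exists, requeue the
 * bio for a retry.
 */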
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}
/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}
static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}
/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
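/*
 * write_callback() is called from the dm-io completion path and must not
 * block; failed writes are therefore pushed onto ms->failures and handed to
 * kmirrord, which can raise the userspace event and mark regions no-sync.
 */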
static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}

	bio_endio(bio, ret);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush failed on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}
static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}
/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}
static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}
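/*
 * get_mirror() parses one "<mirror_path> <offset>" argument pair from the
 * constructor line and opens the underlying device for the given leg.
 */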
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}
/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
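/*
 * mirror_end_io() completes the mapping: for writes it drops the region's
 * pending count; for failed reads it restores the bio from the saved
 * dm_bio_details and requeues it so kmirrord can retry on another leg.
 */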
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}
static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *	F => Flush - A flush failure occurred
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
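/*
 * Status output: STATUSTYPE_INFO reports the mirror count, each leg's device
 * name, a per-leg health character, the in-sync/total region ratio and the
 * log status; STATUSTYPE_TABLE reproduces the constructor line (log, legs,
 * features).
 */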
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}
static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");