2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/moduleparam.h>
15 #include <linux/blkpg.h>
16 #include <linux/bio.h>
17 #include <linux/buffer_head.h>
18 #include <linux/mempool.h>
19 #include <linux/slab.h>
20 #include <linux/idr.h>
21 #include <linux/hdreg.h>
23 #include <trace/events/block.h>
25 #define DM_MSG_PREFIX "core"
27 static const char *_name = DM_NAME;
29 static unsigned int major = 0;
30 static unsigned int _major = 0;
32 static DEFINE_SPINLOCK(_minor_lock);
35 * One of these is allocated per bio.
38 struct mapped_device *md;
42 unsigned long start_time;
47 * One of these is allocated per target within a bio. Hopefully
48 * this will be simplified out one day.
57 * For request-based dm.
58 * One of these is allocated per request.
60 struct dm_rq_target_io {
61 struct mapped_device *md;
63 struct request *orig, clone;
69 * For request-based dm.
70 * One of these is allocated per bio.
72 struct dm_rq_clone_bio_info {
77 union map_info *dm_get_mapinfo(struct bio *bio)
79 if (bio && bio->bi_private)
80 return &((struct dm_target_io *)bio->bi_private)->info;
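/*
 * Illustrative sketch, not part of the original source: code that holds
 * a bio cloned by dm can recover the per-target map_info like this
 * (the variable names and do_something_with() are hypothetical):
 *
 *	union map_info *info = dm_get_mapinfo(bio);
 *
 *	if (info)
 *		do_something_with(info->ptr);
 */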
84 #define MINOR_ALLOCED ((void *)-1)
87 * Bits for the md->flags field.
89 #define DMF_BLOCK_IO_FOR_SUSPEND 0
90 #define DMF_SUSPENDED 1
93 #define DMF_DELETING 4
94 #define DMF_NOFLUSH_SUSPENDING 5
95 #define DMF_QUEUE_IO_TO_THREAD 6
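/*
 * Illustrative only, not from the original source: these bits live in
 * md->flags and are manipulated with the usual atomic bitops, exactly
 * as the rest of this file does, e.g.
 *
 *	set_bit(DMF_SUSPENDED, &md->flags);
 *	if (test_bit(DMF_FREEING, &md->flags) ||
 *	    test_bit(DMF_DELETING, &md->flags))
 *		...;
 *	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
 */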
98 * Work processed by per-device workqueue.
100 struct mapped_device {
101 struct rw_semaphore io_lock;
102 struct mutex suspend_lock;
109 struct request_queue *queue;
110 struct gendisk *disk;
116 * A list of ios that arrived while we were suspended.
119 wait_queue_head_t wait;
120 struct work_struct work;
121 struct bio_list deferred;
122 spinlock_t deferred_lock;
125 * An error from the barrier request currently being processed.
130 * Processing queue (flush/barriers)
132 struct workqueue_struct *wq;
135 * The current mapping.
137 struct dm_table *map;
140 * io objects are allocated from here.
151 wait_queue_head_t eventq;
153 struct list_head uevent_list;
154 spinlock_t uevent_lock; /* Protect access to uevent_list */
157 * freeze/thaw support requires holding onto a super block
159 struct super_block *frozen_sb;
160 struct block_device *bdev;
162 /* forced geometry settings */
163 struct hd_geometry geometry;
170 static struct kmem_cache *_io_cache;
171 static struct kmem_cache *_tio_cache;
172 static struct kmem_cache *_rq_tio_cache;
173 static struct kmem_cache *_rq_bio_info_cache;
175 static int __init local_init(void)
179 /* allocate a slab for the dm_ios */
180 _io_cache = KMEM_CACHE(dm_io, 0);
184 /* allocate a slab for the target ios */
185 _tio_cache = KMEM_CACHE(dm_target_io, 0);
187 goto out_free_io_cache;
189 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
191 goto out_free_tio_cache;
193 _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
194 if (!_rq_bio_info_cache)
195 goto out_free_rq_tio_cache;
197 r = dm_uevent_init();
199 goto out_free_rq_bio_info_cache;
202 r = register_blkdev(_major, _name);
204 goto out_uevent_exit;
213 out_free_rq_bio_info_cache:
214 kmem_cache_destroy(_rq_bio_info_cache);
215 out_free_rq_tio_cache:
216 kmem_cache_destroy(_rq_tio_cache);
218 kmem_cache_destroy(_tio_cache);
220 kmem_cache_destroy(_io_cache);
225 static void local_exit(void)
227 kmem_cache_destroy(_rq_bio_info_cache);
228 kmem_cache_destroy(_rq_tio_cache);
229 kmem_cache_destroy(_tio_cache);
230 kmem_cache_destroy(_io_cache);
231 unregister_blkdev(_major, _name);
236 DMINFO("cleaned up");
239 static int (*_inits[])(void) __initdata = {
248 static void (*_exits[])(void) = {
257 static int __init dm_init(void)
259 const int count = ARRAY_SIZE(_inits);
263 for (i = 0; i < count; i++) {
278 static void __exit dm_exit(void)
280 int i = ARRAY_SIZE(_exits);
287 * Block device functions
289 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
291 struct mapped_device *md;
293 spin_lock(&_minor_lock);
295 md = bdev->bd_disk->private_data;
299 if (test_bit(DMF_FREEING, &md->flags) ||
300 test_bit(DMF_DELETING, &md->flags)) {
306 atomic_inc(&md->open_count);
309 spin_unlock(&_minor_lock);
311 return md ? 0 : -ENXIO;
314 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
316 struct mapped_device *md = disk->private_data;
317 atomic_dec(&md->open_count);
322 int dm_open_count(struct mapped_device *md)
324 return atomic_read(&md->open_count);
328 * Guarantees nothing is using the device before it's deleted.
330 int dm_lock_for_deletion(struct mapped_device *md)
334 spin_lock(&_minor_lock);
336 if (dm_open_count(md))
339 set_bit(DMF_DELETING, &md->flags);
341 spin_unlock(&_minor_lock);
346 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
348 struct mapped_device *md = bdev->bd_disk->private_data;
350 return dm_get_geometry(md, geo);
353 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
354 unsigned int cmd, unsigned long arg)
356 struct mapped_device *md = bdev->bd_disk->private_data;
357 struct dm_table *map = dm_get_table(md);
358 struct dm_target *tgt;
361 if (!map || !dm_table_get_size(map))
364 /* We only support devices that have a single target */
365 if (dm_table_get_num_targets(map) != 1)
368 tgt = dm_table_get_target(map, 0);
370 if (dm_suspended(md)) {
375 if (tgt->type->ioctl)
376 r = tgt->type->ioctl(tgt, cmd, arg);
384 static struct dm_io *alloc_io(struct mapped_device *md)
386 return mempool_alloc(md->io_pool, GFP_NOIO);
389 static void free_io(struct mapped_device *md, struct dm_io *io)
391 mempool_free(io, md->io_pool);
394 static struct dm_target_io *alloc_tio(struct mapped_device *md)
396 return mempool_alloc(md->tio_pool, GFP_NOIO);
399 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
401 mempool_free(tio, md->tio_pool);
404 static void start_io_acct(struct dm_io *io)
406 struct mapped_device *md = io->md;
409 io->start_time = jiffies;
411 cpu = part_stat_lock();
412 part_round_stats(cpu, &dm_disk(md)->part0);
414 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
417 static void end_io_acct(struct dm_io *io)
419 struct mapped_device *md = io->md;
420 struct bio *bio = io->bio;
421 unsigned long duration = jiffies - io->start_time;
423 int rw = bio_data_dir(bio);
425 cpu = part_stat_lock();
426 part_round_stats(cpu, &dm_disk(md)->part0);
427 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
431 * After this is decremented the bio must not be touched if it is a barrier.
434 dm_disk(md)->part0.in_flight = pending =
435 atomic_dec_return(&md->pending);
437 /* nudge anyone waiting on suspend queue */
443 * Add the bio to the list of deferred io.
445 static void queue_io(struct mapped_device *md, struct bio *bio)
447 down_write(&md->io_lock);
449 spin_lock_irq(&md->deferred_lock);
450 bio_list_add(&md->deferred, bio);
451 spin_unlock_irq(&md->deferred_lock);
453 if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
454 queue_work(md->wq, &md->work);
456 up_write(&md->io_lock);
460 * Everyone (including functions in this file) should use this
461 * function to access the md->map field, and make sure they call
462 * dm_table_put() when finished.
464 struct dm_table *dm_get_table(struct mapped_device *md)
468 read_lock(&md->map_lock);
472 read_unlock(&md->map_lock);
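/*
 * A minimal usage sketch of the contract described above (illustrative,
 * not part of the original source):
 *
 *	struct dm_table *map = dm_get_table(md);
 *
 *	if (map) {
 *		... inspect or use the table ...
 *		dm_table_put(map);
 *	}
 */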
478 * Get the geometry associated with a dm device
480 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
488 * Set the geometry of a device.
490 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
492 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
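/*
 * Illustrative numbers, not from the original source: a geometry of
 * 16383 cylinders, 16 heads and 63 sectors gives
 * sz = 16383 * 16 * 63 = 16514064 sectors, and geo->start must not lie
 * beyond that.
 */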
494 if (geo->start > sz) {
495 DMWARN("Start sector is beyond the geometry limits.");
504 /*-----------------------------------------------------------------
506 * A more elegant solution is in the works that uses the queue
507 * merge fn; unfortunately there are a couple of changes to
508 * the block layer that I want to make for this. So in the
509 * interests of getting something for people to use I give
510 * you this clearly demarcated crap.
511 *---------------------------------------------------------------*/
513 static int __noflush_suspending(struct mapped_device *md)
515 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
519 * Decrements the number of outstanding ios that a bio has been
520 * cloned into, completing the original io if necessary.
522 static void dec_pending(struct dm_io *io, int error)
527 struct mapped_device *md = io->md;
529 /* Push-back supersedes any I/O errors */
530 if (error && !(io->error > 0 && __noflush_suspending(md)))
533 if (atomic_dec_and_test(&io->io_count)) {
534 if (io->error == DM_ENDIO_REQUEUE) {
536 * Target requested pushing back the I/O.
538 spin_lock_irqsave(&md->deferred_lock, flags);
539 if (__noflush_suspending(md)) {
540 if (!bio_barrier(io->bio))
541 bio_list_add_head(&md->deferred, io->bio);
544 /* noflush suspend was interrupted. */
546 spin_unlock_irqrestore(&md->deferred_lock, flags);
549 io_error = io->error;
552 if (bio_barrier(bio)) {
554 * There can be just one barrier request so we use
555 * a per-device variable for error reporting.
556 * Note that you can't touch the bio after end_io_acct
558 if (!md->barrier_error && io_error != -EOPNOTSUPP)
559 md->barrier_error = io_error;
564 if (io_error != DM_ENDIO_REQUEUE) {
565 trace_block_bio_complete(md->queue, bio);
567 bio_endio(bio, io_error);
575 static void clone_endio(struct bio *bio, int error)
578 struct dm_target_io *tio = bio->bi_private;
579 struct dm_io *io = tio->io;
580 struct mapped_device *md = tio->io->md;
581 dm_endio_fn endio = tio->ti->type->end_io;
583 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
587 r = endio(tio->ti, bio, error, &tio->info);
588 if (r < 0 || r == DM_ENDIO_REQUEUE)
590 * error and requeue request are handled
594 else if (r == DM_ENDIO_INCOMPLETE)
595 /* The target will handle the io */
598 DMWARN("unimplemented target endio return value: %d", r);
604 * Store md for cleanup instead of tio which is about to get freed.
606 bio->bi_private = md->bs;
610 dec_pending(io, error);
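/*
 * Hedged sketch, not part of the original source, of the end_io return
 * values handled above as seen from a target's side; example_end_io and
 * target_wants_retry are hypothetical names:
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error, union map_info *map_context)
 *	{
 *		if (error && target_wants_retry(ti))
 *			return DM_ENDIO_REQUEUE;
 *		return 0;
 *	}
 *
 * Returning DM_ENDIO_REQUEUE asks dec_pending() to push the original
 * bio back onto md->deferred while a noflush suspend is in progress.
 */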
613 static sector_t max_io_len(struct mapped_device *md,
614 sector_t sector, struct dm_target *ti)
616 sector_t offset = sector - ti->begin;
617 sector_t len = ti->len - offset;
620 * Does the target need to split even further?
624 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) - offset;
633 static void __map_bio(struct dm_target *ti, struct bio *clone,
634 struct dm_target_io *tio)
638 struct mapped_device *md;
643 BUG_ON(!clone->bi_size);
645 clone->bi_end_io = clone_endio;
646 clone->bi_private = tio;
649 * Map the clone. If r == 0 we don't need to do
650 * anything, the target has assumed ownership of this io.
653 atomic_inc(&tio->io->io_count);
654 sector = clone->bi_sector;
655 r = ti->type->map(ti, clone, &tio->info);
656 if (r == DM_MAPIO_REMAPPED) {
657 /* the bio has been remapped so dispatch it */
659 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
660 tio->io->bio->bi_bdev->bd_dev, sector);
662 generic_make_request(clone);
663 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
664 /* error the io and bail out, or requeue it if needed */
666 dec_pending(tio->io, r);
668 * Store bio_set for cleanup.
670 clone->bi_private = md->bs;
674 DMWARN("unimplemented target map return value: %d", r);
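/*
 * Hedged sketch, not from the original source, of the map-method
 * contract __map_bio() relies on; a trivial remapping target looks
 * roughly like this (example_map and the helper names are hypothetical):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		bio->bi_bdev = example_target_bdev(ti);
 *		bio->bi_sector = example_target_start(ti) +
 *				 (bio->bi_sector - ti->begin);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * Returning DM_MAPIO_REQUEUE or a negative errno instead makes
 * __map_bio() complete the io via dec_pending() rather than dispatch it.
 */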
680 struct mapped_device *md;
681 struct dm_table *map;
685 sector_t sector_count;
689 static void dm_bio_destructor(struct bio *bio)
691 struct bio_set *bs = bio->bi_private;
699 * Creates a little bio that just does part of a bvec.
699 static struct bio *split_bvec(struct bio *bio, sector_t sector,
700 unsigned short idx, unsigned int offset,
701 unsigned int len, struct bio_set *bs)
704 struct bio_vec *bv = bio->bi_io_vec + idx;
706 clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
707 clone->bi_destructor = dm_bio_destructor;
708 *clone->bi_io_vec = *bv;
710 clone->bi_sector = sector;
711 clone->bi_bdev = bio->bi_bdev;
712 clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
714 clone->bi_size = to_bytes(len);
715 clone->bi_io_vec->bv_offset = offset;
716 clone->bi_io_vec->bv_len = clone->bi_size;
717 clone->bi_flags |= 1 << BIO_CLONED;
719 if (bio_integrity(bio)) {
720 bio_integrity_clone(clone, bio, GFP_NOIO);
721 bio_integrity_trim(clone,
722 bio_sector_offset(bio, idx, offset), len);
729 * Creates a bio that consists of a range of complete bvecs.
731 static struct bio *clone_bio(struct bio *bio, sector_t sector,
732 unsigned short idx, unsigned short bv_count,
733 unsigned int len, struct bio_set *bs)
737 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
738 __bio_clone(clone, bio);
739 clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
740 clone->bi_destructor = dm_bio_destructor;
741 clone->bi_sector = sector;
743 clone->bi_vcnt = idx + bv_count;
744 clone->bi_size = to_bytes(len);
745 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
747 if (bio_integrity(bio)) {
748 bio_integrity_clone(clone, bio, GFP_NOIO);
750 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
751 bio_integrity_trim(clone,
752 bio_sector_offset(bio, idx, 0), len);
758 static int __clone_and_map(struct clone_info *ci)
760 struct bio *clone, *bio = ci->bio;
761 struct dm_target *ti;
762 sector_t len = 0, max;
763 struct dm_target_io *tio;
765 ti = dm_table_find_target(ci->map, ci->sector);
766 if (!dm_target_is_valid(ti))
769 max = max_io_len(ci->md, ci->sector, ti);
772 * Allocate a target io object.
774 tio = alloc_tio(ci->md);
777 memset(&tio->info, 0, sizeof(tio->info));
779 if (ci->sector_count <= max) {
781 * Optimise for the simple case where we can do all of
782 * the remaining io with a single clone.
784 clone = clone_bio(bio, ci->sector, ci->idx,
785 bio->bi_vcnt - ci->idx, ci->sector_count, ci->md->bs);
787 __map_bio(ti, clone, tio);
788 ci->sector_count = 0;
790 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
792 * There are some bvecs that don't span targets.
793 * Do as many of these as possible.
796 sector_t remaining = max;
799 for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
800 bv_len = to_sector(bio->bi_io_vec[i].bv_len);
802 if (bv_len > remaining)
809 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, ci->md->bs);
811 __map_bio(ti, clone, tio);
814 ci->sector_count -= len;
819 * Handle a bvec that must be split between two or more targets.
821 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
822 sector_t remaining = to_sector(bv->bv_len);
823 unsigned int offset = 0;
827 ti = dm_table_find_target(ci->map, ci->sector);
828 if (!dm_target_is_valid(ti))
831 max = max_io_len(ci->md, ci->sector, ti);
833 tio = alloc_tio(ci->md);
836 memset(&tio->info, 0, sizeof(tio->info));
839 len = min(remaining, max);
841 clone = split_bvec(bio, ci->sector, ci->idx,
842 bv->bv_offset + offset, len, ci->md->bs);
845 __map_bio(ti, clone, tio);
848 ci->sector_count -= len;
849 offset += to_bytes(len);
850 } while (remaining -= len);
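/*
 * Worked example, illustrative and not from the original source: a
 * 16-sector bio made of two 8-sector bvecs that starts 4 sectors before
 * a target boundary takes the branch above on its first call: max is 4,
 * so split_bvec() sends 4 sectors to the first target and the remaining
 * 4 of that bvec to the next one.  The following call from
 * __split_and_process_bio() then maps the untouched second bvec with
 * clone_bio() via the fast path at the top of this function.
 */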
859 * Split the bio into several clones and submit them to the targets.
861 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
863 struct clone_info ci;
866 ci.map = dm_get_table(md);
867 if (unlikely(!ci.map)) {
868 if (!bio_barrier(bio))
871 if (!md->barrier_error)
872 md->barrier_error = -EIO;
878 ci.io = alloc_io(md);
880 atomic_set(&ci.io->io_count, 1);
883 ci.sector = bio->bi_sector;
884 ci.sector_count = bio_sectors(bio);
885 ci.idx = bio->bi_idx;
887 start_io_acct(ci.io);
888 while (ci.sector_count && !error)
889 error = __clone_and_map(&ci);
891 /* drop the extra reference count */
892 dec_pending(ci.io, error);
893 dm_table_put(ci.map);
895 /*-----------------------------------------------------------------
897 *---------------------------------------------------------------*/
899 static int dm_merge_bvec(struct request_queue *q,
900 struct bvec_merge_data *bvm,
901 struct bio_vec *biovec)
903 struct mapped_device *md = q->queuedata;
904 struct dm_table *map = dm_get_table(md);
905 struct dm_target *ti;
906 sector_t max_sectors;
912 ti = dm_table_find_target(map, bvm->bi_sector);
913 if (!dm_target_is_valid(ti))
917 * Find the maximum amount of I/O that won't need splitting
919 max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
920 (sector_t) BIO_MAX_SECTORS);
921 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
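/*
 * Illustrative arithmetic, not from the original source: if
 * max_io_len() allows another 1024 sectors from bvm->bi_sector and the
 * bio being built already holds 4096 bytes, then
 * max_size = (1024 << 9) - 4096 = 520192 bytes may still be added for
 * this bio_vec.
 */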
926 * merge_bvec_fn() returns the number of bytes
927 * it can accept at this offset;
928 * max is the precomputed maximal io size.
930 if (max_size && ti->type->merge)
931 max_size = ti->type->merge(ti, bvm, biovec, max_size);
933 * If the target doesn't support a merge method and some of the devices
934 * provided their merge_bvec method (we know this by looking at
935 * queue_max_hw_sectors), then we can't allow bios with multiple vector
936 * entries. So always set max_size to 0, and the code below allows just one page.
939 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
948 * Always allow an entire first page
950 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
951 max_size = biovec->bv_len;
957 * The request function that just remaps the bio built up by dm_merge_bvec.
960 static int dm_request(struct request_queue *q, struct bio *bio)
962 int rw = bio_data_dir(bio);
963 struct mapped_device *md = q->queuedata;
966 down_read(&md->io_lock);
968 cpu = part_stat_lock();
969 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
970 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
974 * If we're suspended or the thread is processing barriers
975 * we have to queue this io for later.
977 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
978 unlikely(bio_barrier(bio))) {
979 up_read(&md->io_lock);
981 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
982 bio_rw(bio) == READA) {
992 __split_and_process_bio(md, bio);
993 up_read(&md->io_lock);
997 static void dm_unplug_all(struct request_queue *q)
999 struct mapped_device *md = q->queuedata;
1000 struct dm_table *map = dm_get_table(md);
1003 dm_table_unplug_all(map);
1008 static int dm_any_congested(void *congested_data, int bdi_bits)
1011 struct mapped_device *md = congested_data;
1012 struct dm_table *map;
1014 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1015 map = dm_get_table(md);
1017 r = dm_table_any_congested(map, bdi_bits);
1025 /*-----------------------------------------------------------------
1026 * An IDR is used to keep track of allocated minor numbers.
1027 *---------------------------------------------------------------*/
1028 static DEFINE_IDR(_minor_idr);
1030 static void free_minor(int minor)
1032 spin_lock(&_minor_lock);
1033 idr_remove(&_minor_idr, minor);
1034 spin_unlock(&_minor_lock);
1038 * See if the device with a specific minor # is free.
1040 static int specific_minor(int minor)
1044 if (minor >= (1 << MINORBITS))
1047 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1051 spin_lock(&_minor_lock);
1053 if (idr_find(&_minor_idr, minor)) {
1058 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
1063 idr_remove(&_minor_idr, m);
1069 spin_unlock(&_minor_lock);
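/*
 * Hedged sketch, not part of the original source, of the classic
 * pre-allocate-then-insert idr pattern the function above follows;
 * -EAGAIN from idr_get_new_above() means the pre-allocated node was
 * consumed and the caller should call idr_pre_get() and try again:
 *
 *	if (!idr_pre_get(&_minor_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&_minor_lock);
 *	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
 *	spin_unlock(&_minor_lock);
 *	if (r == -EAGAIN)
 *		... repeat from idr_pre_get() ...
 */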
1073 static int next_free_minor(int *minor)
1077 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1081 spin_lock(&_minor_lock);
1083 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1087 if (m >= (1 << MINORBITS)) {
1088 idr_remove(&_minor_idr, m);
1096 spin_unlock(&_minor_lock);
1100 static struct block_device_operations dm_blk_dops;
1102 static void dm_wq_work(struct work_struct *work);
1105 * Allocate and initialise a blank device with a given minor.
1107 static struct mapped_device *alloc_dev(int minor)
1110 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1114 DMWARN("unable to allocate device, out of memory.");
1118 if (!try_module_get(THIS_MODULE))
1119 goto bad_module_get;
1121 /* get a minor number for the dev */
1122 if (minor == DM_ANY_MINOR)
1123 r = next_free_minor(&minor);
1125 r = specific_minor(minor);
1129 init_rwsem(&md->io_lock);
1130 mutex_init(&md->suspend_lock);
1131 spin_lock_init(&md->deferred_lock);
1132 rwlock_init(&md->map_lock);
1133 atomic_set(&md->holders, 1);
1134 atomic_set(&md->open_count, 0);
1135 atomic_set(&md->event_nr, 0);
1136 atomic_set(&md->uevent_seq, 0);
1137 INIT_LIST_HEAD(&md->uevent_list);
1138 spin_lock_init(&md->uevent_lock);
1140 md->queue = blk_alloc_queue(GFP_KERNEL);
1144 md->queue->queuedata = md;
1145 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1146 md->queue->backing_dev_info.congested_data = md;
1147 blk_queue_make_request(md->queue, dm_request);
1148 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
1149 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1150 md->queue->unplug_fn = dm_unplug_all;
1151 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1153 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
1157 md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
1161 md->bs = bioset_create(16, 0);
1165 md->disk = alloc_disk(1);
1169 atomic_set(&md->pending, 0);
1170 init_waitqueue_head(&md->wait);
1171 INIT_WORK(&md->work, dm_wq_work);
1172 init_waitqueue_head(&md->eventq);
1174 md->disk->major = _major;
1175 md->disk->first_minor = minor;
1176 md->disk->fops = &dm_blk_dops;
1177 md->disk->queue = md->queue;
1178 md->disk->private_data = md;
1179 sprintf(md->disk->disk_name, "dm-%d", minor);
1181 format_dev_t(md->name, MKDEV(_major, minor));
1183 md->wq = create_singlethread_workqueue("kdmflush");
1187 md->bdev = bdget_disk(md->disk, 0);
1191 /* Populate the mapping, nobody knows we exist yet */
1192 spin_lock(&_minor_lock);
1193 old_md = idr_replace(&_minor_idr, md, minor);
1194 spin_unlock(&_minor_lock);
1196 BUG_ON(old_md != MINOR_ALLOCED);
1201 destroy_workqueue(md->wq);
1205 bioset_free(md->bs);
1207 mempool_destroy(md->tio_pool);
1209 mempool_destroy(md->io_pool);
1211 blk_cleanup_queue(md->queue);
1215 module_put(THIS_MODULE);
1221 static void unlock_fs(struct mapped_device *md);
1223 static void free_dev(struct mapped_device *md)
1225 int minor = MINOR(disk_devt(md->disk));
1229 destroy_workqueue(md->wq);
1230 mempool_destroy(md->tio_pool);
1231 mempool_destroy(md->io_pool);
1232 bioset_free(md->bs);
1233 blk_integrity_unregister(md->disk);
1234 del_gendisk(md->disk);
1237 spin_lock(&_minor_lock);
1238 md->disk->private_data = NULL;
1239 spin_unlock(&_minor_lock);
1242 blk_cleanup_queue(md->queue);
1243 module_put(THIS_MODULE);
1248 * Bind a table to the device.
1250 static void event_callback(void *context)
1252 unsigned long flags;
1254 struct mapped_device *md = (struct mapped_device *) context;
1256 spin_lock_irqsave(&md->uevent_lock, flags);
1257 list_splice_init(&md->uevent_list, &uevents);
1258 spin_unlock_irqrestore(&md->uevent_lock, flags);
1260 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1262 atomic_inc(&md->event_nr);
1263 wake_up(&md->eventq);
1266 static void __set_size(struct mapped_device *md, sector_t size)
1268 set_capacity(md->disk, size);
1270 mutex_lock(&md->bdev->bd_inode->i_mutex);
1271 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1272 mutex_unlock(&md->bdev->bd_inode->i_mutex);
1275 static int __bind(struct mapped_device *md, struct dm_table *t)
1277 struct request_queue *q = md->queue;
1280 size = dm_table_get_size(t);
1283 * Wipe any geometry if the size of the table changed.
1285 if (size != get_capacity(md->disk))
1286 memset(&md->geometry, 0, sizeof(md->geometry));
1288 __set_size(md, size);
1291 dm_table_destroy(t);
1295 dm_table_event_callback(t, event_callback, md);
1297 write_lock(&md->map_lock);
1299 dm_table_set_restrictions(t, q);
1300 write_unlock(&md->map_lock);
1305 static void __unbind(struct mapped_device *md)
1307 struct dm_table *map = md->map;
1312 dm_table_event_callback(map, NULL, NULL);
1313 write_lock(&md->map_lock);
1315 write_unlock(&md->map_lock);
1316 dm_table_destroy(map);
1320 * Constructor for a new device.
1322 int dm_create(int minor, struct mapped_device **result)
1324 struct mapped_device *md;
1326 md = alloc_dev(minor);
1336 static struct mapped_device *dm_find_md(dev_t dev)
1338 struct mapped_device *md;
1339 unsigned minor = MINOR(dev);
1341 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1344 spin_lock(&_minor_lock);
1346 md = idr_find(&_minor_idr, minor);
1347 if (md && (md == MINOR_ALLOCED ||
1348 (MINOR(disk_devt(dm_disk(md))) != minor) ||
1349 test_bit(DMF_FREEING, &md->flags))) {
1355 spin_unlock(&_minor_lock);
1360 struct mapped_device *dm_get_md(dev_t dev)
1362 struct mapped_device *md = dm_find_md(dev);
1370 void *dm_get_mdptr(struct mapped_device *md)
1372 return md->interface_ptr;
1375 void dm_set_mdptr(struct mapped_device *md, void *ptr)
1377 md->interface_ptr = ptr;
1380 void dm_get(struct mapped_device *md)
1382 atomic_inc(&md->holders);
1385 const char *dm_device_name(struct mapped_device *md)
1389 EXPORT_SYMBOL_GPL(dm_device_name);
1391 void dm_put(struct mapped_device *md)
1393 struct dm_table *map;
1395 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1397 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1398 map = dm_get_table(md);
1399 idr_replace(&_minor_idr, MINOR_ALLOCED,
1400 MINOR(disk_devt(dm_disk(md))));
1401 set_bit(DMF_FREEING, &md->flags);
1402 spin_unlock(&_minor_lock);
1403 if (!dm_suspended(md)) {
1404 dm_table_presuspend_targets(map);
1405 dm_table_postsuspend_targets(map);
1413 EXPORT_SYMBOL_GPL(dm_put);
1415 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
1418 DECLARE_WAITQUEUE(wait, current);
1420 dm_unplug_all(md->queue);
1422 add_wait_queue(&md->wait, &wait);
1425 set_current_state(interruptible);
1428 if (!atomic_read(&md->pending))
1431 if (interruptible == TASK_INTERRUPTIBLE &&
1432 signal_pending(current)) {
1439 set_current_state(TASK_RUNNING);
1441 remove_wait_queue(&md->wait, &wait);
1446 static void dm_flush(struct mapped_device *md)
1448 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
1451 static void process_barrier(struct mapped_device *md, struct bio *bio)
1453 md->barrier_error = 0;
1457 if (!bio_empty_barrier(bio)) {
1458 __split_and_process_bio(md, bio);
1462 if (md->barrier_error != DM_ENDIO_REQUEUE)
1463 bio_endio(bio, md->barrier_error);
1465 spin_lock_irq(&md->deferred_lock);
1466 bio_list_add_head(&md->deferred, bio);
1467 spin_unlock_irq(&md->deferred_lock);
1472 * Process the deferred bios
1474 static void dm_wq_work(struct work_struct *work)
1476 struct mapped_device *md = container_of(work, struct mapped_device,
1480 down_write(&md->io_lock);
1482 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1483 spin_lock_irq(&md->deferred_lock);
1484 c = bio_list_pop(&md->deferred);
1485 spin_unlock_irq(&md->deferred_lock);
1488 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
1492 up_write(&md->io_lock);
1495 process_barrier(md, c);
1497 __split_and_process_bio(md, c);
1499 down_write(&md->io_lock);
1502 up_write(&md->io_lock);
1505 static void dm_queue_flush(struct mapped_device *md)
1507 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1508 smp_mb__after_clear_bit();
1509 queue_work(md->wq, &md->work);
1513 * Swap in a new table (destroying old one).
1515 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
1519 mutex_lock(&md->suspend_lock);
1521 /* device must be suspended */
1522 if (!dm_suspended(md))
1526 r = __bind(md, table);
1529 mutex_unlock(&md->suspend_lock);
1534 * Functions to lock and unlock any filesystem running on the device.
1537 static int lock_fs(struct mapped_device *md)
1541 WARN_ON(md->frozen_sb);
1543 md->frozen_sb = freeze_bdev(md->bdev);
1544 if (IS_ERR(md->frozen_sb)) {
1545 r = PTR_ERR(md->frozen_sb);
1546 md->frozen_sb = NULL;
1550 set_bit(DMF_FROZEN, &md->flags);
1555 static void unlock_fs(struct mapped_device *md)
1557 if (!test_bit(DMF_FROZEN, &md->flags))
1560 thaw_bdev(md->bdev, md->frozen_sb);
1561 md->frozen_sb = NULL;
1562 clear_bit(DMF_FROZEN, &md->flags);
1566 * We need to be able to change a mapping table under a mounted
1567 * filesystem. For example, we might want to move some data in
1568 * the background. Before the table can be swapped with
1569 * dm_bind_table, dm_suspend must be called to flush any in-flight
1570 * bios and ensure that any further io gets deferred.
1572 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1574 struct dm_table *map = NULL;
1576 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
1577 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
1579 mutex_lock(&md->suspend_lock);
1581 if (dm_suspended(md)) {
1586 map = dm_get_table(md);
1589 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
1590 * This flag is cleared before dm_suspend returns.
1593 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1595 /* This does not get reverted if there's an error later. */
1596 dm_table_presuspend_targets(map);
1599 * Flush I/O to the device. noflush supersedes do_lockfs,
1600 * because lock_fs() needs to flush I/Os.
1602 if (!noflush && do_lockfs) {
1609 * Here we must make sure that no processes are submitting requests
1610 * to target drivers, i.e. no one may be executing
1611 * __split_and_process_bio. This is called from dm_request and dm_wq_work.
1614 * To get all processes out of __split_and_process_bio in dm_request,
1615 * we take the write lock. To prevent any process from reentering
1616 * __split_and_process_bio from dm_request, we set
1617 * DMF_QUEUE_IO_TO_THREAD.
1619 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
1620 * and call flush_workqueue(md->wq). flush_workqueue will wait until
1621 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
1622 * further calls to __split_and_process_bio from dm_wq_work.
1624 down_write(&md->io_lock);
1625 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1626 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
1627 up_write(&md->io_lock);
1629 flush_workqueue(md->wq);
1632 * At this point no more requests are entering target request routines.
1633 * We call dm_wait_for_completion to wait for all existing requests to finish.
1636 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
1638 down_write(&md->io_lock);
1640 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1641 up_write(&md->io_lock);
1643 /* were we interrupted? */
1648 goto out; /* pushback list is already flushed, so skip flush */
1652 * If dm_wait_for_completion returned 0, the device is completely
1653 * quiescent now. There is no request-processing activity. All new
1654 * requests are being added to the md->deferred list.
1657 dm_table_postsuspend_targets(map);
1659 set_bit(DMF_SUSPENDED, &md->flags);
1665 mutex_unlock(&md->suspend_lock);
1669 int dm_resume(struct mapped_device *md)
1672 struct dm_table *map = NULL;
1674 mutex_lock(&md->suspend_lock);
1675 if (!dm_suspended(md))
1678 map = dm_get_table(md);
1679 if (!map || !dm_table_get_size(map))
1682 r = dm_table_resume_targets(map);
1690 clear_bit(DMF_SUSPENDED, &md->flags);
1692 dm_table_unplug_all(map);
1694 dm_kobject_uevent(md);
1700 mutex_unlock(&md->suspend_lock);
1705 /*-----------------------------------------------------------------
1706 * Event notification.
1707 *---------------------------------------------------------------*/
1708 void dm_kobject_uevent(struct mapped_device *md)
1710 kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
1713 uint32_t dm_next_uevent_seq(struct mapped_device *md)
1715 return atomic_add_return(1, &md->uevent_seq);
1718 uint32_t dm_get_event_nr(struct mapped_device *md)
1720 return atomic_read(&md->event_nr);
1723 int dm_wait_event(struct mapped_device *md, int event_nr)
1725 return wait_event_interruptible(md->eventq,
1726 (event_nr != atomic_read(&md->event_nr)));
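/*
 * Illustrative usage, not part of the original source: a caller that
 * wants to block until the device reports an event samples the counter
 * first and then sleeps on it:
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, ev))
 *		... interrupted by a signal ...
 */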
1729 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
1731 unsigned long flags;
1733 spin_lock_irqsave(&md->uevent_lock, flags);
1734 list_add(elist, &md->uevent_list);
1735 spin_unlock_irqrestore(&md->uevent_lock, flags);
1739 * The gendisk is only valid as long as you have a reference count on 'md'.
1742 struct gendisk *dm_disk(struct mapped_device *md)
1747 struct kobject *dm_kobject(struct mapped_device *md)
1753 * struct mapped_device should not be exported outside of dm.c
1754 * so use this check to verify that the kobj is part of the md structure.
1756 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1758 struct mapped_device *md;
1760 md = container_of(kobj, struct mapped_device, kobj);
1761 if (&md->kobj != kobj)
1764 if (test_bit(DMF_FREEING, &md->flags) ||
1765 test_bit(DMF_DELETING, &md->flags))
1772 int dm_suspended(struct mapped_device *md)
1774 return test_bit(DMF_SUSPENDED, &md->flags);
1777 int dm_noflush_suspending(struct dm_target *ti)
1779 struct mapped_device *md = dm_table_get_md(ti->table);
1780 int r = __noflush_suspending(md);
1786 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
1788 static struct block_device_operations dm_blk_dops = {
1789 .open = dm_blk_open,
1790 .release = dm_blk_close,
1791 .ioctl = dm_blk_ioctl,
1792 .getgeo = dm_blk_getgeo,
1793 .owner = THIS_MODULE
1796 EXPORT_SYMBOL(dm_get_mapinfo);
1801 module_init(dm_init);
1802 module_exit(dm_exit);
1804 module_param(major, uint, 0);
1805 MODULE_PARM_DESC(major, "The major number of the device mapper");
1806 MODULE_DESCRIPTION(DM_NAME " driver");
1807 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1808 MODULE_LICENSE("GPL");