2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
9 #include "dm-bio-list.h"
10 #include "dm-uevent.h"
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/moduleparam.h>
16 #include <linux/blkpg.h>
17 #include <linux/bio.h>
18 #include <linux/buffer_head.h>
19 #include <linux/mempool.h>
20 #include <linux/slab.h>
21 #include <linux/idr.h>
22 #include <linux/hdreg.h>
23 #include <linux/blktrace_api.h>
24 #include <trace/block.h>
26 #define DM_MSG_PREFIX "core"
28 static const char *_name = DM_NAME;
30 static unsigned int major = 0;
31 static unsigned int _major = 0;
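/*
 * Note (descriptive comment, not in the original): 'major' is the module
 * parameter (see module_param() at the bottom of the file); '_major' holds
 * the major number actually registered with the block layer -- passing 0 to
 * register_blkdev() asks for a dynamically allocated major.
 */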
33 static DEFINE_SPINLOCK(_minor_lock);
36 * One of these is allocated per bio.
39 struct mapped_device *md;
43 unsigned long start_time;
48 * One of these is allocated per target within a bio. Hopefully
49 * this will be simplified out one day.
57 DEFINE_TRACE(block_bio_complete);
60 * For request-based dm.
61 * One of these is allocated per request.
63 struct dm_rq_target_io {
64 struct mapped_device *md;
66 struct request *orig, clone;
72 * For request-based dm.
73 * One of these is allocated per bio.
75 struct dm_rq_clone_bio_info {
80 union map_info *dm_get_mapinfo(struct bio *bio)
82 if (bio && bio->bi_private)
83 return &((struct dm_target_io *)bio->bi_private)->info;
87 #define MINOR_ALLOCED ((void *)-1)
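/*
 * MINOR_ALLOCED is a placeholder stored in the minor IDR while a
 * mapped_device is still being constructed; alloc_dev() later swaps it for
 * the real pointer with idr_replace(), dm_find_md() treats it as "not ready
 * yet", and dm_put() puts the placeholder back while the device is freed.
 */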
90 * Bits for the md->flags field.
92 #define DMF_BLOCK_IO 0
93 #define DMF_SUSPENDED 1
96 #define DMF_DELETING 4
97 #define DMF_NOFLUSH_SUSPENDING 5
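/*
 * Rough meaning of the flags used below: DMF_BLOCK_IO makes dm_request()
 * defer incoming bios, DMF_SUSPENDED marks a completed suspend,
 * DMF_FREEING/DMF_DELETING block new opens while the device is torn down,
 * and DMF_NOFLUSH_SUSPENDING enables the pushback (requeue) path.
 */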
100 * Work processed by per-device workqueue.
103 struct work_struct work;
104 struct mapped_device *md;
108 struct mapped_device {
109 struct rw_semaphore io_lock;
110 struct mutex suspend_lock;
111 spinlock_t pushback_lock;
118 struct request_queue *queue;
119 struct gendisk *disk;
125 * A list of ios that arrived while we were suspended.
128 wait_queue_head_t wait;
129 struct bio_list deferred;
130 struct bio_list pushback;
133 * Processing queue (flush/barriers)
135 struct workqueue_struct *wq;
138 * The current mapping.
140 struct dm_table *map;
143 * io objects are allocated from here.
154 wait_queue_head_t eventq;
156 struct list_head uevent_list;
157 spinlock_t uevent_lock; /* Protect access to uevent_list */
160 * freeze/thaw support requires holding onto a super block
162 struct super_block *frozen_sb;
163 struct block_device *suspended_bdev;
165 /* forced geometry settings */
166 struct hd_geometry geometry;
173 static struct kmem_cache *_io_cache;
174 static struct kmem_cache *_tio_cache;
175 static struct kmem_cache *_rq_tio_cache;
176 static struct kmem_cache *_rq_bio_info_cache;
178 static int __init local_init(void)
182 /* allocate a slab for the dm_ios */
183 _io_cache = KMEM_CACHE(dm_io, 0);
187 /* allocate a slab for the target ios */
188 _tio_cache = KMEM_CACHE(dm_target_io, 0);
190 goto out_free_io_cache;
192 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
194 goto out_free_tio_cache;
196 _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
197 if (!_rq_bio_info_cache)
198 goto out_free_rq_tio_cache;
200 r = dm_uevent_init();
202 goto out_free_rq_bio_info_cache;
205 r = register_blkdev(_major, _name);
207 goto out_uevent_exit;
216 out_free_rq_bio_info_cache:
217 kmem_cache_destroy(_rq_bio_info_cache);
218 out_free_rq_tio_cache:
219 kmem_cache_destroy(_rq_tio_cache);
221 kmem_cache_destroy(_tio_cache);
223 kmem_cache_destroy(_io_cache);
228 static void local_exit(void)
230 kmem_cache_destroy(_rq_bio_info_cache);
231 kmem_cache_destroy(_rq_tio_cache);
232 kmem_cache_destroy(_tio_cache);
233 kmem_cache_destroy(_io_cache);
234 unregister_blkdev(_major, _name);
239 DMINFO("cleaned up");
242 static int (*_inits[])(void) __initdata = {
251 static void (*_exits[])(void) = {
260 static int __init dm_init(void)
262 const int count = ARRAY_SIZE(_inits);
266 for (i = 0; i < count; i++) {
281 static void __exit dm_exit(void)
283 int i = ARRAY_SIZE(_exits);
290 * Block device functions
292 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
294 struct mapped_device *md;
296 spin_lock(&_minor_lock);
298 md = bdev->bd_disk->private_data;
302 if (test_bit(DMF_FREEING, &md->flags) ||
303 test_bit(DMF_DELETING, &md->flags)) {
309 atomic_inc(&md->open_count);
312 spin_unlock(&_minor_lock);
314 return md ? 0 : -ENXIO;
317 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
319 struct mapped_device *md = disk->private_data;
320 atomic_dec(&md->open_count);
325 int dm_open_count(struct mapped_device *md)
327 return atomic_read(&md->open_count);
331 * Guarantees nothing is using the device before it's deleted.
333 int dm_lock_for_deletion(struct mapped_device *md)
337 spin_lock(&_minor_lock);
339 if (dm_open_count(md))
342 set_bit(DMF_DELETING, &md->flags);
344 spin_unlock(&_minor_lock);
349 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
351 struct mapped_device *md = bdev->bd_disk->private_data;
353 return dm_get_geometry(md, geo);
356 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
357 unsigned int cmd, unsigned long arg)
359 struct mapped_device *md = bdev->bd_disk->private_data;
360 struct dm_table *map = dm_get_table(md);
361 struct dm_target *tgt;
364 if (!map || !dm_table_get_size(map))
367 /* We only support devices that have a single target */
368 if (dm_table_get_num_targets(map) != 1)
371 tgt = dm_table_get_target(map, 0);
373 if (dm_suspended(md)) {
378 if (tgt->type->ioctl)
379 r = tgt->type->ioctl(tgt, cmd, arg);
387 static struct dm_io *alloc_io(struct mapped_device *md)
389 return mempool_alloc(md->io_pool, GFP_NOIO);
392 static void free_io(struct mapped_device *md, struct dm_io *io)
394 mempool_free(io, md->io_pool);
397 static struct dm_target_io *alloc_tio(struct mapped_device *md)
399 return mempool_alloc(md->tio_pool, GFP_NOIO);
402 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
404 mempool_free(tio, md->tio_pool);
407 static void start_io_acct(struct dm_io *io)
409 struct mapped_device *md = io->md;
412 io->start_time = jiffies;
414 cpu = part_stat_lock();
415 part_round_stats(cpu, &dm_disk(md)->part0);
417 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
420 static void end_io_acct(struct dm_io *io)
422 struct mapped_device *md = io->md;
423 struct bio *bio = io->bio;
424 unsigned long duration = jiffies - io->start_time;
426 int rw = bio_data_dir(bio);
428 cpu = part_stat_lock();
429 part_round_stats(cpu, &dm_disk(md)->part0);
430 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
433 dm_disk(md)->part0.in_flight = pending =
434 atomic_dec_return(&md->pending);
436 /* nudge anyone waiting on suspend queue */
442 * Add the bio to the list of deferred io.
444 static int queue_io(struct mapped_device *md, struct bio *bio)
446 down_write(&md->io_lock);
448 if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
449 up_write(&md->io_lock);
453 bio_list_add(&md->deferred, bio);
455 up_write(&md->io_lock);
456 return 0; /* deferred successfully */
460 * Everyone (including functions in this file) should use this
461 * function to access the md->map field, and make sure they call
462 * dm_table_put() when finished.
464 struct dm_table *dm_get_table(struct mapped_device *md)
468 read_lock(&md->map_lock);
472 read_unlock(&md->map_lock);
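/*
 * Minimal caller-side sketch of the pattern described above: take a
 * reference, use the table, then drop it.
 *
 *	struct dm_table *map = dm_get_table(md);
 *	if (map) {
 *		... use the table ...
 *		dm_table_put(map);
 *	}
 */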
478 * Get the geometry associated with a dm device
480 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
488 * Set the geometry of a device.
490 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
492 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
494 if (geo->start > sz) {
495 DMWARN("Start sector is beyond the geometry limits.");
504 /*-----------------------------------------------------------------
506 * A more elegant soln is in the works that uses the queue
507 * merge fn, unfortunately there are a couple of changes to
508 * the block layer that I want to make for this. So in the
509 * interests of getting something for people to use I give
510 * you this clearly demarcated crap.
511 *---------------------------------------------------------------*/
513 static int __noflush_suspending(struct mapped_device *md)
515 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
519 * Decrements the number of outstanding ios that a bio has been
520 * cloned into, completing the original io if necessary.
522 static void dec_pending(struct dm_io *io, int error)
527 struct mapped_device *md = io->md;
529 /* Push-back supersedes any I/O errors */
530 if (error && !(io->error > 0 && __noflush_suspending(md)))
533 if (atomic_dec_and_test(&io->io_count)) {
534 if (io->error == DM_ENDIO_REQUEUE) {
536 * Target requested pushing back the I/O.
537 * This must be handled before the sleeper on
538 * suspend queue merges the pushback list.
540 spin_lock_irqsave(&md->pushback_lock, flags);
541 if (__noflush_suspending(md))
542 bio_list_add(&md->pushback, io->bio);
544 /* noflush suspend was interrupted. */
546 spin_unlock_irqrestore(&md->pushback_lock, flags);
551 io_error = io->error;
556 if (io_error != DM_ENDIO_REQUEUE) {
557 trace_block_bio_complete(md->queue, bio);
559 bio_endio(bio, io_error);
564 static void clone_endio(struct bio *bio, int error)
567 struct dm_target_io *tio = bio->bi_private;
568 struct dm_io *io = tio->io;
569 struct mapped_device *md = tio->io->md;
570 dm_endio_fn endio = tio->ti->type->end_io;
572 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
576 r = endio(tio->ti, bio, error, &tio->info);
577 if (r < 0 || r == DM_ENDIO_REQUEUE)
579 * error and requeue request are handled
583 else if (r == DM_ENDIO_INCOMPLETE)
584 /* The target will handle the io */
587 DMWARN("unimplemented target endio return value: %d", r);
593 * Store md for cleanup instead of tio which is about to get freed.
595 bio->bi_private = md->bs;
599 dec_pending(io, error);
602 static sector_t max_io_len(struct mapped_device *md,
603 sector_t sector, struct dm_target *ti)
605 sector_t offset = sector - ti->begin;
606 sector_t len = ti->len - offset;
609 * Does the target need to split even further?
613 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
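/*
 * Illustrative numbers for the rounding above (assuming split_io is a power
 * of two, which the mask arithmetic relies on): with ti->split_io = 64 and
 * offset = 100, (100 + 64) & ~63 = 128, so the boundary is 128 - 100 = 28
 * sectors and len is capped there, keeping each clone inside a single
 * split_io-aligned chunk of the target.
 */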
622 static void __map_bio(struct dm_target *ti, struct bio *clone,
623 struct dm_target_io *tio)
627 struct mapped_device *md;
632 BUG_ON(!clone->bi_size);
634 clone->bi_end_io = clone_endio;
635 clone->bi_private = tio;
638 * Map the clone. If r == 0 we don't need to do
639 * anything, the target has assumed ownership of
642 atomic_inc(&tio->io->io_count);
643 sector = clone->bi_sector;
644 r = ti->type->map(ti, clone, &tio->info);
645 if (r == DM_MAPIO_REMAPPED) {
646 /* the bio has been remapped so dispatch it */
648 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
649 tio->io->bio->bi_bdev->bd_dev,
650 clone->bi_sector, sector);
652 generic_make_request(clone);
653 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
654 /* error the io and bail out, or requeue it if needed */
656 dec_pending(tio->io, r);
658 * Store bio_set for cleanup.
660 clone->bi_private = md->bs;
664 DMWARN("unimplemented target map return value: %d", r);
670 struct mapped_device *md;
671 struct dm_table *map;
675 sector_t sector_count;
679 static void dm_bio_destructor(struct bio *bio)
681 struct bio_set *bs = bio->bi_private;
687 * Creates a little bio that just does part of a bvec.
689 static struct bio *split_bvec(struct bio *bio, sector_t sector,
690 unsigned short idx, unsigned int offset,
691 unsigned int len, struct bio_set *bs)
694 struct bio_vec *bv = bio->bi_io_vec + idx;
696 clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
697 clone->bi_destructor = dm_bio_destructor;
698 *clone->bi_io_vec = *bv;
700 clone->bi_sector = sector;
701 clone->bi_bdev = bio->bi_bdev;
702 clone->bi_rw = bio->bi_rw;
704 clone->bi_size = to_bytes(len);
705 clone->bi_io_vec->bv_offset = offset;
706 clone->bi_io_vec->bv_len = clone->bi_size;
707 clone->bi_flags |= 1 << BIO_CLONED;
713 * Creates a bio that consists of a range of complete bvecs.
715 static struct bio *clone_bio(struct bio *bio, sector_t sector,
716 unsigned short idx, unsigned short bv_count,
717 unsigned int len, struct bio_set *bs)
721 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
722 __bio_clone(clone, bio);
723 clone->bi_destructor = dm_bio_destructor;
724 clone->bi_sector = sector;
726 clone->bi_vcnt = idx + bv_count;
727 clone->bi_size = to_bytes(len);
728 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
733 static int __clone_and_map(struct clone_info *ci)
735 struct bio *clone, *bio = ci->bio;
736 struct dm_target *ti;
737 sector_t len = 0, max;
738 struct dm_target_io *tio;
740 ti = dm_table_find_target(ci->map, ci->sector);
741 if (!dm_target_is_valid(ti))
744 max = max_io_len(ci->md, ci->sector, ti);
747 * Allocate a target io object.
749 tio = alloc_tio(ci->md);
752 memset(&tio->info, 0, sizeof(tio->info));
754 if (ci->sector_count <= max) {
756 * Optimise for the simple case where we can do all of
757 * the remaining io with a single clone.
759 clone = clone_bio(bio, ci->sector, ci->idx,
760 bio->bi_vcnt - ci->idx, ci->sector_count,
762 __map_bio(ti, clone, tio);
763 ci->sector_count = 0;
765 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
767 * There are some bvecs that don't span targets.
768 * Do as many of these as possible.
771 sector_t remaining = max;
774 for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
775 bv_len = to_sector(bio->bi_io_vec[i].bv_len);
777 if (bv_len > remaining)
784 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
786 __map_bio(ti, clone, tio);
789 ci->sector_count -= len;
794 * Handle a bvec that must be split between two or more targets.
796 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
797 sector_t remaining = to_sector(bv->bv_len);
798 unsigned int offset = 0;
802 ti = dm_table_find_target(ci->map, ci->sector);
803 if (!dm_target_is_valid(ti))
806 max = max_io_len(ci->md, ci->sector, ti);
808 tio = alloc_tio(ci->md);
811 memset(&tio->info, 0, sizeof(tio->info));
814 len = min(remaining, max);
816 clone = split_bvec(bio, ci->sector, ci->idx,
817 bv->bv_offset + offset, len,
820 __map_bio(ti, clone, tio);
823 ci->sector_count -= len;
824 offset += to_bytes(len);
825 } while (remaining -= len);
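/*
 * To summarise the three cases above: (1) the whole remainder fits in one
 * target and is cloned in one go, (2) whole bvecs are cloned up to the
 * target boundary, and (3) a single bvec straddling a boundary is carved up
 * with split_bvec() until it is consumed.
 */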
834 * Split the bio into several clones.
836 static int __split_bio(struct mapped_device *md, struct bio *bio)
838 struct clone_info ci;
841 ci.map = dm_get_table(md);
842 if (unlikely(!ci.map))
844 if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
845 dm_table_put(ci.map);
846 bio_endio(bio, -EOPNOTSUPP);
851 ci.io = alloc_io(md);
853 atomic_set(&ci.io->io_count, 1);
856 ci.sector = bio->bi_sector;
857 ci.sector_count = bio_sectors(bio);
858 ci.idx = bio->bi_idx;
860 start_io_acct(ci.io);
861 while (ci.sector_count && !error)
862 error = __clone_and_map(&ci);
864 /* drop the extra reference count */
865 dec_pending(ci.io, error);
866 dm_table_put(ci.map);
870 /*-----------------------------------------------------------------
872 *---------------------------------------------------------------*/
874 static int dm_merge_bvec(struct request_queue *q,
875 struct bvec_merge_data *bvm,
876 struct bio_vec *biovec)
878 struct mapped_device *md = q->queuedata;
879 struct dm_table *map = dm_get_table(md);
880 struct dm_target *ti;
881 sector_t max_sectors;
887 ti = dm_table_find_target(map, bvm->bi_sector);
888 if (!dm_target_is_valid(ti))
892 * Find maximum amount of I/O that won't need splitting
894 max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
895 (sector_t) BIO_MAX_SECTORS);
896 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
901 * merge_bvec_fn() returns the number of bytes
902 * it can accept at this offset;
903 * max is the precomputed maximal io size.
905 if (max_size && ti->type->merge)
906 max_size = ti->type->merge(ti, bvm, biovec, max_size);
913 * Always allow an entire first page
915 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
916 max_size = biovec->bv_len;
922 * The request function that just remaps the bio built up by
925 static int dm_request(struct request_queue *q, struct bio *bio)
928 int rw = bio_data_dir(bio);
929 struct mapped_device *md = q->queuedata;
932 down_read(&md->io_lock);
934 cpu = part_stat_lock();
935 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
936 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
940 * If we're suspended we have to queue this io for later.
943 while (test_bit(DMF_BLOCK_IO, &md->flags)) {
944 up_read(&md->io_lock);
946 if (bio_rw(bio) != READA)
947 r = queue_io(md, bio);
953 * We're in a while loop, because someone could suspend
954 * before we get to the following read lock.
956 down_read(&md->io_lock);
959 r = __split_bio(md, bio);
960 up_read(&md->io_lock);
969 static void dm_unplug_all(struct request_queue *q)
971 struct mapped_device *md = q->queuedata;
972 struct dm_table *map = dm_get_table(md);
975 dm_table_unplug_all(map);
980 static int dm_any_congested(void *congested_data, int bdi_bits)
983 struct mapped_device *md = congested_data;
984 struct dm_table *map;
986 if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
987 map = dm_get_table(md);
989 r = dm_table_any_congested(map, bdi_bits);
997 /*-----------------------------------------------------------------
998 * An IDR is used to keep track of allocated minor numbers.
999 *---------------------------------------------------------------*/
1000 static DEFINE_IDR(_minor_idr);
1002 static void free_minor(int minor)
1004 spin_lock(&_minor_lock);
1005 idr_remove(&_minor_idr, minor);
1006 spin_unlock(&_minor_lock);
1010 * See if the device with a specific minor # is free.
1012 static int specific_minor(int minor)
1016 if (minor >= (1 << MINORBITS))
1019 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1023 spin_lock(&_minor_lock);
1025 if (idr_find(&_minor_idr, minor)) {
1030 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
1035 idr_remove(&_minor_idr, m);
1041 spin_unlock(&_minor_lock);
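/*
 * Note on the (pre-idr_alloc) IDR API used here: idr_pre_get() preallocates
 * memory outside the spinlock, idr_get_new_above() then inserts the
 * MINOR_ALLOCED placeholder at the lowest free id >= minor, and the error
 * path removes the entry again when the id it got back is not the requested
 * minor.
 */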
1045 static int next_free_minor(int *minor)
1049 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1053 spin_lock(&_minor_lock);
1055 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1059 if (m >= (1 << MINORBITS)) {
1060 idr_remove(&_minor_idr, m);
1068 spin_unlock(&_minor_lock);
1072 static struct block_device_operations dm_blk_dops;
1075 * Allocate and initialise a blank device with a given minor.
1077 static struct mapped_device *alloc_dev(int minor)
1080 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1084 DMWARN("unable to allocate device, out of memory.");
1088 if (!try_module_get(THIS_MODULE))
1089 goto bad_module_get;
1091 /* get a minor number for the dev */
1092 if (minor == DM_ANY_MINOR)
1093 r = next_free_minor(&minor);
1095 r = specific_minor(minor);
1099 init_rwsem(&md->io_lock);
1100 mutex_init(&md->suspend_lock);
1101 spin_lock_init(&md->pushback_lock);
1102 rwlock_init(&md->map_lock);
1103 atomic_set(&md->holders, 1);
1104 atomic_set(&md->open_count, 0);
1105 atomic_set(&md->event_nr, 0);
1106 atomic_set(&md->uevent_seq, 0);
1107 INIT_LIST_HEAD(&md->uevent_list);
1108 spin_lock_init(&md->uevent_lock);
1110 md->queue = blk_alloc_queue(GFP_KERNEL);
1114 md->queue->queuedata = md;
1115 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1116 md->queue->backing_dev_info.congested_data = md;
1117 blk_queue_make_request(md->queue, dm_request);
1118 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1119 md->queue->unplug_fn = dm_unplug_all;
1120 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1122 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
1126 md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
1130 md->bs = bioset_create(16, 0);
1134 md->disk = alloc_disk(1);
1138 atomic_set(&md->pending, 0);
1139 init_waitqueue_head(&md->wait);
1140 init_waitqueue_head(&md->eventq);
1142 md->disk->major = _major;
1143 md->disk->first_minor = minor;
1144 md->disk->fops = &dm_blk_dops;
1145 md->disk->queue = md->queue;
1146 md->disk->private_data = md;
1147 sprintf(md->disk->disk_name, "dm-%d", minor);
1149 format_dev_t(md->name, MKDEV(_major, minor));
1151 md->wq = create_singlethread_workqueue("kdmflush");
1155 /* Populate the mapping; nobody knows we exist yet */
1156 spin_lock(&_minor_lock);
1157 old_md = idr_replace(&_minor_idr, md, minor);
1158 spin_unlock(&_minor_lock);
1160 BUG_ON(old_md != MINOR_ALLOCED);
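/*
 * The minor was reserved earlier with the MINOR_ALLOCED placeholder;
 * idr_replace() swaps in the now fully initialised md, which is why anything
 * other than MINOR_ALLOCED coming back here is a bug.
 */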
1167 bioset_free(md->bs);
1169 mempool_destroy(md->tio_pool);
1171 mempool_destroy(md->io_pool);
1173 blk_cleanup_queue(md->queue);
1177 module_put(THIS_MODULE);
1183 static void unlock_fs(struct mapped_device *md);
1185 static void free_dev(struct mapped_device *md)
1187 int minor = MINOR(disk_devt(md->disk));
1189 if (md->suspended_bdev) {
1191 bdput(md->suspended_bdev);
1193 destroy_workqueue(md->wq);
1194 mempool_destroy(md->tio_pool);
1195 mempool_destroy(md->io_pool);
1196 bioset_free(md->bs);
1197 del_gendisk(md->disk);
1200 spin_lock(&_minor_lock);
1201 md->disk->private_data = NULL;
1202 spin_unlock(&_minor_lock);
1205 blk_cleanup_queue(md->queue);
1206 module_put(THIS_MODULE);
1211 * Bind a table to the device.
1213 static void event_callback(void *context)
1215 unsigned long flags;
1217 struct mapped_device *md = (struct mapped_device *) context;
1219 spin_lock_irqsave(&md->uevent_lock, flags);
1220 list_splice_init(&md->uevent_list, &uevents);
1221 spin_unlock_irqrestore(&md->uevent_lock, flags);
1223 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1225 atomic_inc(&md->event_nr);
1226 wake_up(&md->eventq);
1229 static void __set_size(struct mapped_device *md, sector_t size)
1231 set_capacity(md->disk, size);
1233 mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
1234 i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1235 mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
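/*
 * The capacity is mirrored into the suspended bdev's inode so that anything
 * looking at i_size on the block device sees the new table size; i_mutex is
 * held to serialise the size update.
 */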
1238 static int __bind(struct mapped_device *md, struct dm_table *t)
1240 struct request_queue *q = md->queue;
1243 size = dm_table_get_size(t);
1246 * Wipe any geometry if the size of the table changed.
1248 if (size != get_capacity(md->disk))
1249 memset(&md->geometry, 0, sizeof(md->geometry));
1251 if (md->suspended_bdev)
1252 __set_size(md, size);
1255 dm_table_destroy(t);
1259 dm_table_event_callback(t, event_callback, md);
1261 write_lock(&md->map_lock);
1263 dm_table_set_restrictions(t, q);
1264 write_unlock(&md->map_lock);
1269 static void __unbind(struct mapped_device *md)
1271 struct dm_table *map = md->map;
1276 dm_table_event_callback(map, NULL, NULL);
1277 write_lock(&md->map_lock);
1279 write_unlock(&md->map_lock);
1280 dm_table_destroy(map);
1284 * Constructor for a new device.
1286 int dm_create(int minor, struct mapped_device **result)
1288 struct mapped_device *md;
1290 md = alloc_dev(minor);
1300 static struct mapped_device *dm_find_md(dev_t dev)
1302 struct mapped_device *md;
1303 unsigned minor = MINOR(dev);
1305 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1308 spin_lock(&_minor_lock);
1310 md = idr_find(&_minor_idr, minor);
1311 if (md && (md == MINOR_ALLOCED ||
1312 (MINOR(disk_devt(dm_disk(md))) != minor) ||
1313 test_bit(DMF_FREEING, &md->flags))) {
1319 spin_unlock(&_minor_lock);
1324 struct mapped_device *dm_get_md(dev_t dev)
1326 struct mapped_device *md = dm_find_md(dev);
1334 void *dm_get_mdptr(struct mapped_device *md)
1336 return md->interface_ptr;
1339 void dm_set_mdptr(struct mapped_device *md, void *ptr)
1341 md->interface_ptr = ptr;
1344 void dm_get(struct mapped_device *md)
1346 atomic_inc(&md->holders);
1349 const char *dm_device_name(struct mapped_device *md)
1353 EXPORT_SYMBOL_GPL(dm_device_name);
1355 void dm_put(struct mapped_device *md)
1357 struct dm_table *map;
1359 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1361 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1362 map = dm_get_table(md);
1363 idr_replace(&_minor_idr, MINOR_ALLOCED,
1364 MINOR(disk_devt(dm_disk(md))));
1365 set_bit(DMF_FREEING, &md->flags);
1366 spin_unlock(&_minor_lock);
1367 if (!dm_suspended(md)) {
1368 dm_table_presuspend_targets(map);
1369 dm_table_postsuspend_targets(map);
1377 EXPORT_SYMBOL_GPL(dm_put);
1379 static int dm_wait_for_completion(struct mapped_device *md)
1384 set_current_state(TASK_INTERRUPTIBLE);
1387 if (!atomic_read(&md->pending))
1390 if (signal_pending(current)) {
1397 set_current_state(TASK_RUNNING);
1403 * Process the deferred bios
1405 static void __flush_deferred_io(struct mapped_device *md)
1409 while ((c = bio_list_pop(&md->deferred))) {
1410 if (__split_bio(md, c))
1414 clear_bit(DMF_BLOCK_IO, &md->flags);
1417 static void __merge_pushback_list(struct mapped_device *md)
1419 unsigned long flags;
1421 spin_lock_irqsave(&md->pushback_lock, flags);
1422 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1423 bio_list_merge_head(&md->deferred, &md->pushback);
1424 bio_list_init(&md->pushback);
1425 spin_unlock_irqrestore(&md->pushback_lock, flags);
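/*
 * bio_list_merge_head() puts the pushed-back bios at the front of the
 * deferred list, so bios that were already mapped once are resubmitted
 * before anything that arrived later, preserving the original ordering.
 */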
1428 static void dm_wq_work(struct work_struct *work)
1430 struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
1431 struct mapped_device *md = req->md;
1433 down_write(&md->io_lock);
1434 __flush_deferred_io(md);
1435 up_write(&md->io_lock);
1438 static void dm_wq_queue(struct mapped_device *md, void *context,
1439 struct dm_wq_req *req)
1442 req->context = context;
1443 INIT_WORK(&req->work, dm_wq_work);
1444 queue_work(md->wq, &req->work);
1447 static void dm_queue_flush(struct mapped_device *md, void *context)
1449 struct dm_wq_req req;
1451 dm_wq_queue(md, context, &req);
1452 flush_workqueue(md->wq);
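/*
 * The work item lives on the caller's stack: dm_wq_queue() queues it on the
 * per-device workqueue and flush_workqueue() waits for it to finish, so
 * dm_queue_flush() is effectively a synchronous flush of the deferred list.
 */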
1456 * Swap in a new table (destroying old one).
1458 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
1462 mutex_lock(&md->suspend_lock);
1464 /* device must be suspended */
1465 if (!dm_suspended(md))
1468 /* without bdev, the device size cannot be changed */
1469 if (!md->suspended_bdev)
1470 if (get_capacity(md->disk) != dm_table_get_size(table))
1474 r = __bind(md, table);
1477 mutex_unlock(&md->suspend_lock);
1482 * Functions to lock and unlock any filesystem running on the
1485 static int lock_fs(struct mapped_device *md)
1489 WARN_ON(md->frozen_sb);
1491 md->frozen_sb = freeze_bdev(md->suspended_bdev);
1492 if (IS_ERR(md->frozen_sb)) {
1493 r = PTR_ERR(md->frozen_sb);
1494 md->frozen_sb = NULL;
1498 set_bit(DMF_FROZEN, &md->flags);
1500 /* don't bdput right now, we don't want the bdev
1501 * to go away while it is locked.
1506 static void unlock_fs(struct mapped_device *md)
1508 if (!test_bit(DMF_FROZEN, &md->flags))
1511 thaw_bdev(md->suspended_bdev, md->frozen_sb);
1512 md->frozen_sb = NULL;
1513 clear_bit(DMF_FROZEN, &md->flags);
1517 * We need to be able to change a mapping table under a mounted
1518 * filesystem. For example we might want to move some data in
1519 * the background. Before the table can be swapped with
1520 * dm_bind_table, dm_suspend must be called to flush any in
1521 * flight bios and ensure that any further io gets deferred.
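/*
 * Typical sequence from the ioctl layer (sketch only; new_table stands in
 * for whatever table the caller has built):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	dm_swap_table(md, new_table);
 *	dm_resume(md);
 */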
1523 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1525 struct dm_table *map = NULL;
1526 DECLARE_WAITQUEUE(wait, current);
1528 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
1529 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
1531 mutex_lock(&md->suspend_lock);
1533 if (dm_suspended(md)) {
1538 map = dm_get_table(md);
1541 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
1542 * This flag is cleared before dm_suspend returns.
1545 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1547 /* This does not get reverted if there's an error later. */
1548 dm_table_presuspend_targets(map);
1550 /* bdget() can stall if the pending I/Os are not flushed */
1552 md->suspended_bdev = bdget_disk(md->disk, 0);
1553 if (!md->suspended_bdev) {
1554 DMWARN("bdget failed in dm_suspend");
1560 * Flush I/O to the device. noflush supersedes do_lockfs,
1561 * because lock_fs() needs to flush I/Os.
1571 * First we set the BLOCK_IO flag so no more ios will be mapped.
1573 down_write(&md->io_lock);
1574 set_bit(DMF_BLOCK_IO, &md->flags);
1576 add_wait_queue(&md->wait, &wait);
1577 up_write(&md->io_lock);
1581 dm_table_unplug_all(map);
1584 * Wait for the already-mapped ios to complete.
1586 r = dm_wait_for_completion(md);
1588 down_write(&md->io_lock);
1589 remove_wait_queue(&md->wait, &wait);
1592 __merge_pushback_list(md);
1593 up_write(&md->io_lock);
1595 /* were we interrupted? */
1597 dm_queue_flush(md, NULL);
1600 goto out; /* pushback list is already flushed, so skip flush */
1603 dm_table_postsuspend_targets(map);
1605 set_bit(DMF_SUSPENDED, &md->flags);
1608 if (r && md->suspended_bdev) {
1609 bdput(md->suspended_bdev);
1610 md->suspended_bdev = NULL;
1616 mutex_unlock(&md->suspend_lock);
1620 int dm_resume(struct mapped_device *md)
1623 struct dm_table *map = NULL;
1625 mutex_lock(&md->suspend_lock);
1626 if (!dm_suspended(md))
1629 map = dm_get_table(md);
1630 if (!map || !dm_table_get_size(map))
1633 r = dm_table_resume_targets(map);
1637 dm_queue_flush(md, NULL);
1641 if (md->suspended_bdev) {
1642 bdput(md->suspended_bdev);
1643 md->suspended_bdev = NULL;
1646 clear_bit(DMF_SUSPENDED, &md->flags);
1648 dm_table_unplug_all(map);
1650 dm_kobject_uevent(md);
1656 mutex_unlock(&md->suspend_lock);
1661 /*-----------------------------------------------------------------
1662 * Event notification.
1663 *---------------------------------------------------------------*/
1664 void dm_kobject_uevent(struct mapped_device *md)
1666 kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
1669 uint32_t dm_next_uevent_seq(struct mapped_device *md)
1671 return atomic_add_return(1, &md->uevent_seq);
1674 uint32_t dm_get_event_nr(struct mapped_device *md)
1676 return atomic_read(&md->event_nr);
1679 int dm_wait_event(struct mapped_device *md, int event_nr)
1681 return wait_event_interruptible(md->eventq,
1682 (event_nr != atomic_read(&md->event_nr)));
1685 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
1687 unsigned long flags;
1689 spin_lock_irqsave(&md->uevent_lock, flags);
1690 list_add(elist, &md->uevent_list);
1691 spin_unlock_irqrestore(&md->uevent_lock, flags);
1695 * The gendisk is only valid as long as you have a reference
1698 struct gendisk *dm_disk(struct mapped_device *md)
1703 struct kobject *dm_kobject(struct mapped_device *md)
1709 * struct mapped_device should not be exported outside of dm.c,
1710 * so use this check to verify that kobj is part of the md structure.
1712 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1714 struct mapped_device *md;
1716 md = container_of(kobj, struct mapped_device, kobj);
1717 if (&md->kobj != kobj)
1724 int dm_suspended(struct mapped_device *md)
1726 return test_bit(DMF_SUSPENDED, &md->flags);
1729 int dm_noflush_suspending(struct dm_target *ti)
1731 struct mapped_device *md = dm_table_get_md(ti->table);
1732 int r = __noflush_suspending(md);
1738 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
1740 static struct block_device_operations dm_blk_dops = {
1741 .open = dm_blk_open,
1742 .release = dm_blk_close,
1743 .ioctl = dm_blk_ioctl,
1744 .getgeo = dm_blk_getgeo,
1745 .owner = THIS_MODULE
1748 EXPORT_SYMBOL(dm_get_mapinfo);
1753 module_init(dm_init);
1754 module_exit(dm_exit);
1756 module_param(major, uint, 0);
1757 MODULE_PARM_DESC(major, "The major number of the device mapper");
1758 MODULE_DESCRIPTION(DM_NAME " driver");
1759 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1760 MODULE_LICENSE("GPL");