#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
-#include <linux/smp_lock.h>
#define DM_MSG_PREFIX "core"
struct dm_io {
struct mapped_device *md;
int error;
- struct bio *bio;
atomic_t io_count;
+ struct bio *bio;
unsigned long start_time;
};
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
+/*
+ * Work processed by per-device workqueue.
+ */
+struct dm_wq_req {
+ enum {
+ DM_WQ_FLUSH_DEFERRED,
+ } type;
+ struct work_struct work;
+ struct mapped_device *md;
+ void *context;
+};
+
struct mapped_device {
struct rw_semaphore io_lock;
- struct semaphore suspend_lock;
+ struct mutex suspend_lock;
spinlock_t pushback_lock;
rwlock_t map_lock;
atomic_t holders;
struct bio_list pushback;
/*
+ * Processing queue (flush/barriers)
+ */
+ struct workqueue_struct *wq;
+
+ /*
* The current mapping.
*/
struct dm_table *map;
static int __init local_init(void)
{
- int r;
+ int r = -ENOMEM;
/* allocate a slab for the dm_ios */
_io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
- return -ENOMEM;
+ return r;
/* allocate a slab for the target ios */
_tio_cache = KMEM_CACHE(dm_target_io, 0);
- if (!_tio_cache) {
- kmem_cache_destroy(_io_cache);
- return -ENOMEM;
- }
+ if (!_tio_cache)
+ goto out_free_io_cache;
r = dm_uevent_init();
- if (r) {
- kmem_cache_destroy(_tio_cache);
- kmem_cache_destroy(_io_cache);
- return r;
- }
+ if (r)
+ goto out_free_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
- if (r < 0) {
- kmem_cache_destroy(_tio_cache);
- kmem_cache_destroy(_io_cache);
- dm_uevent_exit();
- return r;
- }
+ if (r < 0)
+ goto out_uevent_exit;
if (!_major)
_major = r;
return 0;
+
+out_uevent_exit:
+ dm_uevent_exit();
+out_free_tio_cache:
+ kmem_cache_destroy(_tio_cache);
+out_free_io_cache:
+ kmem_cache_destroy(_io_cache);
+
+ return r;
}
static void local_exit(void)
dm_target_init,
dm_linear_init,
dm_stripe_init,
+ dm_kcopyd_init,
dm_interface_init,
};
dm_target_exit,
dm_linear_exit,
dm_stripe_exit,
+ dm_kcopyd_exit,
dm_interface_exit,
};
/*
* Block device functions
*/
-static int dm_blk_open(struct inode *inode, struct file *file)
+static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
- md = inode->i_bdev->bd_disk->private_data;
+ md = bdev->bd_disk->private_data;
if (!md)
goto out;
return md ? 0 : -ENXIO;
}
-static int dm_blk_close(struct inode *inode, struct file *file)
+static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
- struct mapped_device *md;
-
- md = inode->i_bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
atomic_dec(&md->open_count);
dm_put(md);
return 0;
return dm_get_geometry(md, geo);
}
-static int dm_blk_ioctl(struct inode *inode, struct file *file,
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mapped_device *md;
- struct dm_table *map;
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_table *map = dm_get_table(md);
struct dm_target *tgt;
int r = -ENOTTY;
- /* We don't really need this lock, but we do need 'inode'. */
- unlock_kernel();
-
- md = inode->i_bdev->bd_disk->private_data;
-
- map = dm_get_table(md);
-
if (!map || !dm_table_get_size(map))
goto out;
}
if (tgt->type->ioctl)
- r = tgt->type->ioctl(tgt, inode, file, cmd, arg);
+ r = tgt->type->ioctl(tgt, cmd, arg);
out:
dm_table_put(map);
- lock_kernel();
return r;
}
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
+ int cpu;
io->start_time = jiffies;
- preempt_disable();
- disk_round_stats(dm_disk(md));
- preempt_enable();
- dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_unlock();
+ dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
- int pending;
+ int pending, cpu;
int rw = bio_data_dir(bio);
- preempt_disable();
- disk_round_stats(dm_disk(md));
- preempt_enable();
- dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+ part_stat_unlock();
- disk_stat_add(dm_disk(md), ticks[rw], duration);
+ dm_disk(md)->part0.in_flight = pending =
+ atomic_dec_return(&md->pending);
return !pending;
}
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
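+	/* the split bvec still points at the original bio's pages */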
+ clone->bi_flags |= 1 << BIO_CLONED;
return clone;
}
* CRUD END
*---------------------------------------------------------------*/
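+/*
+ * Ask the target under bvm->bi_sector how many bytes can be appended
+ * to a bio at this offset without forcing dm to split it later; wired
+ * up as the queue's merge_bvec_fn via blk_queue_merge_bvec() in
+ * alloc_dev() below.
+ */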
+static int dm_merge_bvec(struct request_queue *q,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+ struct dm_target *ti;
+ sector_t max_sectors;
+ int max_size = 0;
+
+ if (unlikely(!map))
+ goto out;
+
+ ti = dm_table_find_target(map, bvm->bi_sector);
+ if (!dm_target_is_valid(ti))
+ goto out_table;
+
+ /*
+ * Find maximum amount of I/O that won't need splitting
+ */
+ max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
+ (sector_t) BIO_MAX_SECTORS);
+ max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
+ if (max_size < 0)
+ max_size = 0;
+
+	/*
+	 * merge_bvec_fn() returns the number of bytes the target can
+	 * accept at this offset; max_size is the precomputed maximum
+	 * I/O size.
+	 */
+ if (max_size && ti->type->merge)
+ max_size = ti->type->merge(ti, bvm, biovec, max_size);
+
+out_table:
+ dm_table_put(map);
+
+out:
+ /*
+ * Always allow an entire first page
+ */
+ if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
+ max_size = biovec->bv_len;
+
+ return max_size;
+}
+
/*
* The request function that just remaps the bio built up by
* dm_merge_bvec.
int r = -EIO;
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ int cpu;
/*
* There is no use in forwarding any barrier request since we can't
down_read(&md->io_lock);
- disk_stat_inc(dm_disk(md), ios[rw]);
- disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+ part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+ part_stat_unlock();
/*
* If we're suspended we have to queue
/*
* See if the device with a specific minor # is free.
*/
-static int specific_minor(struct mapped_device *md, int minor)
+static int specific_minor(int minor)
{
int r, m;
return r;
}
-static int next_free_minor(struct mapped_device *md, int *minor)
+static int next_free_minor(int *minor)
{
int r, m;
spin_lock(&_minor_lock);
r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
- if (r) {
+ if (r)
goto out;
- }
if (m >= (1 << MINORBITS)) {
idr_remove(&_minor_idr, m);
static struct mapped_device *alloc_dev(int minor)
{
int r;
- struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
+ struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
void *old_md;
if (!md) {
}
if (!try_module_get(THIS_MODULE))
- goto bad0;
+ goto bad_module_get;
/* get a minor number for the dev */
if (minor == DM_ANY_MINOR)
- r = next_free_minor(md, &minor);
+ r = next_free_minor(&minor);
else
- r = specific_minor(md, minor);
+ r = specific_minor(minor);
if (r < 0)
- goto bad1;
+ goto bad_minor;
- memset(md, 0, sizeof(*md));
init_rwsem(&md->io_lock);
- init_MUTEX(&md->suspend_lock);
+ mutex_init(&md->suspend_lock);
spin_lock_init(&md->pushback_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
- goto bad1_free_minor;
+ goto bad_queue;
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
+ blk_queue_merge_bvec(md->queue, dm_merge_bvec);
md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!md->io_pool)
- goto bad2;
+ goto bad_io_pool;
md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
if (!md->tio_pool)
- goto bad3;
+ goto bad_tio_pool;
md->bs = bioset_create(16, 16);
if (!md->bs)
md->disk = alloc_disk(1);
if (!md->disk)
- goto bad4;
+ goto bad_disk;
atomic_set(&md->pending, 0);
init_waitqueue_head(&md->wait);
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
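+	/* single-threaded workqueue used to flush the deferred bio list */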
+ md->wq = create_singlethread_workqueue("kdmflush");
+ if (!md->wq)
+ goto bad_thread;
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
return md;
- bad4:
+bad_thread:
+ put_disk(md->disk);
+bad_disk:
bioset_free(md->bs);
- bad_no_bioset:
+bad_no_bioset:
mempool_destroy(md->tio_pool);
- bad3:
+bad_tio_pool:
mempool_destroy(md->io_pool);
- bad2:
+bad_io_pool:
blk_cleanup_queue(md->queue);
- bad1_free_minor:
+bad_queue:
free_minor(minor);
- bad1:
+bad_minor:
module_put(THIS_MODULE);
- bad0:
+bad_module_get:
kfree(md);
return NULL;
}
static void free_dev(struct mapped_device *md)
{
- int minor = md->disk->first_minor;
+ int minor = MINOR(disk_devt(md->disk));
if (md->suspended_bdev) {
unlock_fs(md);
bdput(md->suspended_bdev);
}
+ destroy_workqueue(md->wq);
mempool_destroy(md->tio_pool);
mempool_destroy(md->io_pool);
bioset_free(md->bs);
list_splice_init(&md->uevent_list, &uevents);
spin_unlock_irqrestore(&md->uevent_lock, flags);
- dm_send_uevents(&uevents, &md->disk->dev.kobj);
+ dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
- (dm_disk(md)->first_minor != minor) ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
map = dm_get_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+ idr_replace(&_minor_idr, MINOR_ALLOCED,
+ MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
if (!dm_suspended(md)) {
}
EXPORT_SYMBOL_GPL(dm_put);
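+/*
+ * Wait for all already-mapped I/O (md->pending) to drain; returns
+ * -EINTR if interrupted by a signal, 0 otherwise.
+ */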
+static int dm_wait_for_completion(struct mapped_device *md)
+{
+ int r = 0;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ smp_mb();
+ if (!atomic_read(&md->pending))
+ break;
+
+ if (signal_pending(current)) {
+ r = -EINTR;
+ break;
+ }
+
+ io_schedule();
+ }
+ set_current_state(TASK_RUNNING);
+
+ return r;
+}
+
/*
* Process the deferred bios
*/
-static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
+static void __flush_deferred_io(struct mapped_device *md)
{
- struct bio *n;
+ struct bio *c;
- while (c) {
- n = c->bi_next;
- c->bi_next = NULL;
+ while ((c = bio_list_pop(&md->deferred))) {
if (__split_bio(md, c))
bio_io_error(c);
- c = n;
}
+
+ clear_bit(DMF_BLOCK_IO, &md->flags);
+}
+
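+/*
+ * Fold the pushed-back bios back onto the head of the deferred list
+ * (and drop DMF_NOFLUSH_SUSPENDING) so they are reissued along with
+ * the rest of the deferred I/O.
+ */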
+static void __merge_pushback_list(struct mapped_device *md)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&md->pushback_lock, flags);
+ clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+ bio_list_merge_head(&md->deferred, &md->pushback);
+ bio_list_init(&md->pushback);
+ spin_unlock_irqrestore(&md->pushback_lock, flags);
+}
+
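+/*
+ * Per-device workqueue handler: runs the queued dm_wq_req with
+ * md->io_lock held for write and dispatches on its type.
+ */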
+static void dm_wq_work(struct work_struct *work)
+{
+ struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
+ struct mapped_device *md = req->md;
+
+ down_write(&md->io_lock);
+ switch (req->type) {
+ case DM_WQ_FLUSH_DEFERRED:
+ __flush_deferred_io(md);
+ break;
+ default:
+ DMERR("dm_wq_work: unrecognised work type %d", req->type);
+ BUG();
+ }
+ up_write(&md->io_lock);
+}
+
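+/*
+ * Fill in a dm_wq_req and put it on the device's workqueue.
+ */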
+static void dm_wq_queue(struct mapped_device *md, int type, void *context,
+ struct dm_wq_req *req)
+{
+ req->type = type;
+ req->md = md;
+ req->context = context;
+ INIT_WORK(&req->work, dm_wq_work);
+ queue_work(md->wq, &req->work);
+}
+
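+/*
+ * Queue the work and wait for it to complete, so the on-stack
+ * dm_wq_req stays valid for the whole operation.
+ */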
+static void dm_queue_flush(struct mapped_device *md, int type, void *context)
+{
+ struct dm_wq_req req;
+
+ dm_wq_queue(md, type, context, &req);
+ flush_workqueue(md->wq);
}
/*
{
int r = -EINVAL;
- down(&md->suspend_lock);
+ mutex_lock(&md->suspend_lock);
/* device must be suspended */
if (!dm_suspended(md))
r = __bind(md, table);
out:
- up(&md->suspend_lock);
+ mutex_unlock(&md->suspend_lock);
return r;
}
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
- unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
- struct bio *def;
- int r = -EINVAL;
+ int r = 0;
int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
- down(&md->suspend_lock);
+ mutex_lock(&md->suspend_lock);
- if (dm_suspended(md))
+ if (dm_suspended(md)) {
+ r = -EINVAL;
goto out_unlock;
+ }
map = dm_get_table(md);
if (!md->suspended_bdev) {
DMWARN("bdget failed in dm_suspend");
r = -ENOMEM;
- goto flush_and_out;
+ goto out;
}
- }
- /*
- * Flush I/O to the device.
- * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
- */
- if (do_lockfs && !noflush) {
- r = lock_fs(md);
- if (r)
- goto out;
+ /*
+ * Flush I/O to the device. noflush supersedes do_lockfs,
+ * because lock_fs() needs to flush I/Os.
+ */
+ if (do_lockfs) {
+ r = lock_fs(md);
+ if (r)
+ goto out;
+ }
}
/*
dm_table_unplug_all(map);
/*
- * Then we wait for the already mapped ios to
- * complete.
+ * Wait for the already-mapped ios to complete.
*/
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- smp_mb();
- if (!atomic_read(&md->pending) || signal_pending(current))
- break;
-
- io_schedule();
- }
- set_current_state(TASK_RUNNING);
+ r = dm_wait_for_completion(md);
down_write(&md->io_lock);
remove_wait_queue(&md->wait, &wait);
- if (noflush) {
- spin_lock_irqsave(&md->pushback_lock, flags);
- clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
- bio_list_merge_head(&md->deferred, &md->pushback);
- bio_list_init(&md->pushback);
- spin_unlock_irqrestore(&md->pushback_lock, flags);
- }
+ if (noflush)
+ __merge_pushback_list(md);
+ up_write(&md->io_lock);
/* were we interrupted ? */
- r = -EINTR;
- if (atomic_read(&md->pending)) {
- clear_bit(DMF_BLOCK_IO, &md->flags);
- def = bio_list_get(&md->deferred);
- __flush_deferred_io(md, def);
- up_write(&md->io_lock);
+ if (r < 0) {
+ dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
+
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
}
- up_write(&md->io_lock);
dm_table_postsuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
- r = 0;
-
-flush_and_out:
- if (r && noflush) {
- /*
- * Because there may be already I/Os in the pushback list,
- * flush them before return.
- */
- down_write(&md->io_lock);
-
- spin_lock_irqsave(&md->pushback_lock, flags);
- clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
- bio_list_merge_head(&md->deferred, &md->pushback);
- bio_list_init(&md->pushback);
- spin_unlock_irqrestore(&md->pushback_lock, flags);
-
- def = bio_list_get(&md->deferred);
- __flush_deferred_io(md, def);
- up_write(&md->io_lock);
- }
-
out:
if (r && md->suspended_bdev) {
bdput(md->suspended_bdev);
dm_table_put(map);
out_unlock:
- up(&md->suspend_lock);
+ mutex_unlock(&md->suspend_lock);
return r;
}
int dm_resume(struct mapped_device *md)
{
int r = -EINVAL;
- struct bio *def;
struct dm_table *map = NULL;
- down(&md->suspend_lock);
+ mutex_lock(&md->suspend_lock);
if (!dm_suspended(md))
goto out;
if (r)
goto out;
- down_write(&md->io_lock);
- clear_bit(DMF_BLOCK_IO, &md->flags);
-
- def = bio_list_get(&md->deferred);
- __flush_deferred_io(md, def);
- up_write(&md->io_lock);
+ dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
unlock_fs(md);
out:
dm_table_put(map);
- up(&md->suspend_lock);
+ mutex_unlock(&md->suspend_lock);
return r;
}
*---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
- kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)