atomic_t io_count;
struct bio *bio;
unsigned long start_time;
+ spinlock_t endio_lock;
};
/*
* A list of ios that arrived while we were suspended.
*/
- atomic_t pending;
+ atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
{
struct mapped_device *md = io->md;
int cpu;
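+ /* In-flight accounting is now tracked per data direction (read vs write). */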
+ int rw = bio_data_dir(io->bio);
io->start_time = jiffies;
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
- dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+ dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}
static void end_io_acct(struct dm_io *io)
* After this is decremented the bio must not be touched if it is
* a barrier.
*/
- dm_disk(md)->part0.in_flight = pending =
- atomic_dec_return(&md->pending);
+ dm_disk(md)->part0.in_flight[rw] = pending =
+ atomic_dec_return(&md->pending[rw]);
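+ /* Suspend waits for I/O in both directions, so fold the other counter in before testing for idle. */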
+ pending += atomic_read(&md->pending[rw^0x1]);
/* nudge anyone waiting on suspend queue */
if (!pending)
struct dm_table *dm_get_table(struct mapped_device *md)
{
struct dm_table *t;
+ unsigned long flags;
- read_lock(&md->map_lock);
+ read_lock_irqsave(&md->map_lock, flags);
t = md->map;
if (t)
dm_table_get(t);
- read_unlock(&md->map_lock);
+ read_unlock_irqrestore(&md->map_lock, flags);
return t;
}
struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */
- if (error && !(io->error > 0 && __noflush_suspending(md)))
- io->error = error;
+ if (unlikely(error)) {
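+ /* Clone bios may complete concurrently, so serialise updates of io->error. */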
+ spin_lock_irqsave(&io->endio_lock, flags);
+ if (!(io->error > 0 && __noflush_suspending(md)))
+ io->error = error;
+ spin_unlock_irqrestore(&io->endio_lock, flags);
+ }
if (atomic_dec_and_test(&io->io_count)) {
if (io->error == DM_ENDIO_REQUEUE) {
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md)) {
- if (!bio_barrier(io->bio))
+ if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
bio_list_add_head(&md->deferred,
io->bio);
} else
io_error = io->error;
bio = io->bio;
- if (bio_barrier(bio)) {
+ if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
/*
* There can be just one barrier request so we use
* a per-device variable for error reporting.
dm_put(md);
}
+static void free_rq_clone(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
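+ /* Free the clone's bios, then the per-request dm_rq_target_io. */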
+ blk_rq_unprep_clone(clone);
+ free_rq_tio(tio);
+}
+
static void dm_unprep_request(struct request *rq)
{
struct request *clone = rq->special;
- struct dm_rq_target_io *tio = clone->end_io_data;
rq->special = NULL;
rq->cmd_flags &= ~REQ_DONTPREP;
- blk_rq_unprep_clone(clone);
- free_rq_tio(tio);
+ free_rq_clone(clone);
}
/*
rq->sense_len = clone->sense_len;
}
- BUG_ON(clone->bio);
- free_rq_tio(tio);
+ free_rq_clone(clone);
blk_end_request_all(rq, error);
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
ci.map = dm_get_table(md);
if (unlikely(!ci.map)) {
- if (!bio_barrier(bio))
+ if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
bio_io_error(bio);
else
if (!md->barrier_error)
atomic_set(&ci.io->io_count, 1);
ci.io->bio = bio;
ci.io->md = md;
+ spin_lock_init(&ci.io->endio_lock);
ci.sector = bio->bi_sector;
ci.sector_count = bio_sectors(bio);
if (unlikely(bio_empty_barrier(bio)))
* we have to queue this io for later.
*/
if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
- unlikely(bio_barrier(bio))) {
+ unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
up_read(&md->io_lock);
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
{
struct mapped_device *md = q->queuedata;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
return r;
}
-static struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
if (!md->disk)
goto bad_disk;
- atomic_set(&md->pending, 0);
+ atomic_set(&md->pending[0], 0);
+ atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
bad_bdev:
destroy_workqueue(md->wq);
bad_thread:
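+ /* add_disk() has already run, so remove the gendisk before dropping the last reference. */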
+ del_gendisk(md->disk);
put_disk(md->disk);
bad_disk:
blk_cleanup_queue(md->queue);
{
struct request_queue *q = md->queue;
sector_t size;
+ unsigned long flags;
size = dm_table_get_size(t);
__bind_mempools(md, t);
- write_lock(&md->map_lock);
+ write_lock_irqsave(&md->map_lock, flags);
md->map = t;
dm_table_set_restrictions(t, q, limits);
- write_unlock(&md->map_lock);
+ write_unlock_irqrestore(&md->map_lock, flags);
return 0;
}
static void __unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
+ unsigned long flags;
if (!map)
return;
dm_table_event_callback(map, NULL, NULL);
- write_lock(&md->map_lock);
+ write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
- write_unlock(&md->map_lock);
+ write_unlock_irqrestore(&md->map_lock, flags);
dm_table_destroy(map);
}
break;
}
spin_unlock_irqrestore(q->queue_lock, flags);
- } else if (!atomic_read(&md->pending))
+ } else if (!atomic_read(&md->pending[0]) &&
+ !atomic_read(&md->pending[1]))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
if (dm_request_based(md))
generic_make_request(c);
else {
- if (bio_barrier(c))
+ if (bio_rw_flagged(c, BIO_RW_BARRIER))
process_barrier(md, c);
else
__split_and_process_bio(md, c);
goto out;
}
- /*
- * It is enought that blk_queue_ordered() is called only once when
- * the first bio-based table is bound.
- *
- * This setting should be moved to alloc_dev() when request-based dm
- * supports barrier.
- */
- if (!md->map && dm_table_bio_based(table))
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
-
__unbind(md);
r = __bind(md, table, &limits);
kfree(pools);
}
-static struct block_device_operations dm_blk_dops = {
+static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,