* Heinz Mauelshagen <mge@sistina.com>, Feb 2002
*
* Support for falling back on the write file operation when the address space
- * operations prepare_write and/or commit_write are not available on the
- * backing filesystem.
+ * operation write_begin is not available on the backing filesystem.
* Anton Altaparmakov, 16 Feb 2005
*
* Still To Fix:
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);
+static int max_part;
+static int part_shift;
+
/*
* Transfer functions
*/
* space operations write_begin and write_end.
*/
static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
- int bsize, loff_t pos, struct page *unused)
+ loff_t pos, struct page *unused)
{
struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
struct address_space *mapping = file->f_mapping;
* filesystems.
*/
static int do_lo_send_direct_write(struct loop_device *lo,
- struct bio_vec *bvec, int bsize, loff_t pos, struct page *page)
+ struct bio_vec *bvec, loff_t pos, struct page *page)
{
ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
kmap(bvec->bv_page) + bvec->bv_offset,
* destination pages of the backing file.
*/
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
- int bsize, loff_t pos, struct page *page)
+ loff_t pos, struct page *page)
{
int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
bvec->bv_offset, bvec->bv_len, pos >> 9);
return ret;
}
-static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
- loff_t pos)
+static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
- int (*do_lo_send)(struct loop_device *, struct bio_vec *, int, loff_t,
+ int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
struct bio_vec *bvec;
struct page *page = NULL;
}
}
bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_send(lo, bvec, bsize, pos, page);
+ ret = do_lo_send(lo, bvec, pos, page);
if (ret < 0)
break;
pos += bvec->bv_len;
struct loop_device *lo = p->lo;
struct page *page = buf->page;
sector_t IV;
- size_t size;
- int ret;
+ int size, ret;
ret = buf->ops->confirm(pipe, buf);
if (unlikely(ret))
int ret;
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
- if (bio_rw(bio) == WRITE)
- ret = lo_send(lo, bio, lo->lo_blocksize, pos);
- else
+
+ if (bio_rw(bio) == WRITE) {
+ int barrier = bio_barrier(bio);
+ struct file *file = lo->lo_backing_file;
+
+ if (barrier) {
+ if (unlikely(!file->f_op->fsync)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = vfs_fsync(file, file->f_path.dentry, 0);
+ if (unlikely(ret)) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ ret = lo_send(lo, bio, pos);
+
+ if (barrier && !ret) {
+ ret = vfs_fsync(file, file->f_path.dentry, 0);
+ if (unlikely(ret))
+ ret = -EIO;
+ }
+ } else
ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+
+out:
return ret;
}
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
+ bio_list_add(&lo->lo_bio_list, bio);
}
/*
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
- struct bio *bio;
-
- if ((bio = lo->lo_bio)) {
- if (bio == lo->lo_biotail)
- lo->lo_biotail = NULL;
- lo->lo_bio = bio->bi_next;
- bio->bi_next = NULL;
- }
-
- return bio;
+ return bio_list_pop(&lo->lo_bio_list);
}
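
For reference, a minimal sketch of the list behaviour the bio_list helpers are relied on to provide here (names follow the generic bio_list API; this is an illustration of the semantics, not the kernel's own definition):

struct bio_list {
	struct bio *head;
	struct bio *tail;
};

/* Append at the tail, like the open-coded lo_biotail logic being removed. */
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;
	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

/* Detach and return the head, or NULL when the list is empty. */
static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bio->bi_next;
		if (!bl->head)
			bl->tail = NULL;
		bio->bi_next = NULL;
	}
	return bio;
}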
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
struct loop_device *lo = q->queuedata;
- clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+ queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
blk_run_address_space(lo->lo_backing_file->f_mapping);
}
set_user_nice(current, -20);
- while (!kthread_should_stop() || lo->lo_bio) {
+ while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
wait_event_interruptible(lo->lo_event,
- lo->lo_bio || kthread_should_stop());
+ !bio_list_empty(&lo->lo_bio_list) ||
+ kthread_should_stop());
- if (!lo->lo_bio)
+ if (bio_list_empty(&lo->lo_bio_list))
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
}
/*
+ * Helper to flush the bios queued for the loop device while keeping the loop thread running
+ */
+static int loop_flush(struct loop_device *lo)
+{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (!lo->lo_thread)
+ return 0;
+
+ return loop_switch(lo, NULL);
+}
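
loop_switch() itself is outside this hunk; as a hedged sketch of the mechanism the flush relies on (reconstructed from the do_loop_switch() handling below, so treat the details as assumptions): the caller queues a zero-payload control bio through the loop thread and waits for it, which drains every bio queued ahead of it.

/* Sketch only: queue a control bio and wait for do_loop_switch() to run it. */
struct switch_request {
	struct file *file;
	struct completion wait;
};

static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	if (!bio)
		return -ENOMEM;
	init_completion(&w.wait);
	w.file = file;			/* NULL means "flush only" */
	bio->bi_private = &w;
	bio->bi_bdev = NULL;		/* marks this bio as a switch request */
	loop_make_request(lo->lo_queue, bio);
	wait_for_completion(&w.wait);
	return 0;
}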
+
+/*
* Do the actual switch; called from the BIO completion routine
*/
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
struct file *file = p->file;
struct file *old_file = lo->lo_backing_file;
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping;
+
+ /* if no new file, only flush of queued bios requested */
+ if (!file)
+ goto out;
+ mapping = file->f_mapping;
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+out:
complete(&p->wait);
}
* This can only work if the loop device is used read-only, and if the
* new backing store is the same size and type as the old backing store.
*/
-static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
- struct block_device *bdev, unsigned int arg)
+static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ unsigned int arg)
{
struct file *file, *old_file;
struct inode *inode;
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
goto out_putf;
- /* new backing store needs to support loop (eg splice_read) */
- if (!inode->i_fop->splice_read)
- goto out_putf;
-
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
goto out_putf;
fput(old_file);
+ if (max_part > 0)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
out_putf:
return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
-static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
+static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
struct file *file, *f;
while (is_loop_device(f)) {
struct loop_device *l;
- if (f->f_mapping->host->i_rdev == lo_file->f_mapping->host->i_rdev)
+ if (f->f_mapping->host->i_bdev == bdev)
goto out_putf;
l = f->f_mapping->host->i_bdev->bd_disk->private_data;
error = -EINVAL;
if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
const struct address_space_operations *aops = mapping->a_ops;
- /*
- * If we can't read - sorry. If we only can't write - well,
- * it's going to be read-only.
- */
- if (!file->f_op->splice_read)
- goto out_putf;
- if (aops->prepare_write || aops->write_begin)
+
+ if (aops->write_begin)
lo_flags |= LO_FLAGS_USE_AOPS;
if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
lo_flags |= LO_FLAGS_READ_ONLY;
goto out_putf;
}
- if (!(lo_file->f_mode & FMODE_WRITE))
+ if (!(mode & FMODE_WRITE))
lo_flags |= LO_FLAGS_READ_ONLY;
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- lo->lo_bio = lo->lo_biotail = NULL;
+ bio_list_init(&lo->lo_bio_list);
/*
* set queue make_request_fn, and add limits based on lower level
lo->lo_queue->queuedata = lo;
lo->lo_queue->unplug_fn = loop_unplug;
+ if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+ blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);
+
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
}
lo->lo_state = Lo_bound;
wake_up_process(lo->lo_thread);
+ if (max_part > 0)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
out_clr:
kthread_stop(lo->lo_thread);
+ lo->lo_queue->unplug_fn = NULL;
lo->lo_backing_file = NULL;
loop_release_xfer(lo);
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- invalidate_bdev(bdev);
+ if (bdev)
+ invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
- bd_set_size(bdev, 0);
+ if (bdev)
+ bd_set_size(bdev, 0);
mapping_set_gfp_mask(filp->f_mapping, gfp);
lo->lo_state = Lo_unbound;
- fput(filp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
+ if (max_part > 0)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
+ mutex_unlock(&lo->lo_ctl_mutex);
+ /*
+ * Need not hold lo_ctl_mutex to fput backing file.
+ * Calling fput holding lo_ctl_mutex triggers a circular
+ * lock dependency possibility warning as fput can take
+ * bd_mutex which is usually taken before lo_ctl_mutex.
+ */
+ fput(filp);
return 0;
}
{
int err;
struct loop_func_table *xfer;
+ uid_t uid = current_uid();
- if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
+ if (lo->lo_encrypt_key_size &&
+ lo->lo_key_owner != uid &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
if (lo->lo_state != Lo_bound)
if (info->lo_encrypt_key_size) {
memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
info->lo_encrypt_key_size);
- lo->lo_key_owner = current->uid;
+ lo->lo_key_owner = uid;
}
return 0;
return err;
}
-static int lo_ioctl(struct inode * inode, struct file * file,
+static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+{
+ int err;
+ sector_t sec;
+ loff_t sz;
+
+ err = -ENXIO;
+ if (unlikely(lo->lo_state != Lo_bound))
+ goto out;
+ err = figure_loop_size(lo);
+ if (unlikely(err))
+ goto out;
+ sec = get_capacity(lo->lo_disk);
+ /* sector_t may be narrower than loff_t, so widen before the shift */
+ sz = sec;
+ sz <<= 9;
+ mutex_lock(&bdev->bd_mutex);
+ bd_set_size(bdev, sz);
+ mutex_unlock(&bdev->bd_mutex);
+
+ out:
+ return err;
+}
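
A userspace sketch of how the new ioctl might be exercised after the backing file has been grown (assuming LOOP_SET_CAPACITY is exported through <linux/loop.h>; the device path and error handling are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	/* hypothetical device node; its backing file was already extended */
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Ask the loop driver to re-read the backing file size and update
	 * both the gendisk capacity and the block device size.
	 */
	if (ioctl(fd, LOOP_SET_CAPACITY, 0) < 0)
		perror("LOOP_SET_CAPACITY");
	close(fd);
	return 0;
}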
+
+static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
- err = loop_set_fd(lo, file, inode->i_bdev, arg);
+ err = loop_set_fd(lo, mode, bdev, arg);
break;
case LOOP_CHANGE_FD:
- err = loop_change_fd(lo, file, inode->i_bdev, arg);
+ err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
- err = loop_clr_fd(lo, inode->i_bdev);
+ /* on success, loop_clr_fd has already dropped lo_ctl_mutex */
+ err = loop_clr_fd(lo, bdev);
+ if (!err)
+ goto out_unlocked;
break;
case LOOP_SET_STATUS:
err = loop_set_status_old(lo, (struct loop_info __user *) arg);
case LOOP_GET_STATUS64:
err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
break;
+ case LOOP_SET_CAPACITY:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_capacity(lo, bdev);
+ break;
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
mutex_unlock(&lo->lo_ctl_mutex);
+
+out_unlocked:
return err;
}
return err;
}
-static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
switch(cmd) {
lo, (struct compat_loop_info __user *) arg);
mutex_unlock(&lo->lo_ctl_mutex);
break;
+ case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
arg = (unsigned long) compat_ptr(arg);
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
- err = lo_ioctl(inode, file, cmd, arg);
+ err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
err = -ENOIOCTLCMD;
}
#endif
-static int lo_open(struct inode *inode, struct file *file)
+static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = bdev->bd_disk->private_data;
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
return 0;
}
-static int lo_release(struct inode *inode, struct file *file)
+static int lo_release(struct gendisk *disk, fmode_t mode)
{
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = disk->private_data;
+ int err;
mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
- if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
- loop_clr_fd(lo, inode->i_bdev);
+ if (--lo->lo_refcnt)
+ goto out;
- mutex_unlock(&lo->lo_ctl_mutex);
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ /*
+ * In autoclear mode, stop the loop thread
+ * and remove configuration after last close.
+ */
+ err = loop_clr_fd(lo, NULL);
+ if (!err)
+ goto out_unlocked;
+ } else {
+ /*
+ * Otherwise keep the thread (if running) and the configuration,
+ * but flush any bios still queued for the thread.
+ */
+ loop_flush(lo);
+ }
+out:
+ mutex_unlock(&lo->lo_ctl_mutex);
+out_unlocked:
return 0;
}
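
The changed open/release/ioctl prototypes only make sense against a matching block_device_operations table; a sketch of how lo_fops would line up with them (declaration style approximated for this kernel generation):

static struct block_device_operations lo_fops = {
	.owner		= THIS_MODULE,
	.open		= lo_open,
	.release	= lo_release,
	.ioctl		= lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= lo_compat_ioctl,
#endif
};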
static int max_loop;
module_param(max_loop, int, 0);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
+module_param(max_part, int, 0);
+MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
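
To make the new parameter concrete, a worked example of the minor-number split it implies, using the part_shift = fls(max_part) computation from loop_init() further down (MINORBITS = 20 is assumed for this tree):

/*
 * Example: loading with max_part=15
 *
 *   part_shift           = fls(15)          = 4
 *   minors per loop disk = 1 << part_shift  = 16   (alloc_disk(1 << part_shift))
 *   /dev/loopN           -> minor  N << 4
 *   /dev/loopNpP         -> minor (N << 4) + P, with 1 <= P <= 15
 *   device number space  = 1UL << (MINORBITS - part_shift) = 65536 loop devices
 */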
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
if (!lo->lo_queue)
goto out_free_dev;
- disk = lo->lo_disk = alloc_disk(1);
+ disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_queue;
init_waitqueue_head(&lo->lo_event);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = i << part_shift;
disk->fops = &lo_fops;
disk->private_data = lo;
disk->queue = lo->lo_queue;
* themselves and have kernel automatically instantiate actual
* device on-demand.
*/
- if (max_loop > 1UL << MINORBITS)
+
+ part_shift = 0;
+ if (max_part > 0)
+ part_shift = fls(max_part);
+
+ if (max_loop > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (max_loop) {
range = max_loop;
} else {
nr = 8;
- range = 1UL << MINORBITS;
+ range = 1UL << (MINORBITS - part_shift);
}
if (register_blkdev(LOOP_MAJOR, "loop"))
unsigned long range;
struct loop_device *lo, *next;
- range = max_loop ? max_loop : 1UL << MINORBITS;
+ range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);
list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
loop_del_one(lo);