block: remove duplicate BUG_ON() in bd_finish_claiming()
diff --git a/fs/block_dev.c b/fs/block_dev.c
index a29b4dc..99d6af8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -25,6 +25,7 @@
 #include <linux/uio.h>
 #include <linux/namei.h>
 #include <linux/log2.h>
+#include <linux/kmemleak.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -171,8 +172,18 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
-                               iov, offset, nr_segs, blkdev_get_blocks, NULL);
+       return blockdev_direct_IO_no_locking_newtrunc(rw, iocb, inode,
+                               I_BDEV(inode), iov, offset, nr_segs,
+                               blkdev_get_blocks, NULL);
+}
+
+int __sync_blockdev(struct block_device *bdev, int wait)
+{
+       if (!bdev)
+               return 0;
+       if (!wait)
+               return filemap_flush(bdev->bd_inode->i_mapping);
+       return filemap_write_and_wait(bdev->bd_inode->i_mapping);
 }
 
 /*
@@ -181,11 +192,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  */
 int sync_blockdev(struct block_device *bdev)
 {
-       int ret = 0;
-
-       if (bdev)
-               ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-       return ret;
+       return __sync_blockdev(bdev, 1);
 }
 EXPORT_SYMBOL(sync_blockdev);
 
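The new __sync_blockdev() splits the old behaviour on a wait flag, so a caller can kick off writeback early and only wait once it has to. A minimal sketch of that two-phase pattern (the helper name is hypothetical):

static int example_two_phase_sync(struct block_device *bdev)
{
	int err = __sync_blockdev(bdev, 0);	/* start async writeback */

	if (err)
		return err;
	/* ... queue other work here while writeback proceeds ... */
	return __sync_blockdev(bdev, 1);	/* now write and wait */
}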
@@ -198,7 +205,7 @@ int fsync_bdev(struct block_device *bdev)
 {
        struct super_block *sb = get_super(bdev);
        if (sb) {
-               int res = fsync_super(sb);
+               int res = sync_filesystem(sb);
                drop_super(sb);
                return res;
        }
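fsync_bdev() now routes through sync_filesystem() when a superblock is mounted on the device. For context, a hedged sketch in the style of the BLKFLSBUF ioctl path (the pairing with invalidate_bdev() is an assumption about that caller):

static void example_flush_and_invalidate(struct block_device *bdev)
{
	fsync_bdev(bdev);	/* sync_filesystem() if mounted, else sync_blockdev() */
	invalidate_bdev(bdev);	/* then drop clean cached pages */
}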
@@ -210,8 +217,6 @@ EXPORT_SYMBOL(fsync_bdev);
  * freeze_bdev  --  lock a filesystem and force it into a consistent state
  * @bdev:      blockdevice to lock
  *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
  * The reference counter (bd_fsfreeze_count) guarantees that only the last
@@ -226,46 +231,33 @@ struct super_block *freeze_bdev(struct block_device *bdev)
        int error = 0;
 
        mutex_lock(&bdev->bd_fsfreeze_mutex);
-       if (bdev->bd_fsfreeze_count > 0) {
-               bdev->bd_fsfreeze_count++;
+       if (++bdev->bd_fsfreeze_count > 1) {
+               /*
+                * We don't even need to grab a reference - the first call
+                 * to freeze_bdev grabs an active reference and only the last
+                * thaw_bdev drops it.
+                */
                sb = get_super(bdev);
+               drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }
-       bdev->bd_fsfreeze_count++;
-
-       down(&bdev->bd_mount_sem);
-       sb = get_super(bdev);
-       if (sb && !(sb->s_flags & MS_RDONLY)) {
-               sb->s_frozen = SB_FREEZE_WRITE;
-               smp_wmb();
-
-               __fsync_super(sb);
-
-               sb->s_frozen = SB_FREEZE_TRANS;
-               smp_wmb();
-
-               sync_blockdev(sb->s_bdev);
-
-               if (sb->s_op->freeze_fs) {
-                       error = sb->s_op->freeze_fs(sb);
-                       if (error) {
-                               printk(KERN_ERR
-                                       "VFS:Filesystem freeze failed\n");
-                               sb->s_frozen = SB_UNFROZEN;
-                               drop_super(sb);
-                               up(&bdev->bd_mount_sem);
-                               bdev->bd_fsfreeze_count--;
-                               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                               return ERR_PTR(error);
-                       }
-               }
-       }
 
+       sb = get_active_super(bdev);
+       if (!sb)
+               goto out;
+       error = freeze_super(sb);
+       if (error) {
+               deactivate_super(sb);
+               bdev->bd_fsfreeze_count--;
+               mutex_unlock(&bdev->bd_fsfreeze_mutex);
+               return ERR_PTR(error);
+       }
+       deactivate_super(sb);
+ out:
        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
-       return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
+       return sb;      /* thaw_bdev releases s->s_umount */
 }
 EXPORT_SYMBOL(freeze_bdev);
 
@@ -278,44 +270,26 @@ EXPORT_SYMBOL(freeze_bdev);
  */
 int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 {
-       int error = 0;
+       int error = -EINVAL;
 
        mutex_lock(&bdev->bd_fsfreeze_mutex);
-       if (!bdev->bd_fsfreeze_count) {
-               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               return -EINVAL;
-       }
+       if (!bdev->bd_fsfreeze_count)
+               goto out;
 
-       bdev->bd_fsfreeze_count--;
-       if (bdev->bd_fsfreeze_count > 0) {
-               if (sb)
-                       drop_super(sb);
-               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               return 0;
-       }
+       error = 0;
+       if (--bdev->bd_fsfreeze_count > 0)
+               goto out;
 
-       if (sb) {
-               BUG_ON(sb->s_bdev != bdev);
-               if (!(sb->s_flags & MS_RDONLY)) {
-                       if (sb->s_op->unfreeze_fs) {
-                               error = sb->s_op->unfreeze_fs(sb);
-                               if (error) {
-                                       printk(KERN_ERR
-                                               "VFS:Filesystem thaw failed\n");
-                                       sb->s_frozen = SB_FREEZE_TRANS;
-                                       bdev->bd_fsfreeze_count++;
-                                       mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                                       return error;
-                               }
-                       }
-                       sb->s_frozen = SB_UNFROZEN;
-                       smp_wmb();
-                       wake_up(&sb->s_wait_unfrozen);
-               }
-               drop_super(sb);
-       }
+       if (!sb)
+               goto out;
 
-       up(&bdev->bd_mount_sem);
+       error = thaw_super(sb);
+       if (error) {
+               bdev->bd_fsfreeze_count++;
+               mutex_unlock(&bdev->bd_fsfreeze_mutex);
+               return error;
+       }
+out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return 0;
 }
@@ -331,19 +305,13 @@ static int blkdev_readpage(struct file * file, struct page * page)
        return block_read_full_page(page, blkdev_get_block);
 }
 
-static int blkdev_readpages(struct file *file, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages)
-{
-       return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
-}
-
 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
        *pagep = NULL;
-       return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
-                               blkdev_get_block);
+       return block_write_begin_newtrunc(file, mapping, pos, len, flags,
+                               pagep, fsdata, blkdev_get_block);
 }
 
 static int blkdev_write_end(struct file *file, struct address_space *mapping,
@@ -391,15 +359,28 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
        return retval;
 }
        
-/*
- *     Filp is never NULL; the only case when ->fsync() is called with
- *     NULL first argument is nfsd_sync_dir() and that's not a directory.
- */
-static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+int blkdev_fsync(struct file *filp, int datasync)
 {
-       return sync_blockdev(I_BDEV(filp->f_mapping->host));
+       struct inode *bd_inode = filp->f_mapping->host;
+       struct block_device *bdev = I_BDEV(bd_inode);
+       int error;
+
+       /*
+        * There is no need to serialise calls to blkdev_issue_flush with
+        * i_mutex and doing so causes performance issues with concurrent
+        * O_SYNC writers to a block device.
+        */
+       mutex_unlock(&bd_inode->i_mutex);
+
+       error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
+       if (error == -EOPNOTSUPP)
+               error = 0;
+
+       mutex_lock(&bd_inode->i_mutex);
+
+       return error;
 }
+EXPORT_SYMBOL(blkdev_fsync);
 
 /*
  * pseudo-fs
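Seen from userspace, fsync() on a block device node now forces a volatile write cache flush rather than only syncing the page cache. An illustrative fragment (the device path is hypothetical):

#include <fcntl.h>
#include <unistd.h>

int example_fsync_bdev_node(const void *buf, size_t len)
{
	int fd = open("/dev/sdX", O_WRONLY);	/* hypothetical path */

	if (fd < 0)
		return -1;
	/* fsync() here ends up in blkdev_fsync() -> blkdev_issue_flush() */
	if (write(fd, buf, len) < 0 || fsync(fd) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}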
@@ -420,7 +401,6 @@ static void bdev_destroy_inode(struct inode *inode)
 {
        struct bdev_inode *bdi = BDEV_I(inode);
 
-       bdi->bdev.bd_inode_backing_dev_info = NULL;
        kmem_cache_free(bdev_cachep, bdi);
 }
 
@@ -431,7 +411,6 @@ static void init_once(void *foo)
 
        memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
-       sema_init(&bdev->bd_mount_sem, 1);
        INIT_LIST_HEAD(&bdev->bd_inodes);
        INIT_LIST_HEAD(&bdev->bd_list);
 #ifdef CONFIG_SYSFS
@@ -498,6 +477,11 @@ void __init bdev_cache_init(void)
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
+       /*
+        * This vfsmount structure is only used to obtain the
+        * blockdev_superblock, so tell kmemleak not to report it.
+        */
+       kmemleak_not_leak(bd_mnt);
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
 }
 
@@ -559,6 +543,16 @@ struct block_device *bdget(dev_t dev)
 
 EXPORT_SYMBOL(bdget);
 
+/**
+ * bdgrab -- Grab a reference to an already referenced block device
+ * @bdev:      Block device to grab a reference to.
+ */
+struct block_device *bdgrab(struct block_device *bdev)
+{
+       atomic_inc(&bdev->bd_inode->i_count);
+       return bdev;
+}
+
 long nr_blockdev_pages(void)
 {
        struct block_device *bdev;
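bdgrab() is for contexts that already hold a reference and cannot sleep, where bdget() is off limits; a sketch of the intended pattern (the helper is hypothetical):

static struct block_device *example_grab(struct block_device *found)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = bdgrab(found);	/* @found must already be referenced */
	spin_unlock(&bdev_lock);
	return bdev;		/* caller pairs this with bdput() */
}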
@@ -629,41 +623,233 @@ void bd_forget(struct inode *inode)
                iput(bdev->bd_inode);
 }
 
-int bd_claim(struct block_device *bdev, void *holder)
+/**
+ * bd_may_claim - test whether a block device can be claimed
+ * @bdev: block device of interest
+ * @whole: whole block device containing @bdev, may equal @bdev
+ * @holder: holder trying to claim @bdev
+ *
+ * Test whether @bdev can be claimed by @holder.
+ *
+ * CONTEXT:
+ * spin_lock(&bdev_lock).
+ *
+ * RETURNS:
+ * %true if @bdev can be claimed, %false otherwise.
+ */
+static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+                        void *holder)
 {
-       int res;
-       spin_lock(&bdev_lock);
-
-       /* first decide result */
        if (bdev->bd_holder == holder)
-               res = 0;         /* already a holder */
+               return true;     /* already a holder */
        else if (bdev->bd_holder != NULL)
-               res = -EBUSY;    /* held by someone else */
+               return false;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
-               res = 0;         /* is a whole device which isn't held */
+               return true;     /* is a whole device which isn't held */
 
-       else if (bdev->bd_contains->bd_holder == bd_claim)
-               res = 0;         /* is a partition of a device that is being partitioned */
-       else if (bdev->bd_contains->bd_holder != NULL)
-               res = -EBUSY;    /* is a partition of a held device */
+       else if (whole->bd_holder == bd_claim)
+               return true;     /* is a partition of a device that is being partitioned */
+       else if (whole->bd_holder != NULL)
+               return false;    /* is a partition of a held device */
        else
-               res = 0;         /* is a partition of an un-held device */
+               return true;     /* is a partition of an un-held device */
+}
 
-       /* now impose change */
-       if (res==0) {
-               /* note that for a whole device bd_holders
-                * will be incremented twice, and bd_holder will
-                * be set to bd_claim before being set to holder
-                */
-               bdev->bd_contains->bd_holders ++;
-               bdev->bd_contains->bd_holder = bd_claim;
-               bdev->bd_holders++;
-               bdev->bd_holder = holder;
+/**
+ * bd_prepare_to_claim - prepare to claim a block device
+ * @bdev: block device of interest
+ * @whole: the whole device containing @bdev, may equal @bdev
+ * @holder: holder trying to claim @bdev
+ *
+ * Prepare to claim @bdev.  This function fails if @bdev is already
+ * claimed by another holder and waits if another claiming is in
+ * progress.  This function doesn't actually claim.  On successful
+ * return, the caller has ownership of bd_claiming and bd_holder[s].
+ *
+ * CONTEXT:
+ * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
+ * it multiple times.
+ *
+ * RETURNS:
+ * 0 if @bdev can be claimed, -EBUSY otherwise.
+ */
+static int bd_prepare_to_claim(struct block_device *bdev,
+                              struct block_device *whole, void *holder)
+{
+retry:
+       /* if someone else claimed, fail */
+       if (!bd_may_claim(bdev, whole, holder))
+               return -EBUSY;
+
+       /* if someone else is claiming, wait for it to finish */
+       if (whole->bd_claiming && whole->bd_claiming != holder) {
+               wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+               DEFINE_WAIT(wait);
+
+               prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&bdev_lock);
+               schedule();
+               finish_wait(wq, &wait);
+               spin_lock(&bdev_lock);
+               goto retry;
+       }
+
+       /* yay, all mine */
+       return 0;
+}
+
+/**
+ * bd_start_claiming - start claiming a block device
+ * @bdev: block device of interest
+ * @holder: holder trying to claim @bdev
+ *
+ * @bdev is about to be opened exclusively.  Check that @bdev can be opened
+ * exclusively and mark that an exclusive open is in progress.  Each
+ * successful call to this function must be matched with a call to
+ * either bd_finish_claiming() or bd_abort_claiming() (which do not
+ * fail).
+ *
+ * This function is used to gain exclusive access to the block device
+ * without actually causing other exclusive open attempts to fail. It
+ * should be used when the open sequence itself requires exclusive
+ * access but may subsequently fail.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to the block device containing @bdev on success, ERR_PTR()
+ * value on failure.
+ */
+static struct block_device *bd_start_claiming(struct block_device *bdev,
+                                             void *holder)
+{
+       struct gendisk *disk;
+       struct block_device *whole;
+       int partno, err;
+
+       might_sleep();
+
+       /*
+        * @bdev might not have been initialized properly yet, look up
+        * and grab the outer block device the hard way.
+        */
+       disk = get_gendisk(bdev->bd_dev, &partno);
+       if (!disk)
+               return ERR_PTR(-ENXIO);
+
+       whole = bdget_disk(disk, 0);
+       module_put(disk->fops->owner);
+       put_disk(disk);
+       if (!whole)
+               return ERR_PTR(-ENOMEM);
+
+       /* prepare to claim, if successful, mark claiming in progress */
+       spin_lock(&bdev_lock);
+
+       err = bd_prepare_to_claim(bdev, whole, holder);
+       if (err == 0) {
+               whole->bd_claiming = holder;
+               spin_unlock(&bdev_lock);
+               return whole;
+       } else {
+               spin_unlock(&bdev_lock);
+               bdput(whole);
+               return ERR_PTR(err);
        }
+}
+
+/* releases bdev_lock */
+static void __bd_abort_claiming(struct block_device *whole, void *holder)
+{
+       BUG_ON(whole->bd_claiming != holder);
+       whole->bd_claiming = NULL;
+       wake_up_bit(&whole->bd_claiming, 0);
+
        spin_unlock(&bdev_lock);
-       return res;
+       bdput(whole);
 }
 
+/**
+ * bd_abort_claiming - abort claiming a block device
+ * @whole: whole block device returned by bd_start_claiming()
+ * @holder: holder trying to claim @bdev
+ *
+ * Abort the claiming started by bd_start_claiming().  Note that
+ * @whole is not the block device to be claimed but the whole device
+ * returned by bd_start_claiming().
+ *
+ * CONTEXT:
+ * Grabs and releases bdev_lock.
+ */
+static void bd_abort_claiming(struct block_device *whole, void *holder)
+{
+       spin_lock(&bdev_lock);
+       __bd_abort_claiming(whole, holder);             /* releases bdev_lock */
+}
+
+/* increment holders when we have a legitimate claim. requires bdev_lock */
+static void __bd_claim(struct block_device *bdev, struct block_device *whole,
+                                       void *holder)
+{
+       /* note that for a whole device bd_holders
+        * will be incremented twice, and bd_holder will
+        * be set to bd_claim before being set to holder
+        */
+       whole->bd_holders++;
+       whole->bd_holder = bd_claim;
+       bdev->bd_holders++;
+       bdev->bd_holder = holder;
+}
+
+/**
+ * bd_finish_claiming - finish claiming a block device
+ * @bdev: block device of interest (passed to bd_start_claiming())
+ * @whole: whole block device returned by bd_start_claiming()
+ * @holder: holder trying to claim @bdev
+ *
+ * Finish the claiming started by bd_start_claiming().
+ *
+ * CONTEXT:
+ * Grabs and releases bdev_lock.
+ */
+static void bd_finish_claiming(struct block_device *bdev,
+                               struct block_device *whole, void *holder)
+{
+       spin_lock(&bdev_lock);
+       BUG_ON(!bd_may_claim(bdev, whole, holder));
+       __bd_claim(bdev, whole, holder);
+       __bd_abort_claiming(whole, holder); /* not actually an abort */
+}
+
+/**
+ * bd_claim - claim a block device
+ * @bdev: block device to claim
+ * @holder: holder trying to claim @bdev
+ *
+ * Try to claim @bdev which must have been opened successfully.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if successful, -EBUSY if @bdev is already claimed.
+ */
+int bd_claim(struct block_device *bdev, void *holder)
+{
+       struct block_device *whole = bdev->bd_contains;
+       int res;
+
+       might_sleep();
+
+       spin_lock(&bdev_lock);
+       res = bd_prepare_to_claim(bdev, whole, holder);
+       if (res == 0)
+               __bd_claim(bdev, whole, holder);
+       spin_unlock(&bdev_lock);
+
+       return res;
+}
 EXPORT_SYMBOL(bd_claim);
 
 void bd_release(struct block_device *bdev)
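The three claiming helpers compose into the protocol the reworked open paths below follow; a condensed, hedged sketch (the mode flags are an assumption):

static int example_open_excl(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bd_start_claiming(bdev, holder);
	int err;

	if (IS_ERR(whole))
		return PTR_ERR(whole);
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE);
	if (err) {
		bd_abort_claiming(whole, holder);	/* drops claim and ref */
		return err;
	}
	bd_finish_claiming(bdev, whole, holder);	/* commits the claim */
	return 0;
}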
@@ -1100,7 +1286,7 @@ EXPORT_SYMBOL(revalidate_disk);
 int check_disk_change(struct block_device *bdev)
 {
        struct gendisk *disk = bdev->bd_disk;
-       struct block_device_operations * bdops = disk->fops;
+       const struct block_device_operations *bdops = disk->fops;
 
        if (!bdops->media_changed)
                return 0;
@@ -1228,8 +1414,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
                }
        } else {
-               put_disk(disk);
                module_put(disk->fops->owner);
+               put_disk(disk);
                disk = NULL;
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
@@ -1277,6 +1463,7 @@ EXPORT_SYMBOL(blkdev_get);
 
 static int blkdev_open(struct inode * inode, struct file * filp)
 {
+       struct block_device *whole = NULL;
        struct block_device *bdev;
        int res;
 
@@ -1299,22 +1486,25 @@ static int blkdev_open(struct inode * inode, struct file * filp)
        if (bdev == NULL)
                return -ENOMEM;
 
+       if (filp->f_mode & FMODE_EXCL) {
+               whole = bd_start_claiming(bdev, filp);
+               if (IS_ERR(whole)) {
+                       bdput(bdev);
+                       return PTR_ERR(whole);
+               }
+       }
+
        filp->f_mapping = bdev->bd_inode->i_mapping;
 
        res = blkdev_get(bdev, filp->f_mode);
-       if (res)
-               return res;
 
-       if (filp->f_mode & FMODE_EXCL) {
-               res = bd_claim(bdev, filp);
-               if (res)
-                       goto out_blkdev_put;
+       if (whole) {
+               if (res == 0)
+                       bd_finish_claiming(bdev, whole, filp);
+               else
+                       bd_abort_claiming(whole, filp);
        }
 
-       return 0;
-
- out_blkdev_put:
-       blkdev_put(bdev, filp->f_mode);
        return res;
 }
 
@@ -1390,6 +1580,33 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 }
 
 /*
+ * Write data to the block device.  Only intended for the block device itself
+ * and the raw driver which basically is a fake block device.
+ *
+ * Does not take i_mutex for the write and thus is not for general purpose
+ * use.
+ */
+ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       BUG_ON(iocb->ki_pos != pos);
+
+       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       if (ret > 0 || ret == -EIOCBQUEUED) {
+               ssize_t err;
+
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0 && ret > 0)
+                       ret = err;
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_aio_write);
+
+/*
  * Try to release a page associated with block device when the system
  * is under memory pressure.
  */
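With blkdev_aio_write() in place, O_SYNC writers get the same flush without an explicit fsync(), since generic_write_sync() runs after each write. An illustrative userspace fragment (hypothetical device path):

#include <fcntl.h>
#include <unistd.h>

int example_osync_write(const void *buf, size_t len)
{
	int fd = open("/dev/sdX", O_WRONLY | O_SYNC);	/* hypothetical */

	if (fd < 0)
		return -1;
	/* each write() triggers generic_write_sync() -> blkdev_fsync() */
	if (write(fd, buf, len) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}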
@@ -1405,7 +1622,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 
 static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
-       .readpages      = blkdev_readpages,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = blkdev_write_begin,
@@ -1422,9 +1638,9 @@ const struct file_operations def_blk_fops = {
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write_nolock,
+       .aio_write      = blkdev_aio_write,
        .mmap           = generic_file_mmap,
-       .fsync          = block_fsync,
+       .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
@@ -1499,27 +1715,34 @@ EXPORT_SYMBOL(lookup_bdev);
  */
 struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
 {
-       struct block_device *bdev;
-       int error = 0;
+       struct block_device *bdev, *whole;
+       int error;
 
        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;
 
+       whole = bd_start_claiming(bdev, holder);
+       if (IS_ERR(whole)) {
+               bdput(bdev);
+               return whole;
+       }
+
        error = blkdev_get(bdev, mode);
        if (error)
-               return ERR_PTR(error);
+               goto out_abort_claiming;
+
        error = -EACCES;
        if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
-               goto blkdev_put;
-       error = bd_claim(bdev, holder);
-       if (error)
-               goto blkdev_put;
+               goto out_blkdev_put;
 
+       bd_finish_claiming(bdev, whole, holder);
        return bdev;
-       
-blkdev_put:
+
+out_blkdev_put:
        blkdev_put(bdev, mode);
+out_abort_claiming:
+       bd_abort_claiming(whole, holder);
        return ERR_PTR(error);
 }