X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fblock_dev.c;h=2f2873b9a04109f1c16e5d8f7863f08f50341e59;hb=e71bf0d0ee89e51b92776391c5634938236977d5;hp=f5958f413bd1135787e2dc731f92b562c452d390;hpb=7f9c51f0d9783c78db5c2aa16806d0c256ac667f;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/block_dev.c b/fs/block_dev.c index f5958f4..2f2873b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -5,31 +5,35 @@ * Copyright (C) 2001 Andrea Arcangeli SuSE */ -#include #include #include #include #include #include #include -#include #include +#include #include #include #include #include #include +#include #include #include #include #include +#include #include +#include "internal.h" struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; +static const struct address_space_operations def_blk_aops; + static inline struct bdev_inode *BDEV_I(struct inode *inode) { return container_of(inode, struct bdev_inode, vfs_inode); @@ -55,17 +59,19 @@ static sector_t max_block(struct block_device *bdev) return retval; } -/* Kill _all_ buffers, dirty or not.. */ +/* Kill _all_ buffers and pagecache , dirty or not.. */ static void kill_bdev(struct block_device *bdev) { - invalidate_bdev(bdev, 1); + if (bdev->bd_inode->i_mapping->nrpages == 0) + return; + invalidate_bh_lrus(); truncate_inode_pages(bdev->bd_inode->i_mapping, 0); } int set_blocksize(struct block_device *bdev, int size) { /* Size must be a power of two, and between 512 and PAGE_SIZE */ - if (size > PAGE_SIZE || size < 512 || (size & (size-1))) + if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) return -EINVAL; /* Size cannot be smaller than the size supported by the device */ @@ -178,19 +184,31 @@ static int blkdev_readpage(struct file * file, struct page * page) return block_read_full_page(page, blkdev_get_block); } -static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) +static int blkdev_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) { - return block_prepare_write(page, from, to, blkdev_get_block); + *pagep = NULL; + return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + blkdev_get_block); } -static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to) +static int blkdev_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) { - return block_commit_write(page, from, to); + int ret; + ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); + + unlock_page(page); + page_cache_release(page); + + return ret; } /* * private llseek: - * for a block special file file->f_dentry->d_inode->i_size is zero + * for a block special file file->f_path.dentry->d_inode->i_size is zero * so we compute the size by hand (just as in block_read/write above) */ static loff_t block_llseek(struct file *file, loff_t offset, int origin) @@ -235,11 +253,11 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); -static kmem_cache_t * bdev_cachep __read_mostly; +static struct kmem_cache * bdev_cachep __read_mostly; static struct inode *bdev_alloc_inode(struct super_block *sb) { - struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL); + struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -253,24 +271,20 @@ 
static void bdev_destroy_inode(struct inode *inode) kmem_cache_free(bdev_cachep, bdi); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo) { struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - { - memset(bdev, 0, sizeof(*bdev)); - mutex_init(&bdev->bd_mutex); - mutex_init(&bdev->bd_mount_mutex); - INIT_LIST_HEAD(&bdev->bd_inodes); - INIT_LIST_HEAD(&bdev->bd_list); + memset(bdev, 0, sizeof(*bdev)); + mutex_init(&bdev->bd_mutex); + sema_init(&bdev->bd_mount_sem, 1); + INIT_LIST_HEAD(&bdev->bd_inodes); + INIT_LIST_HEAD(&bdev->bd_list); #ifdef CONFIG_SYSFS - INIT_LIST_HEAD(&bdev->bd_holder_list); + INIT_LIST_HEAD(&bdev->bd_holder_list); #endif - inode_init_once(&ei->vfs_inode); - } + inode_init_once(&ei->vfs_inode); } static inline void __bd_forget(struct inode *inode) @@ -292,7 +306,7 @@ static void bdev_clear_inode(struct inode *inode) spin_unlock(&bdev_lock); } -static struct super_operations bdev_sops = { +static const struct super_operations bdev_sops = { .statfs = simple_statfs, .alloc_inode = bdev_alloc_inode, .destroy_inode = bdev_destroy_inode, @@ -300,10 +314,10 @@ static struct super_operations bdev_sops = { .clear_inode = bdev_clear_inode, }; -static struct super_block *bd_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int bd_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576); + return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt); } static struct file_system_type bd_type = { @@ -321,12 +335,11 @@ void __init bdev_cache_init(void) bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_PANIC), - init_once, NULL); + init_once); err = register_filesystem(&bd_type); if (err) panic("Cannot register bdev pseudo-fs"); bd_mnt = kern_mount(&bd_type); - err = PTR_ERR(bd_mnt); if (IS_ERR(bd_mnt)) panic("Cannot create bdev pseudo-fs"); blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ @@ -392,12 +405,10 @@ EXPORT_SYMBOL(bdget); long nr_blockdev_pages(void) { - struct list_head *p; + struct block_device *bdev; long ret = 0; spin_lock(&bdev_lock); - list_for_each(p, &all_bdevs) { - struct block_device *bdev; - bdev = list_entry(p, struct block_device, bd_list); + list_for_each_entry(bdev, &all_bdevs, bd_list) { ret += bdev->bd_inode->i_mapping->nrpages; } spin_unlock(&bdev_lock); @@ -414,21 +425,31 @@ EXPORT_SYMBOL(bdput); static struct block_device *bd_acquire(struct inode *inode) { struct block_device *bdev; + spin_lock(&bdev_lock); bdev = inode->i_bdev; - if (bdev && igrab(bdev->bd_inode)) { + if (bdev) { + atomic_inc(&bdev->bd_inode->i_count); spin_unlock(&bdev_lock); return bdev; } spin_unlock(&bdev_lock); + bdev = bdget(inode->i_rdev); if (bdev) { spin_lock(&bdev_lock); - if (inode->i_bdev) - __bd_forget(inode); - inode->i_bdev = bdev; - inode->i_mapping = bdev->bd_inode->i_mapping; - list_add(&inode->i_devices, &bdev->bd_inodes); + if (!inode->i_bdev) { + /* + * We take an additional bd_inode->i_count for inode, + * and it's released in clear_inode() of inode. + * So, we can access it via ->i_mapping always + * without igrab(). 
+ */ + atomic_inc(&bdev->bd_inode->i_count); + inode->i_bdev = bdev; + inode->i_mapping = bdev->bd_inode->i_mapping; + list_add(&inode->i_devices, &bdev->bd_inodes); + } spin_unlock(&bdev_lock); } return bdev; @@ -438,10 +459,18 @@ static struct block_device *bd_acquire(struct inode *inode) void bd_forget(struct inode *inode) { + struct block_device *bdev = NULL; + spin_lock(&bdev_lock); - if (inode->i_bdev) + if (inode->i_bdev) { + if (inode->i_sb != blockdev_superblock) + bdev = inode->i_bdev; __bd_forget(inode); + } spin_unlock(&bdev_lock); + + if (bdev) + iput(bdev->bd_inode); } int bd_claim(struct block_device *bdev, void *holder) @@ -514,9 +543,9 @@ EXPORT_SYMBOL(bd_release); static struct kobject *bdev_get_kobj(struct block_device *bdev) { if (bdev->bd_contains != bdev) - return kobject_get(&bdev->bd_part->kobj); + return kobject_get(&bdev->bd_part->dev.kobj); else - return kobject_get(&bdev->bd_disk->kobj); + return kobject_get(&bdev->bd_disk->dev.kobj); } static struct kobject *bdev_get_holder(struct block_device *bdev) @@ -527,11 +556,11 @@ static struct kobject *bdev_get_holder(struct block_device *bdev) return kobject_get(bdev->bd_disk->holder_dir); } -static void add_symlink(struct kobject *from, struct kobject *to) +static int add_symlink(struct kobject *from, struct kobject *to) { if (!from || !to) - return; - sysfs_create_link(from, to, kobject_name(to)); + return 0; + return sysfs_create_link(from, to, kobject_name(to)); } static void del_symlink(struct kobject *from, struct kobject *to) @@ -624,38 +653,62 @@ static void free_bd_holder(struct bd_holder *bo) } /** + * find_bd_holder - find matching struct bd_holder from the block device + * + * @bdev: struct block device to be searched + * @bo: target struct bd_holder + * + * Returns matching entry with @bo in @bdev->bd_holder_list. + * If found, increment the reference count and return the pointer. + * If not found, returns NULL. + */ +static struct bd_holder *find_bd_holder(struct block_device *bdev, + struct bd_holder *bo) +{ + struct bd_holder *tmp; + + list_for_each_entry(tmp, &bdev->bd_holder_list, list) + if (tmp->sdir == bo->sdir) { + tmp->count++; + return tmp; + } + + return NULL; +} + +/** * add_bd_holder - create sysfs symlinks for bd_claim() relationship * * @bdev: block device to be bd_claimed * @bo: preallocated and initialized by alloc_bd_holder() * - * If there is no matching entry with @bo in @bdev->bd_holder_list, - * add @bo to the list, create symlinks. + * Add @bo to @bdev->bd_holder_list, create symlinks. * - * Returns 1 if @bo was added to the list. - * Returns 0 if @bo wasn't used by any reason and should be freed. + * Returns 0 if symlinks are created. + * Returns -ve if something fails. 
*/ static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) { - struct bd_holder *tmp; + int err; if (!bo) - return 0; - - list_for_each_entry(tmp, &bdev->bd_holder_list, list) { - if (tmp->sdir == bo->sdir) { - tmp->count++; - return 0; - } - } + return -EINVAL; if (!bd_holder_grab_dirs(bdev, bo)) - return 0; + return -EBUSY; + + err = add_symlink(bo->sdir, bo->sdev); + if (err) + return err; + + err = add_symlink(bo->hdir, bo->hdev); + if (err) { + del_symlink(bo->sdir, bo->sdev); + return err; + } - add_symlink(bo->sdir, bo->sdev); - add_symlink(bo->hdir, bo->hdev); list_add_tail(&bo->list, &bdev->bd_holder_list); - return 1; + return 0; } /** @@ -713,8 +766,8 @@ static struct bd_holder *del_bd_holder(struct block_device *bdev, static int bd_claim_by_kobject(struct block_device *bdev, void *holder, struct kobject *kobj) { - int res; - struct bd_holder *bo; + int err; + struct bd_holder *bo, *found; if (!kobj) return -EINVAL; @@ -724,12 +777,24 @@ static int bd_claim_by_kobject(struct block_device *bdev, void *holder, return -ENOMEM; mutex_lock(&bdev->bd_mutex); - res = bd_claim(bdev, holder); - if (res || !add_bd_holder(bdev, bo)) - free_bd_holder(bo); - mutex_unlock(&bdev->bd_mutex); - return res; + err = bd_claim(bdev, holder); + if (err) + goto fail; + + found = find_bd_holder(bdev, bo); + if (found) + goto fail; + + err = add_bd_holder(bdev, bo); + if (err) + bd_release(bdev); + else + bo = NULL; +fail: + mutex_unlock(&bdev->bd_mutex); + free_bd_holder(bo); + return err; } /** @@ -743,15 +808,12 @@ static int bd_claim_by_kobject(struct block_device *bdev, void *holder, static void bd_release_from_kobject(struct block_device *bdev, struct kobject *kobj) { - struct bd_holder *bo; - if (!kobj) return; mutex_lock(&bdev->bd_mutex); bd_release(bdev); - if ((bo = del_bd_holder(bdev, kobj))) - free_bd_holder(bo); + free_bd_holder(del_bd_holder(bdev, kobj)); mutex_unlock(&bdev->bd_mutex); } @@ -830,7 +892,7 @@ int check_disk_change(struct block_device *bdev) if (bdops->revalidate_disk) bdops->revalidate_disk(bdev->bd_disk); - if (bdev->bd_disk->minors > 1) + if (disk_max_parts(bdev->bd_disk)) bdev->bd_invalidated = 1; return 1; } @@ -852,16 +914,43 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); -static int do_open(struct block_device *bdev, struct file *file) +static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, + int for_part); +static int __blkdev_put(struct block_device *bdev, int for_part); + +/* + * bd_mutex locking: + * + * mutex_lock(part->bd_mutex) + * mutex_lock_nested(whole->bd_mutex, 1) + */ + +static int do_open(struct block_device *bdev, struct file *file, int for_part) { struct module *owner = NULL; struct gendisk *disk; - int ret = -ENXIO; - int part; + struct hd_struct *part = NULL; + int ret; + int partno; + int perm = 0; + + if (file->f_mode & FMODE_READ) + perm |= MAY_READ; + if (file->f_mode & FMODE_WRITE) + perm |= MAY_WRITE; + /* + * hooks: /n/, see "layering violations". 
+ */ + ret = devcgroup_inode_permission(bdev->bd_inode, perm); + if (ret != 0) { + bdput(bdev); + return ret; + } + ret = -ENXIO; file->f_mapping = bdev->bd_inode->i_mapping; lock_kernel(); - disk = get_gendisk(bdev->bd_dev, &part); + disk = get_gendisk(bdev->bd_dev, &partno); if (!disk) { unlock_kernel(); bdput(bdev); @@ -869,11 +958,11 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, for_part); if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; - if (!part) { + if (!partno) { struct backing_dev_info *bdi; if (disk->fops->open) { ret = disk->fops->open(bdev->bd_inode, file); @@ -890,31 +979,26 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(disk, bdev); } else { - struct hd_struct *p; struct block_device *whole; whole = bdget_disk(disk, 0); ret = -ENOMEM; if (!whole) goto out_first; - ret = blkdev_get(whole, file->f_mode, file->f_flags); + BUG_ON(for_part); + ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1); if (ret) goto out_first; bdev->bd_contains = whole; - mutex_lock(&whole->bd_mutex); - whole->bd_part_count++; - p = disk->part[part - 1]; + part = disk_get_part(disk, partno); bdev->bd_inode->i_data.backing_dev_info = whole->bd_inode->i_data.backing_dev_info; - if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { - whole->bd_part_count--; - mutex_unlock(&whole->bd_mutex); + if (!(disk->flags & GENHD_FL_UP) || + !part || !part->nr_sects) { ret = -ENXIO; goto out_first; } - kobject_get(&p->kobj); - bdev->bd_part = p; - bd_set_size(bdev, (loff_t) p->nr_sects << 9); - mutex_unlock(&whole->bd_mutex); + bdev->bd_part = part; + bd_set_size(bdev, (loff_t)part->nr_sects << 9); } } else { put_disk(disk); @@ -927,13 +1011,11 @@ static int do_open(struct block_device *bdev, struct file *file) } if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); - } else { - mutex_lock(&bdev->bd_contains->bd_mutex); - bdev->bd_contains->bd_part_count++; - mutex_unlock(&bdev->bd_contains->bd_mutex); } } bdev->bd_openers++; + if (for_part) + bdev->bd_part_count++; mutex_unlock(&bdev->bd_mutex); unlock_kernel(); return 0; @@ -942,9 +1024,10 @@ out_first: bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; if (bdev != bdev->bd_contains) - blkdev_put(bdev->bd_contains); + __blkdev_put(bdev->bd_contains, 1); bdev->bd_contains = NULL; put_disk(disk); + disk_put_part(part); module_put(owner); out: mutex_unlock(&bdev->bd_mutex); @@ -954,7 +1037,8 @@ out: return ret; } -int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) +static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, + int for_part) { /* * This crockload is due to bad choice of ->open() type. 
@@ -966,12 +1050,16 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) struct dentry fake_dentry = {}; fake_file.f_mode = mode; fake_file.f_flags = flags; - fake_file.f_dentry = &fake_dentry; + fake_file.f_path.dentry = &fake_dentry; fake_dentry.d_inode = bdev->bd_inode; - return do_open(bdev, &fake_file); + return do_open(bdev, &fake_file, for_part); } +int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) +{ + return __blkdev_get(bdev, mode, flags, 0); +} EXPORT_SYMBOL(blkdev_get); static int blkdev_open(struct inode * inode, struct file * filp) @@ -988,8 +1076,10 @@ static int blkdev_open(struct inode * inode, struct file * filp) filp->f_flags |= O_LARGEFILE; bdev = bd_acquire(inode); + if (bdev == NULL) + return -ENOMEM; - res = do_open(bdev, filp); + res = do_open(bdev, filp, 0); if (res) return res; @@ -1003,14 +1093,18 @@ static int blkdev_open(struct inode * inode, struct file * filp) return res; } -int blkdev_put(struct block_device *bdev) +static int __blkdev_put(struct block_device *bdev, int for_part) { int ret = 0; struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; + struct block_device *victim = NULL; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, for_part); lock_kernel(); + if (for_part) + bdev->bd_part_count--; + if (!--bdev->bd_openers) { sync_blockdev(bdev); kill_bdev(bdev); @@ -1018,10 +1112,6 @@ int blkdev_put(struct block_device *bdev) if (bdev->bd_contains == bdev) { if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); - } else { - mutex_lock(&bdev->bd_contains->bd_mutex); - bdev->bd_contains->bd_part_count--; - mutex_unlock(&bdev->bd_contains->bd_mutex); } if (!bdev->bd_openers) { struct module *owner = disk->fops->owner; @@ -1030,22 +1120,27 @@ int blkdev_put(struct block_device *bdev) module_put(owner); if (bdev->bd_contains != bdev) { - kobject_put(&bdev->bd_part->kobj); + disk_put_part(bdev->bd_part); bdev->bd_part = NULL; } bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) { - blkdev_put(bdev->bd_contains); - } + if (bdev != bdev->bd_contains) + victim = bdev->bd_contains; bdev->bd_contains = NULL; } unlock_kernel(); mutex_unlock(&bdev->bd_mutex); bdput(bdev); + if (victim) + __blkdev_put(victim, 1); return ret; } +int blkdev_put(struct block_device *bdev) +{ + return __blkdev_put(bdev, 0); +} EXPORT_SYMBOL(blkdev_put); static int blkdev_close(struct inode * inode, struct file * filp) @@ -1056,33 +1151,17 @@ static int blkdev_close(struct inode * inode, struct file * filp) return blkdev_put(bdev); } -static ssize_t blkdev_file_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count }; - - return generic_file_write_nolock(file, &local_iov, 1, ppos); -} - -static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf, - size_t count, loff_t pos) -{ - struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count }; - - return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos); -} - static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) { return blkdev_ioctl(file->f_mapping->host, file, cmd, arg); } -struct address_space_operations def_blk_aops = { +static const struct address_space_operations def_blk_aops = { .readpage = blkdev_readpage, .writepage = blkdev_writepage, .sync_page = block_sync_page, - .prepare_write = 
blkdev_prepare_write, - .commit_write = blkdev_commit_write, + .write_begin = blkdev_write_begin, + .write_end = blkdev_write_end, .writepages = generic_writepages, .direct_IO = blkdev_direct_IO, }; @@ -1091,19 +1170,16 @@ const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, .llseek = block_llseek, - .read = generic_file_read, - .write = blkdev_file_write, + .read = do_sync_read, + .write = do_sync_write, .aio_read = generic_file_aio_read, - .aio_write = blkdev_file_aio_write, + .aio_write = generic_file_aio_write_nolock, .mmap = generic_file_mmap, .fsync = block_fsync, .unlocked_ioctl = block_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_blkdev_ioctl, #endif - .readv = generic_file_readv, - .writev = generic_file_write_nolock, - .sendfile = generic_file_sendfile, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, }; @@ -1143,24 +1219,25 @@ struct block_device *lookup_bdev(const char *path) if (error) return ERR_PTR(error); - inode = nd.dentry->d_inode; + inode = nd.path.dentry->d_inode; error = -ENOTBLK; if (!S_ISBLK(inode->i_mode)) goto fail; error = -EACCES; - if (nd.mnt->mnt_flags & MNT_NODEV) + if (nd.path.mnt->mnt_flags & MNT_NODEV) goto fail; error = -ENOMEM; bdev = bd_acquire(inode); if (!bdev) goto fail; out: - path_release(&nd); + path_put(&nd.path); return bdev; fail: bdev = ERR_PTR(error); goto out; } +EXPORT_SYMBOL(lookup_bdev); /** * open_bdev_excl - open a block device by name and set it up for use @@ -1217,3 +1294,24 @@ void close_bdev_excl(struct block_device *bdev) } EXPORT_SYMBOL(close_bdev_excl); + +int __invalidate_device(struct block_device *bdev) +{ + struct super_block *sb = get_super(bdev); + int res = 0; + + if (sb) { + /* + * no need to lock the super, get_super holds the + * read mutex so the filesystem cannot go away + * under us (->put_super runs with the write lock + * hold). + */ + shrink_dcache_sb(sb); + res = invalidate_inodes(sb); + drop_super(sb); + } + invalidate_bdev(bdev); + return res; +} +EXPORT_SYMBOL(__invalidate_device);
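
The tail of the diff carries the open_bdev_excl() kerneldoc ("open a block device by name and set it up for use") and the close_bdev_excl() export. As a reference point, here is a minimal, hedged sketch of the exclusive-open lifecycle against the block-layer API of this era; example_use_bdev_excl() and its arguments are hypothetical, and error handling is trimmed to the essentials.

#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical caller -- a usage sketch, not part of the patch. */
static int example_use_bdev_excl(const char *path, void *holder)
{
	struct block_device *bdev;

	/*
	 * open_bdev_excl() resolves @path, opens the device and bd_claim()s
	 * it with @holder, so a second exclusive claimant is refused.
	 */
	bdev = open_bdev_excl(path, 0, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... submit I/O via bdev->bd_inode->i_mapping, etc. ... */

	/* Drops the claim and the reference taken at open time. */
	close_bdev_excl(bdev);
	return 0;
}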
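
The diff also adds EXPORT_SYMBOL(lookup_bdev) and an exported __invalidate_device() that shrinks a mounted filesystem's dcache and inodes before invalidating the device's page cache. A minimal sketch of how an in-kernel user might combine the two follows; example_flush_bdev() is a hypothetical name, and the single-argument __invalidate_device() signature is the one added by this diff.

#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical helper -- illustrates the newly exported interfaces only. */
static int example_flush_bdev(const char *path)
{
	struct block_device *bdev;
	int res;

	bdev = lookup_bdev(path);	/* holds a bd_acquire() reference on success */
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/*
	 * get_super()/drop_super() inside __invalidate_device() keep any
	 * mounted filesystem pinned while its dcache and inodes are shrunk;
	 * the block device's page cache is invalidated afterwards.
	 */
	res = __invalidate_device(bdev);

	bdput(bdev);			/* release the lookup_bdev() reference */
	return res;
}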