* (C) 1997 Linus Torvalds
*/
-#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
+#include <linux/mount.h>
/*
* This is needed for the following functions:
#define I_HASHBITS i_hash_shift
#define I_HASHMASK i_hash_mask
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
/*
* Each inode can be on two separate lists. One is
LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
/*
* A simple spinlock to protect the list manipulations.
DEFINE_SPINLOCK(inode_lock);
/*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
* icache shrinking path, and the umount path. Without this exclusion,
* by the time prune_icache calls iput for the inode whose pages it has
* been invalidating, or by the time it calls clear_inode & destroy_inode
* from its final dispose_list, the struct super_block they refer to
* (for inode->i_sb->s_op) may already have been freed and reused.
*/
-DECLARE_MUTEX(iprune_sem);
+static DEFINE_MUTEX(iprune_mutex);
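/*
 * Illustrative sketch, not part of the patch: the ordering this mutex
 * rules out.  Without it, prune_icache() could pick an inode, drop
 * inode_lock, and race with umount freeing the super_block, so that its
 * final iput()/clear_inode() dereferences inode->i_sb->s_op in freed
 * memory.  Both paths now bracket their work the same way:
 *
 *	mutex_lock(&iprune_mutex);
 *	... prune (shrinker) or invalidate (umount) ...
 *	mutex_unlock(&iprune_mutex);
 */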
/*
* Statistics gathering..
*/
struct inodes_stat_t inodes_stat;
-static kmem_cache_t * inode_cachep;
+static struct kmem_cache * inode_cachep __read_mostly;
static struct inode *alloc_inode(struct super_block *sb)
{
- static struct address_space_operations empty_aops;
+ static const struct address_space_operations empty_aops;
static struct inode_operations empty_iops;
- static struct file_operations empty_fops;
+ static const struct file_operations empty_fops;
struct inode *inode;
if (sb->s_op->alloc_inode)
inode = sb->s_op->alloc_inode(sb);
else
- inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+ inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
if (inode) {
struct address_space * const mapping = &inode->i_data;
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_rdev = 0;
- inode->i_security = NULL;
inode->dirtied_when = 0;
if (security_inode_alloc(inode)) {
if (inode->i_sb->s_op->destroy_inode)
bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
mapping->backing_dev_info = bdi;
}
- memset(&inode->u, 0, sizeof(inode->u));
+ inode->i_private = NULL;
inode->i_mapping = mapping;
}
return inode;
void destroy_inode(struct inode *inode)
{
- if (inode_has_buffers(inode))
- BUG();
+ BUG_ON(inode_has_buffers(inode));
security_inode_free(inode);
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_dentry);
INIT_LIST_HEAD(&inode->i_devices);
- sema_init(&inode->i_sem, 1);
+ mutex_init(&inode->i_mutex);
init_rwsem(&inode->i_alloc_sem);
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
rwlock_init(&inode->i_data.tree_lock);
i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
INIT_LIST_HEAD(&inode->inotify_watches);
- sema_init(&inode->inotify_sem, 1);
+ mutex_init(&inode->inotify_mutex);
#endif
}
EXPORT_SYMBOL(inode_init_once);
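/*
 * Caller-side sketch (not from this patch): filesystem code that
 * serialized on the old per-inode semaphore
 *
 *	down(&inode->i_sem);
 *	...
 *	up(&inode->i_sem);
 *
 * now takes the mutex initialized above instead:
 *
 *	mutex_lock(&inode->i_mutex);
 *	...
 *	mutex_unlock(&inode->i_mutex);
 */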
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct inode * inode = (struct inode *) foo;
might_sleep();
invalidate_inode_buffers(inode);
- if (inode->i_data.nrpages)
- BUG();
- if (!(inode->i_state & I_FREEING))
- BUG();
- if (inode->i_state & I_CLEAR)
- BUG();
+ BUG_ON(inode->i_data.nrpages);
+ BUG_ON(!(inode->i_state & I_FREEING));
+ BUG_ON(inode->i_state & I_CLEAR);
wait_on_inode(inode);
DQUOT_DROP(inode);
if (inode->i_sb && inode->i_sb->s_op->clear_inode)
inode->i_sb->s_op->clear_inode(inode);
- if (inode->i_bdev)
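+ /*
+ * i_pipe, i_bdev and i_cdev now share a union (see the i_private
+ * change above), so a non-NULL pointer alone no longer proves this
+ * is a block or char device inode: check the mode first.
+ */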
+ if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
- if (inode->i_cdev)
+ if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
inode->i_state = I_CLEAR;
}
/*
* We can reschedule here without worrying about the list's
* consistency because the per-sb list of inodes must not
- * change during umount anymore, and because iprune_sem keeps
+ * change during umount anymore, and because iprune_mutex keeps
* shrink_icache_memory() away.
*/
cond_resched_lock(&inode_lock);
int busy;
LIST_HEAD(throw_away);
- down(&iprune_sem);
+ mutex_lock(&iprune_mutex);
spin_lock(&inode_lock);
inotify_unmount_inodes(&sb->s_inodes);
busy = invalidate_list(&sb->s_inodes, &throw_away);
spin_unlock(&inode_lock);
dispose_list(&throw_away);
- up(&iprune_sem);
+ mutex_unlock(&iprune_mutex);
return busy;
}
EXPORT_SYMBOL(invalidate_inodes);
-
-int __invalidate_device(struct block_device *bdev)
-{
- struct super_block *sb = get_super(bdev);
- int res = 0;
-
- if (sb) {
- /*
- * no need to lock the super, get_super holds the
- * read semaphore so the filesystem cannot go away
- * under us (->put_super runs with the write lock
- * hold).
- */
- shrink_dcache_sb(sb);
- res = invalidate_inodes(sb);
- drop_super(sb);
- }
- invalidate_bdev(bdev, 0);
- return res;
-}
-EXPORT_SYMBOL(__invalidate_device);
static int can_unuse(struct inode *inode)
{
int nr_scanned;
unsigned long reap = 0;
- down(&iprune_sem);
+ mutex_lock(&iprune_mutex);
spin_lock(&inode_lock);
for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
struct inode *inode;
__iget(inode);
spin_unlock(&inode_lock);
if (remove_inode_buffers(inode))
- reap += invalidate_inode_pages(&inode->i_data);
+ reap += invalidate_mapping_pages(&inode->i_data,
+ 0, -1);
iput(inode);
spin_lock(&inode_lock);
nr_pruned++;
}
inodes_stat.nr_unused -= nr_pruned;
+ if (current_is_kswapd())
+ __count_vm_events(KSWAPD_INODESTEAL, reap);
+ else
+ __count_vm_events(PGINODESTEAL, reap);
spin_unlock(&inode_lock);
dispose_list(&freeable);
- up(&iprune_sem);
-
- if (current_is_kswapd())
- mod_page_state(kswapd_inodesteal, reap);
- else
- mod_page_state(pginodesteal, reap);
+ mutex_unlock(&iprune_mutex);
}
/*
* This function is passed the number of inodes to scan, and it returns the
* total number of remaining possibly-reclaimable inodes.
*/
-static int shrink_icache_memory(int nr, unsigned int gfp_mask)
+static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{
if (nr) {
/*
return inode;
}
-static inline unsigned long hash(struct super_block *sb, unsigned long hashval)
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
unsigned long tmp;
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode_lock);
- if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+ if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
__iget(inode);
else
/*
* @head: the head of the list to search
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
+ * @wait: if true, wait for the inode to be unlocked; if false, do not
*
* ifind() searches for the inode specified by @data in the inode
* cache. This is a generalized version of ifind_fast() for file systems where
*
* Note, @test is called with the inode_lock held, so can't sleep.
*/
-static inline struct inode *ifind(struct super_block *sb,
+static struct inode *ifind(struct super_block *sb,
struct hlist_head *head, int (*test)(struct inode *, void *),
- void *data)
+ void *data, const int wait)
{
struct inode *inode;
if (inode) {
__iget(inode);
spin_unlock(&inode_lock);
- wait_on_inode(inode);
+ if (likely(wait))
+ wait_on_inode(inode);
return inode;
}
spin_unlock(&inode_lock);
*
* Otherwise NULL is returned.
*/
-static inline struct inode *ifind_fast(struct super_block *sb,
+static struct inode *ifind_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino)
{
struct inode *inode;
}
/**
- * ilookup5 - search for an inode in the inode cache
+ * ilookup5_nowait - search for an inode in the inode cache
* @sb: super block of file system to search
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* identification of an inode.
*
* If the inode is in the cache, the inode is returned with an incremented
- * reference count.
+ * reference count. Note, the inode lock is not waited upon, so you have
+ * to be very careful what you do with the returned inode. You probably
+ * should be using ilookup5() instead.
+ *
+ * Otherwise NULL is returned.
+ *
+ * Note, @test is called with the inode_lock held, so can't sleep.
+ */
+struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *), void *data)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+
+ return ifind(sb, head, test, data, 0);
+}
+
+EXPORT_SYMBOL(ilookup5_nowait);
+
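/*
 * Usage sketch, not part of the patch: a filesystem looking up an
 * inode by a key other than i_ino.  The examplefs_* names, the
 * EXAMPLEFS_I() accessor and the i_block field are all hypothetical.
 * @test runs under inode_lock, so it must not sleep.
 */
static int examplefs_test(struct inode *inode, void *data)
{
	unsigned long *block = data;

	return EXAMPLEFS_I(inode)->i_block == *block;
}

static struct inode *examplefs_peek(struct super_block *sb,
				    unsigned long block)
{
	/*
	 * The result may still be locked (I_LOCK/I_NEW); do not touch
	 * its contents without waiting on it, or use ilookup5() below.
	 */
	return ilookup5_nowait(sb, block, examplefs_test, &block);
}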
+/**
+ * ilookup5 - search for an inode in the inode cache
+ * @sb: super block of file system to search
+ * @hashval: hash value (usually inode number) to search for
+ * @test: callback used for comparisons between inodes
+ * @data: opaque data pointer to pass to @test
+ *
+ * ilookup5() uses ifind() to search for the inode specified by @hashval and
+ * @data in the inode cache. This is a generalized version of ilookup() for
+ * file systems where the inode number is not sufficient for unique
+ * identification of an inode.
+ *
+ * If the inode is in the cache, the inode lock is waited upon and the inode is
+ * returned with an incremented reference count.
*
* Otherwise NULL is returned.
*
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
- return ifind(sb, head, test, data);
+ return ifind(sb, head, test, data, 1);
}
EXPORT_SYMBOL(ilookup5);
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
- inode = ifind(sb, head, test, data);
+ inode = ifind(sb, head, test, data, 1);
if (inode)
return inode;
/*
*/
void generic_delete_inode(struct inode *inode)
{
- struct super_operations *op = inode->i_sb->s_op;
+ const struct super_operations *op = inode->i_sb->s_op;
list_del_init(&inode->i_list);
list_del_init(&inode->i_sb_list);
- inode->i_state|=I_FREEING;
+ inode->i_state |= I_FREEING;
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
- if (inode->i_data.nrpages)
- truncate_inode_pages(&inode->i_data, 0);
-
security_inode_delete(inode);
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
if (!is_bad_inode(inode))
DQUOT_INIT(inode);
- /* s_op->delete_inode internally recalls clear_inode() */
+ /*
+ * Filesystems implementing their own
+ * s_op->delete_inode are required to call
+ * truncate_inode_pages() and clear_inode()
+ * internally.
+ */
delete(inode);
- } else
+ } else {
+ truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
+ }
spin_lock(&inode_lock);
hlist_del_init(&inode->i_hash);
spin_unlock(&inode_lock);
wake_up_inode(inode);
- if (inode->i_state != I_CLEAR)
- BUG();
+ BUG_ON(inode->i_state != I_CLEAR);
destroy_inode(inode);
}
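/*
 * Sketch of the contract stated in the comment above (examplefs is
 * hypothetical): a filesystem providing its own ->delete_inode must
 * now truncate the page cache and call clear_inode() itself, since
 * generic_delete_inode() no longer truncates on its behalf.
 */
static void examplefs_delete_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	/* ... release the inode's on-disk blocks here ... */
	clear_inode(inode);
}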
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
+ wake_up_inode(inode);
destroy_inode(inode);
}
*/
static inline void iput_final(struct inode *inode)
{
- struct super_operations *op = inode->i_sb->s_op;
+ const struct super_operations *op = inode->i_sb->s_op;
void (*drop)(struct inode *) = generic_drop_inode;
if (op && op->drop_inode)
void iput(struct inode *inode)
{
if (inode) {
- struct super_operations *op = inode->i_sb->s_op;
+ const struct super_operations *op = inode->i_sb->s_op;
BUG_ON(inode->i_state == I_CLEAR);
res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
return res;
}
-
EXPORT_SYMBOL(bmap);
/**
- * update_atime - update the access time
- * @inode: inode accessed
+ * touch_atime - update the access time
+ * @mnt: mount the inode is accessed on
+ * @dentry: dentry accessed
*
* Update the accessed time on an inode and mark it for writeback.
* This function automatically handles read only file systems and media,
* as well as the "noatime" flag and inode specific "noatime" markers.
*/
-void update_atime(struct inode *inode)
+void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
+ struct inode *inode = dentry->d_inode;
struct timespec now;
- if (IS_NOATIME(inode))
+ if (inode->i_flags & S_NOATIME)
return;
- if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
+ if (IS_NOATIME(inode))
return;
- if (IS_RDONLY(inode))
+ if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
return;
- now = current_fs_time(inode->i_sb);
- if (!timespec_equal(&inode->i_atime, &now)) {
- inode->i_atime = now;
- mark_inode_dirty_sync(inode);
- } else {
- if (!timespec_equal(&inode->i_atime, &now))
- inode->i_atime = now;
+ /*
+ * We may have a NULL vfsmount when coming from NFSD
+ */
+ if (mnt) {
+ if (mnt->mnt_flags & MNT_NOATIME)
+ return;
+ if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
+ return;
+
+ if (mnt->mnt_flags & MNT_RELATIME) {
+ /*
+ * With relative atime, only update atime if the
+ * previous atime is earlier than either the ctime or
+ * mtime.
+ */
+ if (timespec_compare(&inode->i_mtime,
+ &inode->i_atime) < 0 &&
+ timespec_compare(&inode->i_ctime,
+ &inode->i_atime) < 0)
+ return;
+ }
}
-}
-EXPORT_SYMBOL(update_atime);
+ now = current_fs_time(inode->i_sb);
+ if (timespec_equal(&inode->i_atime, &now))
+ return;
+
+ inode->i_atime = now;
+ mark_inode_dirty_sync(inode);
+}
+EXPORT_SYMBOL(touch_atime);
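/*
 * Caller-side sketch (not from this patch): read paths that used to do
 *
 *	update_atime(inode);
 *
 * now pass the vfsmount so per-mount flags (MNT_NOATIME, MNT_NODIRATIME,
 * MNT_RELATIME) can be honoured:
 *
 *	touch_atime(file->f_path.mnt, file->f_path.dentry);
 *
 * With MNT_RELATIME, for example, a 12:00 read of a file whose mtime
 * and ctime (both 10:00) are older than its atime (11:00) leaves atime
 * alone; had mtime been 11:30, atime would be refreshed.
 */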
/**
- * inode_update_time - update mtime and ctime time
- * @inode: inode accessed
- * @ctime_too: update ctime too
+ * file_update_time - update mtime and ctime time
+ * @file: file accessed
*
- * Update the mtime time on an inode and mark it for writeback.
- * When ctime_too is specified update the ctime too.
+ * Update the mtime and ctime members of an inode and mark the inode
+ * for writeback. Note that this function is meant exclusively for
+ * usage in the file write path of filesystems, and filesystems may
+ * choose to explicitly ignore update via this function with the
+ * S_NOCMTIME inode flag, e.g. for network filesystems where these
+ * timestamps are handled by the server.
*/
-void inode_update_time(struct inode *inode, int ctime_too)
+void file_update_time(struct file *file)
{
+ struct inode *inode = file->f_path.dentry->d_inode;
struct timespec now;
int sync_it = 0;
return;
now = current_fs_time(inode->i_sb);
- if (!timespec_equal(&inode->i_mtime, &now))
+ if (!timespec_equal(&inode->i_mtime, &now)) {
+ inode->i_mtime = now;
sync_it = 1;
- inode->i_mtime = now;
+ }
- if (ctime_too) {
- if (!timespec_equal(&inode->i_ctime, &now))
- sync_it = 1;
+ if (!timespec_equal(&inode->i_ctime, &now)) {
inode->i_ctime = now;
+ sync_it = 1;
}
+
if (sync_it)
mark_inode_dirty_sync(inode);
}
-EXPORT_SYMBOL(inode_update_time);
+EXPORT_SYMBOL(file_update_time);
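/*
 * Write-path sketch (hypothetical examplefs caller, not part of the
 * patch): the old convention inode_update_time(inode, 1) becomes a
 * single call taking the file, and mtime and ctime each set sync_it
 * independently.
 */
static ssize_t examplefs_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	file_update_time(file);	/* update mtime/ctime, dirty the inode */
	/* ... copy the data and advance *ppos here ... */
	return len;
}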
int inode_needs_sync(struct inode *inode)
{
EXPORT_SYMBOL(inode_needs_sync);
-/*
- * Quota functions that want to walk the inode lists..
- */
-#ifdef CONFIG_QUOTA
-
-/* Function back in dquot.c */
-int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
-
-void remove_dquot_ref(struct super_block *sb, int type,
- struct list_head *tofree_head)
-{
- struct inode *inode;
-
- if (!sb->dq_op)
- return; /* nothing to do */
- spin_lock(&inode_lock); /* This lock is for inodes code */
-
- /*
- * We don't have to lock against quota code - test IS_QUOTAINIT is
- * just for speedup...
- */
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
- if (!IS_NOQUOTA(inode))
- remove_inode_dquot_ref(inode, type, tofree_head);
-
- spin_unlock(&inode_lock);
-}
-
-#endif
-
int inode_wait(void *word)
{
schedule();
wake_up_bit(&inode->i_state, __I_LOCK);
}
+/*
+ * We rarely want to lock two inodes that do not have a parent/child
+ * relationship (as a directory and an inode within it do) simultaneously. The
+ * vast majority of file systems should be able to get along fine
+ * without this. Do not use these functions except as a last resort.
+ */
+void inode_double_lock(struct inode *inode1, struct inode *inode2)
+{
+ if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
+ if (inode1)
+ mutex_lock(&inode1->i_mutex);
+ else if (inode2)
+ mutex_lock(&inode2->i_mutex);
+ return;
+ }
+
+ if (inode1 < inode2) {
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+ } else {
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
+ }
+}
+EXPORT_SYMBOL(inode_double_lock);
+
+void inode_double_unlock(struct inode *inode1, struct inode *inode2)
+{
+ if (inode1)
+ mutex_unlock(&inode1->i_mutex);
+
+ if (inode2 && inode2 != inode1)
+ mutex_unlock(&inode2->i_mutex);
+}
+EXPORT_SYMBOL(inode_double_unlock);
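/*
 * Usage sketch (not from this patch): taking i_mutex on two unrelated
 * inodes.  Locking in address order, as inode_double_lock() does with
 * its I_MUTEX_PARENT/I_MUTEX_CHILD annotations, keeps the ordering
 * consistent for lockdep and avoids ABBA deadlocks.
 */
static void example_lock_pair(struct inode *a, struct inode *b)
{
	inode_double_lock(a, b);
	/* ... operate on both inodes; either pointer may be NULL ... */
	inode_double_unlock(a, b);
}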
+
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
int loop;
/* inode slab cache */
- inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
- 0, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_once, NULL);
+ inode_cachep = kmem_cache_create("inode_cache",
+ sizeof(struct inode),
+ 0,
+ (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+ SLAB_MEM_SPREAD),
+ init_once,
+ NULL);
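/*
 * SLAB_MEM_SPREAD is new here: when a task runs in a cpuset with
 * memory_spread_slab enabled, pages for this cache are spread across
 * the cpuset's allowed memory nodes instead of favouring the local
 * node.
 */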
set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
/* Hash may have been set up in inode_init_early */