X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Finode.c;h=913ab2d9a5d10ecd183630c1a25770435613aca5;hb=05bf9e839d9de4e8a094274a0a2fd07beb47eaf1;hp=6d695037a0a3df62f6f977091881ae7a596705c9;hpb=cb2c0233755429037462e16ea0d5497a0092738c;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/inode.c b/fs/inode.c
index 6d69503..913ab2d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -4,7 +4,6 @@
  * (C) 1997 Linus Torvalds
  */
 
-#include <linux/config.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/dcache.h>
@@ -21,6 +20,9 @@
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
 #include <linux/bootmem.h>
+#include <linux/inotify.h>
+#include <linux/mount.h>
+#include
 
 /*
  * This is needed for the following functions:
@@ -54,8 +56,8 @@
 #define I_HASHBITS	i_hash_shift
 #define I_HASHMASK	i_hash_mask
 
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
 
 /*
  * Each inode can be on two separate lists. One is
@@ -71,7 +73,7 @@ static unsigned int i_hash_shift;
 LIST_HEAD(inode_in_use);
 LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
  * A simple spinlock to protect the list manipulations.
@@ -82,102 +84,138 @@ static struct hlist_head *inode_hashtable;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path.  Without this exclusion,
  * by the time prune_icache calls iput for the inode whose pages it has
  * been invalidating, or by the time it calls clear_inode & destroy_inode
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-DECLARE_MUTEX(iprune_sem);
+static DEFINE_MUTEX(iprune_mutex);
 
 /*
  * Statistics gathering..
  */
 struct inodes_stat_t inodes_stat;
 
-static kmem_cache_t * inode_cachep;
+static struct kmem_cache * inode_cachep __read_mostly;
 
-static struct inode *alloc_inode(struct super_block *sb)
+static void wake_up_inode(struct inode *inode)
 {
-	static struct address_space_operations empty_aops;
-	static struct inode_operations empty_iops;
-	static struct file_operations empty_fops;
-	struct inode *inode;
-
-	if (sb->s_op->alloc_inode)
-		inode = sb->s_op->alloc_inode(sb);
-	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+	/*
+	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
+	smp_mb();
+	wake_up_bit(&inode->i_state, __I_LOCK);
+}
 
-	if (inode) {
-		struct address_space * const mapping = &inode->i_data;
-
-		inode->i_sb = sb;
-		inode->i_blkbits = sb->s_blocksize_bits;
-		inode->i_flags = 0;
-		atomic_set(&inode->i_count, 1);
-		inode->i_op = &empty_iops;
-		inode->i_fop = &empty_fops;
-		inode->i_nlink = 1;
-		atomic_set(&inode->i_writecount, 0);
-		inode->i_size = 0;
-		inode->i_blocks = 0;
-		inode->i_bytes = 0;
-		inode->i_generation = 0;
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb: superblock inode belongs to
+ * @inode: inode to initialise
+ *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+{
+	static const struct address_space_operations empty_aops;
+	static struct inode_operations empty_iops;
+	static const struct file_operations empty_fops;
+
+	struct address_space * const mapping = &inode->i_data;
+
+	inode->i_sb = sb;
+	inode->i_blkbits = sb->s_blocksize_bits;
+	inode->i_flags = 0;
+	atomic_set(&inode->i_count, 1);
+	inode->i_op = &empty_iops;
+	inode->i_fop = &empty_fops;
+	inode->i_nlink = 1;
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	atomic_set(&inode->i_writecount, 0);
+	inode->i_size = 0;
+	inode->i_blocks = 0;
+	inode->i_bytes = 0;
+	inode->i_generation = 0;
 #ifdef CONFIG_QUOTA
-		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
+	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 #endif
-		inode->i_pipe = NULL;
-		inode->i_bdev = NULL;
-		inode->i_cdev = NULL;
-		inode->i_rdev = 0;
-		inode->i_security = NULL;
-		inode->dirtied_when = 0;
-		if (security_inode_alloc(inode)) {
-			if (inode->i_sb->s_op->destroy_inode)
-				inode->i_sb->s_op->destroy_inode(inode);
-			else
-				kmem_cache_free(inode_cachep, (inode));
-			return NULL;
-		}
+	inode->i_pipe = NULL;
+	inode->i_bdev = NULL;
+	inode->i_cdev = NULL;
+	inode->i_rdev = 0;
+	inode->dirtied_when = 0;
+	if (security_inode_alloc(inode)) {
+		if (inode->i_sb->s_op->destroy_inode)
+			inode->i_sb->s_op->destroy_inode(inode);
+		else
+			kmem_cache_free(inode_cachep, (inode));
+		return NULL;
+	}
 
-		mapping->a_ops = &empty_aops;
-		mapping->host = inode;
-		mapping->flags = 0;
-		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
-		mapping->assoc_mapping = NULL;
-		mapping->backing_dev_info = &default_backing_dev_info;
+	spin_lock_init(&inode->i_lock);
+	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 
-		/*
-		 * If the block_device provides a backing_dev_info for client
-		 * inodes then use that.  Otherwise the inode share the bdev's
-		 * backing_dev_info.
-		 */
-		if (sb->s_bdev) {
-			struct backing_dev_info *bdi;
+	mutex_init(&inode->i_mutex);
+	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 
-			bdi = sb->s_bdev->bd_inode_backing_dev_info;
-			if (!bdi)
-				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-			mapping->backing_dev_info = bdi;
-		}
-		memset(&inode->u, 0, sizeof(inode->u));
-		inode->i_mapping = mapping;
+	init_rwsem(&inode->i_alloc_sem);
+	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+
+	mapping->a_ops = &empty_aops;
+	mapping->host = inode;
+	mapping->flags = 0;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
+	mapping->assoc_mapping = NULL;
+	mapping->backing_dev_info = &default_backing_dev_info;
+	mapping->writeback_index = 0;
+
+	/*
+	 * If the block_device provides a backing_dev_info for client
+	 * inodes then use that.  Otherwise the inode shares the bdev's
+	 * backing_dev_info.
+	 */
+	if (sb->s_bdev) {
+		struct backing_dev_info *bdi;
+
+		bdi = sb->s_bdev->bd_inode_backing_dev_info;
+		if (!bdi)
+			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+		mapping->backing_dev_info = bdi;
 	}
+	inode->i_private = NULL;
+	inode->i_mapping = mapping;
+
+	return inode;
+}
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+	if (inode)
+		return inode_init_always(sb, inode);
+	return NULL;
+}
 
 void destroy_inode(struct inode *inode) 
 {
-	if (inode_has_buffers(inode))
-		BUG();
+	BUG_ON(inode_has_buffers(inode));
 	security_inode_free(inode);
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
+EXPORT_SYMBOL(destroy_inode);
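/*
 * Illustrative sketch, not part of the patch: with inode_init_always()
 * exported, a filesystem that embeds the VFS inode in a private structure
 * and allocates it outside the normal alloc_inode() path (the XFS case
 * that motivated this export) can reuse the generic initialisation
 * instead of duplicating it.  All foofs_* names are hypothetical.
 */
static struct inode *foofs_inode_alloc(struct super_block *sb)
{
	struct foofs_inode *fi;

	fi = kmem_cache_alloc(foofs_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;
	if (!inode_init_always(sb, &fi->vfs_inode)) {
		/* security_inode_alloc() failed: undo our own allocation */
		kmem_cache_free(foofs_inode_cachep, fi);
		return NULL;
	}
	return &fi->vfs_inode;
}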
 
 
 /*
@@ -191,28 +229,27 @@ void inode_init_once(struct inode *inode)
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
-	sema_init(&inode->i_sem, 1);
-	init_rwsem(&inode->i_alloc_sem);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	rwlock_init(&inode->i_data.tree_lock);
+	spin_lock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
 	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
-	spin_lock_init(&inode->i_lock);
 	i_size_ordered_init(inode);
+#ifdef CONFIG_INOTIFY
+	INIT_LIST_HEAD(&inode->inotify_watches);
+	mutex_init(&inode->inotify_mutex);
+#endif
 }
 
 EXPORT_SYMBOL(inode_init_once);
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo)
 {
 	struct inode * inode = (struct inode *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-		SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(inode);
+	inode_init_once(inode);
 }
 
 /*
@@ -225,7 +262,7 @@ void __iget(struct inode * inode)
 		return;
 	}
 	atomic_inc(&inode->i_count);
-	if (!(inode->i_state & (I_DIRTY|I_LOCK)))
+	if (!(inode->i_state & (I_DIRTY|I_SYNC)))
 		list_move(&inode->i_list, &inode_in_use);
 	inodes_stat.nr_unused--;
 }
@@ -243,19 +280,16 @@ void clear_inode(struct inode *inode)
 	might_sleep();
 	invalidate_inode_buffers(inode);
 
-	if (inode->i_data.nrpages)
-		BUG();
-	if (!(inode->i_state & I_FREEING))
-		BUG();
-	if (inode->i_state & I_CLEAR)
-		BUG();
-	wait_on_inode(inode);
+	BUG_ON(inode->i_data.nrpages);
+	BUG_ON(!(inode->i_state & I_FREEING));
+	BUG_ON(inode->i_state & I_CLEAR);
+	inode_sync_wait(inode);
 	DQUOT_DROP(inode);
-	if (inode->i_sb && inode->i_sb->s_op->clear_inode)
+	if (inode->i_sb->s_op->clear_inode)
 		inode->i_sb->s_op->clear_inode(inode);
-	if (inode->i_bdev)
+	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
 		bd_forget(inode);
-	if (inode->i_cdev)
+	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 		cd_forget(inode);
 	inode->i_state = I_CLEAR;
 }
@@ -276,12 +310,19 @@ static void dispose_list(struct list_head *head)
 	while (!list_empty(head)) {
 		struct inode *inode;
 
-		inode = list_entry(head->next, struct inode, i_list);
+		inode = list_first_entry(head, struct inode, i_list);
 		list_del(&inode->i_list);
 
 		if (inode->i_data.nrpages)
 			truncate_inode_pages(&inode->i_data, 0);
 		clear_inode(inode);
+
+		spin_lock(&inode_lock);
+		hlist_del_init(&inode->i_hash);
+		list_del_init(&inode->i_sb_list);
+		spin_unlock(&inode_lock);
+
+		wake_up_inode(inode);
 		destroy_inode(inode);
 		nr_disposed++;
 	}
@@ -306,7 +347,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		/*
 		 * We can reschedule here without worrying about the list's
 		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
+		 * change during umount anymore, and because iprune_mutex keeps
 		 * shrink_icache_memory() away.
 		 */
 		cond_resched_lock(&inode_lock);
@@ -317,8 +358,6 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		inode = list_entry(tmp, struct inode, i_sb_list);
 		invalidate_inode_buffers(inode);
 		if (!atomic_read(&inode->i_count)) {
-			hlist_del_init(&inode->i_hash);
-			list_del(&inode->i_sb_list);
 			list_move(&inode->i_list, dispose);
 			inode->i_state |= I_FREEING;
 			count++;
@@ -344,39 +383,19 @@ int invalidate_inodes(struct super_block * sb)
 	int busy;
 	LIST_HEAD(throw_away);
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
+	inotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&throw_away);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	return busy;
 }
 
 EXPORT_SYMBOL(invalidate_inodes);
-
-int __invalidate_device(struct block_device *bdev)
-{
-	struct super_block *sb = get_super(bdev);
-	int res = 0;
-
-	if (sb) {
-		/*
-		 * no need to lock the super, get_super holds the
-		 * read semaphore so the filesystem cannot go away
-		 * under us (->put_super runs with the write lock
-		 * hold).
-		 */
-		shrink_dcache_sb(sb);
-		res = invalidate_inodes(sb);
-		drop_super(sb);
-	}
-	invalidate_bdev(bdev, 0);
-	return res;
-}
-EXPORT_SYMBOL(__invalidate_device);
 
 static int can_unuse(struct inode *inode)
 {
@@ -411,7 +430,7 @@ static void prune_icache(int nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
@@ -429,7 +448,8 @@ static void prune_icache(int nr_to_scan)
 			__iget(inode);
 			spin_unlock(&inode_lock);
 			if (remove_inode_buffers(inode))
-				reap += invalidate_inode_pages(&inode->i_data);
+				reap += invalidate_mapping_pages(&inode->i_data,
+								0, -1);
 			iput(inode);
 			spin_lock(&inode_lock);
 
@@ -439,22 +459,19 @@ static void prune_icache(int nr_to_scan)
 			if (!can_unuse(inode))
 				continue;
 		}
-		hlist_del_init(&inode->i_hash);
-		list_del_init(&inode->i_sb_list);
 		list_move(&inode->i_list, &freeable);
 		inode->i_state |= I_FREEING;
 		nr_pruned++;
 	}
 	inodes_stat.nr_unused -= nr_pruned;
+	if (current_is_kswapd())
+		__count_vm_events(KSWAPD_INODESTEAL, reap);
+	else
+		__count_vm_events(PGINODESTEAL, reap);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&freeable);
-	up(&iprune_sem);
-
-	if (current_is_kswapd())
-		mod_page_state(kswapd_inodesteal, reap);
-	else
-		mod_page_state(pginodesteal, reap);
+	mutex_unlock(&iprune_mutex);
 }
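/*
 * Illustrative sketch, not part of the patch: the contract that
 * shrink_icache_memory() below implements for the VM.  A shrinker is
 * called with nr == 0 to ask how many objects are reclaimable, and with
 * nr > 0 to reclaim roughly that many; returning -1 backs off when the
 * allocation context must not recurse into filesystems.  foo_* names
 * are hypothetical.
 */
static int foo_cache_shrink(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		foo_prune_cache(nr);		/* hypothetical pruner */
	}
	return (foo_cached_objects / 100) * sysctl_vfs_cache_pressure;
}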
 
 /*
@@ -466,7 +483,7 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(int nr, unsigned int gfp_mask)
+static int shrink_icache_memory(int nr, gfp_t gfp_mask)
 {
 	if (nr) {
 		/*
@@ -481,6 +498,11 @@ static int shrink_icache_memory(int nr, unsigned int gfp_mask)
 	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker icache_shrinker = {
+	.shrink = shrink_icache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
@@ -494,8 +516,7 @@ static struct inode * find_inode(struct super_block * sb, struct hlist_head *hea
 	struct inode * inode = NULL;
 
 repeat:
-	hlist_for_each (node, head) { 
-		inode = hlist_entry(node, struct inode, i_hash);
+	hlist_for_each_entry(inode, node, head, i_hash) {
 		if (inode->i_sb != sb)
 			continue;
 		if (!test(inode, data))
@@ -519,8 +540,7 @@ static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head
 	struct inode * inode = NULL;
 
 repeat:
-	hlist_for_each (node, head) {
-		inode = hlist_entry(node, struct inode, i_hash);
+	hlist_for_each_entry(inode, node, head, i_hash) {
 		if (inode->i_ino != ino)
 			continue;
 		if (inode->i_sb != sb)
@@ -534,15 +554,69 @@ repeat:
 	return node ? inode : NULL;
 }
 
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+	return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+			struct inode *inode)
+{
+	inodes_stat.nr_inodes++;
+	list_add(&inode->i_list, &inode_in_use);
+	list_add(&inode->i_sb_list, &sb->s_inodes);
+	if (head)
+		hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb: superblock inode belongs to
+ * @inode: inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the in use
+ * list, the owning superblock and the inode hash. This needs to be done under
+ * the inode_lock, so export a function to do this rather than exporting the
+ * inode_lock itself. We calculate the hash list to add to here so it is all
+ * internal, which requires the caller to have already set up the inode number
+ * in the inode to add.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+	spin_lock(&inode_lock);
+	__inode_add_to_lists(sb, head, inode);
+	spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
+
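/*
 * Illustrative sketch, not part of the patch: the intended caller of
 * inode_add_to_lists() is a filesystem that assigns inode numbers
 * itself, XFS being the motivating case.  i_ino must be set before the
 * call because the hash chain is derived from it.  foofs_* names are
 * hypothetical.
 */
static struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = foofs_build_inode(sb);	/* hypothetical */

	if (!inode)
		return NULL;
	inode->i_ino = ino;
	inode_add_to_lists(sb, inode);
	return inode;
}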
+ */ + static unsigned int last_ino; struct inode * inode; spin_lock_prefetch(&inode_lock); @@ -550,9 +624,7 @@ struct inode *new_inode(struct super_block *sb) inode = alloc_inode(sb); if (inode) { spin_lock(&inode_lock); - inodes_stat.nr_inodes++; - list_add(&inode->i_list, &inode_in_use); - list_add(&inode->i_sb_list, &sb->s_inodes); + __inode_add_to_lists(sb, NULL, inode); inode->i_ino = ++last_ino; inode->i_state = 0; spin_unlock(&inode_lock); @@ -564,6 +636,18 @@ EXPORT_SYMBOL(new_inode); void unlock_new_inode(struct inode *inode) { +#ifdef CONFIG_DEBUG_LOCK_ALLOC + if (inode->i_mode & S_IFDIR) { + struct file_system_type *type = inode->i_sb->s_type; + + /* + * ensure nobody is actually holding i_mutex + */ + mutex_destroy(&inode->i_mutex); + mutex_init(&inode->i_mutex); + lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key); + } +#endif /* * This is special! We do not need the spinlock * when clearing I_LOCK, because we're guaranteed @@ -599,10 +683,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h if (set(inode, data)) goto set_failed; - inodes_stat.nr_inodes++; - list_add(&inode->i_list, &inode_in_use); - list_add(&inode->i_sb_list, &sb->s_inodes); - hlist_add_head(&inode->i_hash, head); + __inode_add_to_lists(sb, head, inode); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); @@ -648,10 +729,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he old = find_inode_fast(sb, head, ino); if (!old) { inode->i_ino = ino; - inodes_stat.nr_inodes++; - list_add(&inode->i_list, &inode_in_use); - list_add(&inode->i_sb_list, &sb->s_inodes); - hlist_add_head(&inode->i_hash, head); + __inode_add_to_lists(sb, head, inode); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); @@ -675,16 +753,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he return inode; } -static inline unsigned long hash(struct super_block *sb, unsigned long hashval) -{ - unsigned long tmp; - - tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / - L1_CACHE_BYTES; - tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS); - return tmp & I_HASHMASK; -} - /** * iunique - get a unique inode number * @sb: superblock @@ -701,33 +769,34 @@ static inline unsigned long hash(struct super_block *sb, unsigned long hashval) */ ino_t iunique(struct super_block *sb, ino_t max_reserved) { - static ino_t counter; + /* + * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW + * error if st_ino won't fit in target struct field. Use 32bit counter + * here to attempt to avoid that. 
+ */ + static unsigned int counter; struct inode *inode; - struct hlist_head * head; + struct hlist_head *head; ino_t res; + spin_lock(&inode_lock); -retry: - if (counter > max_reserved) { - head = inode_hashtable + hash(sb,counter); + do { + if (counter <= max_reserved) + counter = max_reserved + 1; res = counter++; + head = inode_hashtable + hash(sb, res); inode = find_inode_fast(sb, head, res); - if (!inode) { - spin_unlock(&inode_lock); - return res; - } - } else { - counter = max_reserved + 1; - } - goto retry; - -} + } while (inode != NULL); + spin_unlock(&inode_lock); + return res; +} EXPORT_SYMBOL(iunique); struct inode *igrab(struct inode *inode) { spin_lock(&inode_lock); - if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) + if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))) __iget(inode); else /* @@ -748,6 +817,7 @@ EXPORT_SYMBOL(igrab); * @head: the head of the list to search * @test: callback used for comparisons between inodes * @data: opaque data pointer to pass to @test + * @wait: if true wait for the inode to be unlocked, if false do not * * ifind() searches for the inode specified by @data in the inode * cache. This is a generalized version of ifind_fast() for file systems where @@ -760,9 +830,9 @@ EXPORT_SYMBOL(igrab); * * Note, @test is called with the inode_lock held, so can't sleep. */ -static inline struct inode *ifind(struct super_block *sb, +static struct inode *ifind(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), - void *data) + void *data, const int wait) { struct inode *inode; @@ -771,7 +841,8 @@ static inline struct inode *ifind(struct super_block *sb, if (inode) { __iget(inode); spin_unlock(&inode_lock); - wait_on_inode(inode); + if (likely(wait)) + wait_on_inode(inode); return inode; } spin_unlock(&inode_lock); @@ -793,7 +864,7 @@ static inline struct inode *ifind(struct super_block *sb, * * Otherwise NULL is returned. */ -static inline struct inode *ifind_fast(struct super_block *sb, +static struct inode *ifind_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino) { struct inode *inode; @@ -811,7 +882,7 @@ static inline struct inode *ifind_fast(struct super_block *sb, } /** - * ilookup5 - search for an inode in the inode cache + * ilookup5_nowait - search for an inode in the inode cache * @sb: super block of file system to search * @hashval: hash value (usually inode number) to search for * @test: callback used for comparisons between inodes @@ -823,7 +894,38 @@ static inline struct inode *ifind_fast(struct super_block *sb, * identification of an inode. * * If the inode is in the cache, the inode is returned with an incremented - * reference count. + * reference count. Note, the inode lock is not waited upon so you have to be + * very careful what you do with the returned inode. You probably should be + * using ilookup5() instead. + * + * Otherwise NULL is returned. + * + * Note, @test is called with the inode_lock held, so can't sleep. 
+ */ +struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, + int (*test)(struct inode *, void *), void *data) +{ + struct hlist_head *head = inode_hashtable + hash(sb, hashval); + + return ifind(sb, head, test, data, 0); +} + +EXPORT_SYMBOL(ilookup5_nowait); + +/** + * ilookup5 - search for an inode in the inode cache + * @sb: super block of file system to search + * @hashval: hash value (usually inode number) to search for + * @test: callback used for comparisons between inodes + * @data: opaque data pointer to pass to @test + * + * ilookup5() uses ifind() to search for the inode specified by @hashval and + * @data in the inode cache. This is a generalized version of ilookup() for + * file systems where the inode number is not sufficient for unique + * identification of an inode. + * + * If the inode is in the cache, the inode lock is waited upon and the inode is + * returned with an incremented reference count. * * Otherwise NULL is returned. * @@ -834,7 +936,7 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval, { struct hlist_head *head = inode_hashtable + hash(sb, hashval); - return ifind(sb, head, test, data); + return ifind(sb, head, test, data, 1); } EXPORT_SYMBOL(ilookup5); @@ -870,8 +972,6 @@ EXPORT_SYMBOL(ilookup); * @set: callback used to initialize a new struct inode * @data: opaque data pointer to pass to @test and @set * - * This is iget() without the read_inode() portion of get_new_inode(). - * * iget5_locked() uses ifind() to search for the inode specified by @hashval * and @data in the inode cache and if present it is returned with an increased * reference count. This is a generalized version of iget_locked() for file @@ -891,7 +991,7 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, struct hlist_head *head = inode_hashtable + hash(sb, hashval); struct inode *inode; - inode = ifind(sb, head, test, data); + inode = ifind(sb, head, test, data, 1); if (inode) return inode; /* @@ -908,8 +1008,6 @@ EXPORT_SYMBOL(iget5_locked); * @sb: super block of file system * @ino: inode number to get * - * This is iget() without the read_inode() portion of get_new_inode_fast(). - * * iget_locked() uses ifind_fast() to search for the inode specified by @ino in * the inode cache and if present it is returned with an increased reference * count. 
@@ -870,8 +972,6 @@ EXPORT_SYMBOL(ilookup);
  * @set:	callback used to initialize a new struct inode
  * @data:	opaque data pointer to pass to @test and @set
  *
- * This is iget() without the read_inode() portion of get_new_inode().
- *
  * iget5_locked() uses ifind() to search for the inode specified by @hashval
  * and @data in the inode cache and if present it is returned with an increased
  * reference count. This is a generalized version of iget_locked() for file
@@ -891,7 +991,7 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode;
 
-	inode = ifind(sb, head, test, data);
+	inode = ifind(sb, head, test, data, 1);
 	if (inode)
 		return inode;
 	/*
@@ -908,8 +1008,6 @@ EXPORT_SYMBOL(iget5_locked);
  * @sb:		super block of file system
  * @ino:	inode number to get
  *
- * This is iget() without the read_inode() portion of get_new_inode_fast().
- *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
@@ -937,6 +1035,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 
 EXPORT_SYMBOL(iget_locked);
 
+int insert_inode_locked(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	ino_t ino = inode->i_ino;
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
+	struct inode *old;
+
+	inode->i_state |= I_LOCK|I_NEW;
+	while (1) {
+		spin_lock(&inode_lock);
+		old = find_inode_fast(sb, head, ino);
+		if (likely(!old)) {
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode_lock);
+			return 0;
+		}
+		__iget(old);
+		spin_unlock(&inode_lock);
+		wait_on_inode(old);
+		if (unlikely(!hlist_unhashed(&old->i_hash))) {
+			iput(old);
+			return -EBUSY;
+		}
+		iput(old);
+	}
+}
+
+EXPORT_SYMBOL(insert_inode_locked);
+
+int insert_inode_locked4(struct inode *inode, unsigned long hashval,
+		int (*test)(struct inode *, void *), void *data)
+{
+	struct super_block *sb = inode->i_sb;
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *old;
+
+	inode->i_state |= I_LOCK|I_NEW;
+
+	while (1) {
+		spin_lock(&inode_lock);
+		old = find_inode(sb, head, test, data);
+		if (likely(!old)) {
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode_lock);
+			return 0;
+		}
+		__iget(old);
+		spin_unlock(&inode_lock);
+		wait_on_inode(old);
+		if (unlikely(!hlist_unhashed(&old->i_hash))) {
+			iput(old);
+			return -EBUSY;
+		}
+		iput(old);
+	}
+}
+
+EXPORT_SYMBOL(insert_inode_locked4);
+
 /**
  *	__insert_inode_hash - hash an inode
  *	@inode: unhashed inode
@@ -984,33 +1141,34 @@ EXPORT_SYMBOL(remove_inode_hash);
  */
 void generic_delete_inode(struct inode *inode)
 {
-	struct super_operations *op = inode->i_sb->s_op;
+	const struct super_operations *op = inode->i_sb->s_op;
 
 	list_del_init(&inode->i_list);
 	list_del_init(&inode->i_sb_list);
-	inode->i_state|=I_FREEING;
+	inode->i_state |= I_FREEING;
 	inodes_stat.nr_inodes--;
 	spin_unlock(&inode_lock);
 
-	if (inode->i_data.nrpages)
-		truncate_inode_pages(&inode->i_data, 0);
-
 	security_inode_delete(inode);
 
 	if (op->delete_inode) {
 		void (*delete)(struct inode *) = op->delete_inode;
 		if (!is_bad_inode(inode))
 			DQUOT_INIT(inode);
-		/* s_op->delete_inode internally recalls clear_inode() */
+		/* Filesystems implementing their own
+		 * s_op->delete_inode are required to call
+		 * truncate_inode_pages and clear_inode()
+		 * internally */
 		delete(inode);
-	} else
+	} else {
+		truncate_inode_pages(&inode->i_data, 0);
 		clear_inode(inode);
+	}
 	spin_lock(&inode_lock);
 	hlist_del_init(&inode->i_hash);
 	spin_unlock(&inode_lock);
 	wake_up_inode(inode);
-	if (inode->i_state != I_CLEAR)
-		BUG();
+	BUG_ON(inode->i_state != I_CLEAR);
 	destroy_inode(inode);
 }
 
@@ -1021,10 +1179,10 @@ static void generic_forget_inode(struct inode *inode)
 	struct super_block *sb = inode->i_sb;
 
 	if (!hlist_unhashed(&inode->i_hash)) {
-		if (!(inode->i_state & (I_DIRTY|I_LOCK)))
+		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
 			list_move(&inode->i_list, &inode_unused);
 		inodes_stat.nr_unused++;
-		if (!sb || (sb->s_flags & MS_ACTIVE)) {
+		if (sb->s_flags & MS_ACTIVE) {
 			spin_unlock(&inode_lock);
 			return;
 		}
@@ -1044,6 +1202,7 @@ static void generic_forget_inode(struct inode *inode)
 	if (inode->i_data.nrpages)
 		truncate_inode_pages(&inode->i_data, 0);
 	clear_inode(inode);
+	wake_up_inode(inode);
 	destroy_inode(inode);
 }
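/*
 * Illustrative sketch, not part of the patch: iput_final() below uses
 * ->drop_inode if the filesystem supplies one, else generic_drop_inode().
 * A filesystem with no backing store typically wants unreferenced inodes
 * destroyed immediately instead of cached, and wires up
 * generic_delete_inode the way ramfs does; foofs_sops is hypothetical.
 */
static const struct super_operations foofs_sops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
};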
 
 /*
@@ -1075,7 +1234,7 @@ EXPORT_SYMBOL_GPL(generic_drop_inode);
  */
 static inline void iput_final(struct inode *inode)
 {
-	struct super_operations *op = inode->i_sb->s_op;
+	const struct super_operations *op = inode->i_sb->s_op;
 	void (*drop)(struct inode *) = generic_drop_inode;
 
 	if (op && op->drop_inode)
@@ -1095,13 +1254,8 @@ static inline void iput_final(struct inode *inode)
 void iput(struct inode *inode)
 {
 	if (inode) {
-		struct super_operations *op = inode->i_sb->s_op;
-
 		BUG_ON(inode->i_state == I_CLEAR);
-
-		if (op && op->put_inode)
-			op->put_inode(inode);
-
 		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
 			iput_final(inode);
 	}
@@ -1127,74 +1281,104 @@ sector_t bmap(struct inode * inode, sector_t block)
 	res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
 	return res;
 }
-
 EXPORT_SYMBOL(bmap);
 
 /**
- * update_atime	-	update the access time
- * @inode: inode accessed
+ * touch_atime	-	update the access time
+ * @mnt: mount the inode is accessed on
+ * @dentry: dentry accessed
  *
  * Update the accessed time on an inode and mark it for writeback.
  * This function automatically handles read only file systems and media,
  * as well as the "noatime" flag and inode specific "noatime" markers.
  */
-void update_atime(struct inode *inode)
+void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
 {
+	struct inode *inode = dentry->d_inode;
 	struct timespec now;
 
-	if (IS_NOATIME(inode))
-		return;
-	if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
-		return;
-	if (IS_RDONLY(inode))
+	if (mnt_want_write(mnt))
 		return;
+	if (inode->i_flags & S_NOATIME)
+		goto out;
+	if (IS_NOATIME(inode))
+		goto out;
+	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+		goto out;
+
+	if (mnt->mnt_flags & MNT_NOATIME)
+		goto out;
+	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
+		goto out;
+	if (mnt->mnt_flags & MNT_RELATIME) {
+		/*
+		 * With relative atime, only update atime if the previous
+		 * atime is earlier than either the ctime or mtime.
+		 */
+		if (timespec_compare(&inode->i_mtime, &inode->i_atime) < 0 &&
+		    timespec_compare(&inode->i_ctime, &inode->i_atime) < 0)
+			goto out;
+	}
 
 	now = current_fs_time(inode->i_sb);
-	if (!timespec_equal(&inode->i_atime, &now)) {
-		inode->i_atime = now;
-		mark_inode_dirty_sync(inode);
-	} else {
-		if (!timespec_equal(&inode->i_atime, &now))
-			inode->i_atime = now;
-	}
-}
+	if (timespec_equal(&inode->i_atime, &now))
+		goto out;
 
-EXPORT_SYMBOL(update_atime);
+	inode->i_atime = now;
+	mark_inode_dirty_sync(inode);
+out:
+	mnt_drop_write(mnt);
+}
+EXPORT_SYMBOL(touch_atime);
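/*
 * Illustrative sketch, not part of the patch: callers do not normally
 * invoke touch_atime() directly.  Read paths go through file_accessed(),
 * which in this era of include/linux/fs.h looks roughly like the
 * following and adds the per-descriptor O_NOATIME check on top:
 */
static inline void file_accessed(struct file *file)
{
	if (!(file->f_flags & O_NOATIME))
		touch_atime(file->f_path.mnt, file->f_path.dentry);
}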
 
 /**
- * inode_update_time	-	update mtime and ctime time
- * @inode: inode accessed
- * @ctime_too: update ctime too
+ * file_update_time	-	update mtime and ctime time
+ * @file: file accessed
  *
- * Update the mtime time on an inode and mark it for writeback.
- * When ctime_too is specified update the ctime too.
+ * Update the mtime and ctime members of an inode and mark the inode
+ * for writeback.  Note that this function is meant exclusively for
+ * usage in the file write path of filesystems, and filesystems may
+ * choose to explicitly ignore update via this function with the
+ * S_NOCMTIME inode flag, e.g. for network filesystems where these
+ * timestamps are handled by the server.
 */
-void inode_update_time(struct inode *inode, int ctime_too)
+void file_update_time(struct file *file)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	struct timespec now;
 	int sync_it = 0;
+	int err;
 
 	if (IS_NOCMTIME(inode))
 		return;
-	if (IS_RDONLY(inode))
+
+	err = mnt_want_write(file->f_path.mnt);
+	if (err)
 		return;
 
 	now = current_fs_time(inode->i_sb);
-	if (!timespec_equal(&inode->i_mtime, &now))
+	if (!timespec_equal(&inode->i_mtime, &now)) {
+		inode->i_mtime = now;
 		sync_it = 1;
-	inode->i_mtime = now;
+	}
 
-	if (ctime_too) {
-		if (!timespec_equal(&inode->i_ctime, &now))
-			sync_it = 1;
+	if (!timespec_equal(&inode->i_ctime, &now)) {
 		inode->i_ctime = now;
+		sync_it = 1;
+	}
+
+	if (IS_I_VERSION(inode)) {
+		inode_inc_iversion(inode);
+		sync_it = 1;
 	}
+
 	if (sync_it)
 		mark_inode_dirty_sync(inode);
+	mnt_drop_write(file->f_path.mnt);
 }
 
-EXPORT_SYMBOL(inode_update_time);
+EXPORT_SYMBOL(file_update_time);
 
 int inode_needs_sync(struct inode *inode)
 {
@@ -1207,66 +1391,29 @@ int inode_needs_sync(struct inode *inode)
 
 EXPORT_SYMBOL(inode_needs_sync);
 
-/*
- * Quota functions that want to walk the inode lists..
- */
-#ifdef CONFIG_QUOTA
-
-/* Function back in dquot.c */
-int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
-
-void remove_dquot_ref(struct super_block *sb, int type,
-			struct list_head *tofree_head)
-{
-	struct inode *inode;
-
-	if (!sb->dq_op)
-		return;	/* nothing to do */
-	spin_lock(&inode_lock);	/* This lock is for inodes code */
-
-	/*
-	 * We don't have to lock against quota code - test IS_QUOTAINIT is
-	 * just for speedup...
-	 */
-	list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
-		if (!IS_NOQUOTA(inode))
-			remove_inode_dquot_ref(inode, type, tofree_head);
-
-	spin_unlock(&inode_lock);
-}
-
-#endif
-
 int inode_wait(void *word)
 {
 	schedule();
 	return 0;
 }
+EXPORT_SYMBOL(inode_wait);
 
 /*
- * If we try to find an inode in the inode hash while it is being deleted, we
- * have to wait until the filesystem completes its deletion before reporting
- * that it isn't found. This is because iget will immediately call
- * ->read_inode, and we want to be sure that evidence of the deletion is found
- * by ->read_inode.
+ * If we try to find an inode in the inode hash while it is being
+ * deleted, we have to wait until the filesystem completes its
+ * deletion before reporting that it isn't found.  This function waits
+ * until the deletion _might_ have completed.  Callers are responsible
+ * to recheck inode state.
+ *
+ * It doesn't matter if I_LOCK is not set initially, a call to
+ * wake_up_inode() after removing from the hash list will DTRT.
+ *
 * This is called with inode_lock held.
 */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
 	wait_queue_head_t *wq;
 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
-
-	/*
-	 * I_FREEING and I_CLEAR are cleared in process context under
-	 * inode_lock, so we have to give the tasks who would clear them
-	 * a chance to run and acquire inode_lock.
-	 */
-	if (!(inode->i_state & I_LOCK)) {
-		spin_unlock(&inode_lock);
-		yield();
-		spin_lock(&inode_lock);
-		return;
-	}
 	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode_lock);
@@ -1275,14 +1422,41 @@ static void __wait_on_freeing_inode(struct inode *inode)
 	spin_lock(&inode_lock);
 }
 
-void wake_up_inode(struct inode *inode)
+/*
+ * We rarely want to lock two inodes simultaneously when they do not have
+ * a parent/child relationship (a directory and an inode within it, for
+ * example).  The vast majority of file systems should be able to get
+ * along fine without this. Do not use these functions except as a last
+ * resort.
+ */
+void inode_double_lock(struct inode *inode1, struct inode *inode2)
 {
-	/*
-	 * Prevent speculative execution through spin_unlock(&inode_lock);
-	 */
-	smp_mb();
-	wake_up_bit(&inode->i_state, __I_LOCK);
+	if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
+		if (inode1)
+			mutex_lock(&inode1->i_mutex);
+		else if (inode2)
+			mutex_lock(&inode2->i_mutex);
+		return;
+	}
+
+	if (inode1 < inode2) {
+		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+	} else {
+		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
+		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
+	}
+}
+EXPORT_SYMBOL(inode_double_lock);
+
+void inode_double_unlock(struct inode *inode1, struct inode *inode2)
+{
+	if (inode1)
+		mutex_unlock(&inode1->i_mutex);
+
+	if (inode2 && inode2 != inode1)
+		mutex_unlock(&inode2->i_mutex);
 }
+EXPORT_SYMBOL(inode_double_unlock);
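/*
 * Illustrative sketch, not part of the patch: the calling pattern for
 * the pair above.  Argument order does not matter, since the helpers
 * order the two mutex acquisitions by address, and NULL or identical
 * inodes are tolerated.  foofs_* names are hypothetical.
 */
static int foofs_copy_between(struct inode *src, struct inode *dst)
{
	int err;

	inode_double_lock(src, dst);
	err = foofs_do_copy(src, dst);		/* hypothetical worker */
	inode_double_unlock(src, dst);
	return err;
}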
 
 static __initdata unsigned long ihash_entries;
 static int __init set_ihash_entries(char *str)
@@ -1321,14 +1495,18 @@ void __init inode_init_early(void)
 		INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }
 
-void __init inode_init(unsigned long mempages)
+void __init inode_init(void)
 {
 	int loop;
 
 	/* inode slab cache */
-	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
-				0, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_once, NULL);
-	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+	inode_cachep = kmem_cache_create("inode_cache",
+					 sizeof(struct inode),
+					 0,
+					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+					 SLAB_MEM_SPREAD),
+					 init_once);
+	register_shrinker(&icache_shrinker);
 
 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)