X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Ffile_table.c;h=4bef4c01ec6ff4b0ef7ce00816f3c124517031c0;hb=7707e61c70999a1f9f1fd9ac92e293c198585152;hp=03d83cb686b12598191d97edd55779315d74dc8a;hpb=1da177e4c3f41524e886b7f1b8a0c1fc7321cac2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/file_table.c b/fs/file_table.c
index 03d83cb..4bef4c0 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -8,121 +8,263 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/security.h>
 #include <linux/eventpoll.h>
+#include <linux/rcupdate.h>
 #include <linux/mount.h>
+#include <linux/capability.h>
 #include <linux/cdev.h>
+#include <linux/fsnotify.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
+
+#include <asm/atomic.h>
 
 /* sysctl tunables... */
 struct files_stat_struct files_stat = {
 	.max_files = NR_FILE
 };
 
-EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
-
 /* public. Not pretty! */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+
+/* SLAB cache for file structures */
+static struct kmem_cache *filp_cachep __read_mostly;
 
-static DEFINE_SPINLOCK(filp_count_lock);
+static struct percpu_counter nr_files __cacheline_aligned_in_smp;
 
-/* slab constructors and destructors are called from arbitrary
- * context and must be fully threaded - use a local spinlock
- * to protect files_stat.nr_files
+static inline void file_free_rcu(struct rcu_head *head)
+{
+	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+
+	put_cred(f->f_cred);
+	kmem_cache_free(filp_cachep, f);
+}
+
+static inline void file_free(struct file *f)
+{
+	percpu_counter_dec(&nr_files);
+	file_check_state(f);
+	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+}
+
+/*
+ * Return the total number of open files in the system
 */
-void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
-{
-	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
-		unsigned long flags;
-		spin_lock_irqsave(&filp_count_lock, flags);
-		files_stat.nr_files++;
-		spin_unlock_irqrestore(&filp_count_lock, flags);
-	}
+static int get_nr_files(void)
+{
+	return percpu_counter_read_positive(&nr_files);
 }
 
-void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
+/*
+ * Return the maximum number of open files in the system
+ */
+int get_max_files(void)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&filp_count_lock, flags);
-	files_stat.nr_files--;
-	spin_unlock_irqrestore(&filp_count_lock, flags);
+	return files_stat.max_files;
 }
+EXPORT_SYMBOL_GPL(get_max_files);
 
-static inline void file_free(struct file *f)
+/*
+ * Handle nr_files sysctl
+ */
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_files(ctl_table *table, int write,
+                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	kmem_cache_free(filp_cachep, f);
+	files_stat.nr_files = get_nr_files();
+	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
+#else
+int proc_nr_files(ctl_table *table, int write,
+                     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
+#endif
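The hunk above replaces the spinlock-protected files_stat.nr_files bookkeeping (and the slab constructor/destructor hack) with a percpu_counter: percpu_counter_inc()/percpu_counter_dec() touch only a CPU-local count, percpu_counter_read_positive() gives a cheap but approximate total, and percpu_counter_sum_positive() folds in every CPU's contribution for an exact, expensive answer. Here is a minimal user-space sketch of that idea using C11 atomics; BATCH, counter_add() and approx_read() are illustrative names, not the kernel API:

	/* pcpu_sketch.c - user-space sketch of the percpu_counter idea.
	 * Each thread batches updates in a thread-local delta and folds it
	 * into the shared total only when the delta exceeds a threshold,
	 * so the shared cacheline is touched rarely.  The price is that
	 * approx_read() can be stale by up to (threads * BATCH).
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	#define BATCH 32			/* like percpu_counter_batch */

	static atomic_long shared_total;	/* the "global" part */
	static _Thread_local long local_delta;	/* the "per-CPU" part */

	static void counter_add(long amount)
	{
		local_delta += amount;
		if (local_delta >= BATCH || local_delta <= -BATCH) {
			atomic_fetch_add(&shared_total, local_delta);
			local_delta = 0;
		}
	}

	/* Cheap, possibly stale, clamped at zero - compare
	 * percpu_counter_read_positive() */
	static long approx_read(void)
	{
		long v = atomic_load(&shared_total);
		return v > 0 ? v : 0;
	}

	int main(void)
	{
		for (int i = 0; i < 100; i++)
			counter_add(1);
		/* prints 96: four increments are still thread-local */
		printf("approx: %ld\n", approx_read());
		return 0;
	}

The trade-off shows up in get_empty_filp() below: the fast, approximate read guards the common path, and the exact but expensive sum is only taken when the limit appears to have been hit.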
 /* Find an unused file structure and return a pointer to it.
  * Returns NULL if there are no more free file structures or
  * we run out of memory.
+ *
+ * Be very careful using this.  You are responsible for
+ * getting write access to any mount that you might assign
+ * to this filp, if it is opened for write.  If this is not
+ * done, you will imbalance the mount's writer count
+ * and trigger a warning at __fput() time.
  */
 struct file *get_empty_filp(void)
 {
-static int old_max;
+	const struct cred *cred = current_cred();
+	static int old_max;
 	struct file * f;
 
 	/*
 	 * Privileged users can go above max_files
 	 */
-	if (files_stat.nr_files < files_stat.max_files ||
-				capable(CAP_SYS_ADMIN)) {
-		f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
-		if (f) {
-			memset(f, 0, sizeof(*f));
-			if (security_file_alloc(f)) {
-				file_free(f);
-				goto fail;
-			}
-			eventpoll_init_file(f);
-			atomic_set(&f->f_count, 1);
-			f->f_uid = current->fsuid;
-			f->f_gid = current->fsgid;
-			rwlock_init(&f->f_owner.lock);
-			/* f->f_version: 0 */
-			INIT_LIST_HEAD(&f->f_list);
-			f->f_maxcount = INT_MAX;
-			return f;
-		}
+	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+		/*
+		 * percpu_counters are inaccurate.  Do an expensive check before
+		 * we go and fail.
+		 */
+		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
+			goto over;
 	}
 
+	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
+	if (f == NULL)
+		goto fail;
+
+	percpu_counter_inc(&nr_files);
+	if (security_file_alloc(f))
+		goto fail_sec;
+
+	INIT_LIST_HEAD(&f->f_u.fu_list);
+	atomic_long_set(&f->f_count, 1);
+	rwlock_init(&f->f_owner.lock);
+	f->f_cred = get_cred(cred);
+	spin_lock_init(&f->f_lock);
+	eventpoll_init_file(f);
+	/* f->f_version: 0 */
+	return f;
+
+over:
 	/* Ran out of filps - report that */
-	if (files_stat.max_files >= old_max) {
+	if (get_nr_files() > old_max) {
 		printk(KERN_INFO "VFS: file-max limit %d reached\n",
-					files_stat.max_files);
-		old_max = files_stat.max_files;
-	} else {
-		/* Big problems... */
-		printk(KERN_WARNING "VFS: filp allocation failed\n");
+					get_max_files());
+		old_max = get_nr_files();
 	}
+	goto fail;
+
+fail_sec:
+	file_free(f);
 fail:
 	return NULL;
 }
 
 EXPORT_SYMBOL(get_empty_filp);
 
-void fastcall fput(struct file *file)
+/**
+ * alloc_file - allocate and initialize a 'struct file'
+ * @mnt: the vfsmount on which the file will reside
+ * @dentry: the dentry representing the new file
+ * @mode: the mode with which the new file will be opened
+ * @fop: the 'struct file_operations' for the new file
+ *
+ * Use this instead of get_empty_filp() to get a new
+ * 'struct file'.  Do so to avoid the initialization
+ * pitfalls listed for init_file(); this is the preferred
+ * interface, rather than calling init_file() directly.
+ *
+ * If all the callers of init_file() are eliminated, its
+ * code should be moved into this function.
+ */
+struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
+		fmode_t mode, const struct file_operations *fop)
 {
-	if (atomic_dec_and_test(&file->f_count))
+	struct file *file;
+
+	file = get_empty_filp();
+	if (!file)
+		return NULL;
+
+	init_file(file, mnt, dentry, mode, fop);
+	return file;
+}
+EXPORT_SYMBOL(alloc_file);
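alloc_file() is the one-call replacement for open-coding get_empty_filp() plus field assignments. A hypothetical caller, loosely modeled on how fs/pipe.c of this era creates its files; examplefs_mnt, examplefs_fops and examplefs_new_dentry() are stand-ins for a filesystem's own mount, file_operations and dentry setup, not real kernel symbols:

	/*
	 * Sketch: hand out a read-only file for an in-kernel object.
	 * On success the dentry reference transfers to the file;
	 * alloc_file() takes its own mount reference via mntget().
	 */
	static struct file *examplefs_open_object(void)
	{
		struct dentry *dentry;
		struct file *file;

		dentry = examplefs_new_dentry();	/* holds one reference */
		if (!dentry)
			return ERR_PTR(-ENOMEM);

		/* FMODE_READ: no writer accounting on the mount is needed */
		file = alloc_file(examplefs_mnt, dentry, FMODE_READ,
				  &examplefs_fops);
		if (!file) {
			dput(dentry);	/* alloc_file() did not consume it */
			return ERR_PTR(-ENFILE);
		}
		return file;
	}

For a file opened FMODE_WRITE the caller would also need write access on the mount, which is the pitfall the get_empty_filp() comment above warns about.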
+/**
+ * init_file - initialize a 'struct file'
+ * @file: the already allocated 'struct file' to be initialized
+ * @mnt: the vfsmount on which the file resides
+ * @dentry: the dentry representing this file
+ * @mode: the mode the file is opened with
+ * @fop: the 'struct file_operations' for this file
+ *
+ * Use this instead of setting the members directly.  Doing so
+ * avoids making mistakes like forgetting the mntget() or
+ * forgetting to take a write on the mnt.
+ *
+ * Note: This is a crappy interface.  It is here to make
+ * merging with the existing users of get_empty_filp()
+ * who have complex failure logic easier.  All users
+ * of this should be moving to alloc_file().
+ */
+int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
+	   fmode_t mode, const struct file_operations *fop)
+{
+	int error = 0;
+	file->f_path.dentry = dentry;
+	file->f_path.mnt = mntget(mnt);
+	file->f_mapping = dentry->d_inode->i_mapping;
+	file->f_mode = mode;
+	file->f_op = fop;
+
+	/*
+	 * These mounts don't really matter in practice
+	 * for r/o bind mounts.  They aren't userspace-
+	 * visible.  We do this for consistency, and so
+	 * that we can do debugging checks at __fput()
+	 */
+	if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
+		file_take_write(file);
+		error = mnt_clone_write(mnt);
+		WARN_ON(error);
+	}
+	return error;
+}
+EXPORT_SYMBOL(init_file);
+
+void fput(struct file *file)
+{
+	if (atomic_long_dec_and_test(&file->f_count))
 		__fput(file);
 }
 
 EXPORT_SYMBOL(fput);
 
+/**
+ * drop_file_write_access - give up ability to write to a file
+ * @file: the file to which we will stop writing
+ *
+ * This is a central place which will give up the ability
+ * to write to @file, along with access to write through
+ * its vfsmount.
+ */
+void drop_file_write_access(struct file *file)
+{
+	struct vfsmount *mnt = file->f_path.mnt;
+	struct dentry *dentry = file->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+
+	put_write_access(inode);
+
+	if (special_file(inode->i_mode))
+		return;
+	if (file_check_writeable(file) != 0)
+		return;
+	mnt_drop_write(mnt);
+	file_release_write(file);
+}
+EXPORT_SYMBOL_GPL(drop_file_write_access);
+
 /* __fput is called from task context when aio completion releases the
  * last use of a struct file *.  Do not use otherwise.
  */
-void fastcall __fput(struct file *file)
+void __fput(struct file *file)
 {
-	struct dentry *dentry = file->f_dentry;
-	struct vfsmount *mnt = file->f_vfsmnt;
+	struct dentry *dentry = file->f_path.dentry;
+	struct vfsmount *mnt = file->f_path.mnt;
 	struct inode *inode = dentry->d_inode;
 
 	might_sleep();
+
+	fsnotify_close(file);
 	/*
 	 * The function eventpoll_release() should be the first called
 	 * in the file cleanup chain.
@@ -130,32 +272,43 @@ void fastcall __fput(struct file *file)
 	eventpoll_release(file);
 	locks_remove_flock(file);
 
+	if (unlikely(file->f_flags & FASYNC)) {
+		if (file->f_op && file->f_op->fasync)
+			file->f_op->fasync(-1, file, 0);
+	}
 	if (file->f_op && file->f_op->release)
 		file->f_op->release(inode, file);
 	security_file_free(file);
-	if (unlikely(inode->i_cdev != NULL))
+	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
 		cdev_put(inode->i_cdev);
 	fops_put(file->f_op);
-	if (file->f_mode & FMODE_WRITE)
-		put_write_access(inode);
+	put_pid(file->f_owner.pid);
 	file_kill(file);
-	file->f_dentry = NULL;
-	file->f_vfsmnt = NULL;
+	if (file->f_mode & FMODE_WRITE)
+		drop_file_write_access(file);
+	file->f_path.dentry = NULL;
+	file->f_path.mnt = NULL;
 	file_free(file);
 	dput(dentry);
 	mntput(mnt);
 }
 
-struct file fastcall *fget(unsigned int fd)
+struct file *fget(unsigned int fd)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
 
-	spin_lock(&files->file_lock);
+	rcu_read_lock();
 	file = fcheck_files(files, fd);
-	if (file)
-		get_file(file);
-	spin_unlock(&files->file_lock);
+	if (file) {
+		if (!atomic_long_inc_not_zero(&file->f_count)) {
+			/* File object ref couldn't be taken */
+			rcu_read_unlock();
+			return NULL;
+		}
+	}
+	rcu_read_unlock();
+
 	return file;
 }
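fget() now looks up the descriptor under rcu_read_lock() rather than files->file_lock. Because a concurrent fput() can drop the last reference while the lookup is in flight (the struct file itself stays valid until the grace period expires, thanks to the call_rcu()-based file_free() above), the reference has to be taken with atomic_long_inc_not_zero(): increment only if the count has not already reached zero. A user-space sketch of that primitive as a C11 compare-and-swap loop; ref_inc_not_zero() is an illustrative name, not the kernel's implementation:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Take a reference only if the object is still live (count != 0).
	 * Returns false if the last reference was already dropped; the
	 * caller must then treat the lookup as having failed.
	 */
	static bool ref_inc_not_zero(atomic_long *count)
	{
		long old = atomic_load(count);

		while (old != 0) {
			/* on failure, 'old' is reloaded with the current value */
			if (atomic_compare_exchange_weak(count, &old, old + 1))
				return true;
		}
		return false;
	}

fget_light() below applies the same pattern, but skips the atomics entirely when the fd table is not shared, since the task then already holds a stable reference.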
@@ -168,7 +321,7 @@ EXPORT_SYMBOL(fget);
  * and a flag is returned to be passed to the corresponding fput_light().
  * There must not be a cloning between an fget_light/fput_light pair.
  */
-struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
+struct file *fget_light(unsigned int fd, int *fput_needed)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
 
@@ -177,21 +330,25 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 	if (likely((atomic_read(&files->count) == 1))) {
 		file = fcheck_files(files, fd);
 	} else {
-		spin_lock(&files->file_lock);
+		rcu_read_lock();
 		file = fcheck_files(files, fd);
 		if (file) {
-			get_file(file);
-			*fput_needed = 1;
+			if (atomic_long_inc_not_zero(&file->f_count))
+				*fput_needed = 1;
+			else
+				/* Didn't get the reference, someone else freed it */
+				file = NULL;
 		}
-		spin_unlock(&files->file_lock);
+		rcu_read_unlock();
 	}
+
 	return file;
 }
 
 void put_filp(struct file *file)
 {
-	if (atomic_dec_and_test(&file->f_count)) {
+	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
 		file_kill(file);
 		file_free(file);
@@ -203,28 +360,27 @@ void file_move(struct file *file, struct list_head *list)
 	if (!list)
 		return;
 	file_list_lock();
-	list_move(&file->f_list, list);
+	list_move(&file->f_u.fu_list, list);
 	file_list_unlock();
 }
 
 void file_kill(struct file *file)
 {
-	if (!list_empty(&file->f_list)) {
+	if (!list_empty(&file->f_u.fu_list)) {
 		file_list_lock();
-		list_del_init(&file->f_list);
+		list_del_init(&file->f_u.fu_list);
 		file_list_unlock();
 	}
 }
 
 int fs_may_remount_ro(struct super_block *sb)
 {
-	struct list_head *p;
+	struct file *file;
 
 	/* Check that no files are currently opened for writing. */
 	file_list_lock();
-	list_for_each(p, &sb->s_files) {
-		struct file *file = list_entry(p, struct file, f_list);
-		struct inode *inode = file->f_dentry->d_inode;
+	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
+		struct inode *inode = file->f_path.dentry->d_inode;
 
 		/* File with pending delete? */
 		if (inode->i_nlink == 0)
@@ -241,10 +397,53 @@ too_bad:
 	return 0;
 }
 
+/**
+ * mark_files_ro - mark all files read-only
+ * @sb: superblock in question
+ *
+ * All files are marked read-only.  We don't care about pending
+ * delete files, so this should be used in 'force' mode only.
+ */
+void mark_files_ro(struct super_block *sb)
+{
+	struct file *f;
+
+retry:
+	file_list_lock();
+	list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
+		struct vfsmount *mnt;
+		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
+			continue;
+		if (!file_count(f))
+			continue;
+		if (!(f->f_mode & FMODE_WRITE))
+			continue;
+		f->f_mode &= ~FMODE_WRITE;
+		if (file_check_writeable(f) != 0)
+			continue;
+		file_release_write(f);
+		mnt = mntget(f->f_path.mnt);
+		file_list_unlock();
+		/*
+		 * This can sleep, so we can't hold
+		 * the file_list_lock() spinlock.
+		 */
+		mnt_drop_write(mnt);
+		mntput(mnt);
+		goto retry;
+	}
+	file_list_unlock();
+}
+
 void __init files_init(unsigned long mempages)
 {
 	int n;
-	/* One file with associated inode and dcache is very roughly 1K.
+
+	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+
+	/*
+	 * One file with associated inode and dcache is very roughly 1K.
 	 * By default don't use more than 10% of our memory for files.
 	 */
 
@@ -252,4 +451,6 @@ void __init files_init(unsigned long mempages)
 	files_stat.max_files = n;
 	if (files_stat.max_files < NR_FILE)
 		files_stat.max_files = NR_FILE;
+	files_defer_init();
+	percpu_counter_init(&nr_files, 0);
 }
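For scale: in kernels of this vintage, the line elided between the two hunks above computes n = (mempages * (PAGE_SIZE / 1024)) / 10, i.e. 10% of memory at roughly 1K per open file, floored at NR_FILE. A stand-alone check of that arithmetic; the page size and memory size are assumptions for illustration:

	/* files_init_calc.c - worked example of the max_files sizing */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define NR_FILE   8192UL	/* default floor, from linux/fs.h */

	int main(void)
	{
		/* 1 GiB of RAM => 262144 pages of 4 KiB */
		unsigned long mempages = (1UL << 30) / PAGE_SIZE;
		unsigned long n = (mempages * (PAGE_SIZE / 1024)) / 10;
		unsigned long max_files = n < NR_FILE ? NR_FILE : n;

		/* 262144 * 4 / 10 = 104857 files, ~100 MiB at ~1K each */
		printf("max_files = %lu\n", max_files);
		return 0;
	}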