X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=fs%2Ffile_table.c;h=54018fe488403082ac5274e24a3fee9480fa0a16;hp=43e9e1737de2cb365c954afeddfb64ae0433779b;hb=7eef4091a653c243a87e5375c54504cc03bec4d8;hpb=2832e9366a1fcd6f76957a42157be041240f994e

diff --git a/fs/file_table.c b/fs/file_table.c
index 43e9e17..54018fe 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -8,95 +8,137 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/security.h>
+#include <linux/ima.h>
 #include <linux/eventpoll.h>
+#include <linux/rcupdate.h>
 #include <linux/mount.h>
+#include <linux/capability.h>
 #include <linux/cdev.h>
 #include <linux/fsnotify.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
+
+#include <asm/atomic.h>
 
 /* sysctl tunables... */
 struct files_stat_struct files_stat = {
 	.max_files = NR_FILE
 };
 
-EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
-
-/* public. Not pretty! */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+
+/* SLAB cache for file structures */
+static struct kmem_cache *filp_cachep __read_mostly;
 
-static DEFINE_SPINLOCK(filp_count_lock);
+static struct percpu_counter nr_files __cacheline_aligned_in_smp;
+
+static inline void file_free_rcu(struct rcu_head *head)
+{
+	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
 
-/* slab constructors and destructors are called from arbitrary
- * context and must be fully threaded - use a local spinlock
- * to protect files_stat.nr_files
+	put_cred(f->f_cred);
+	kmem_cache_free(filp_cachep, f);
+}
+
+static inline void file_free(struct file *f)
+{
+	percpu_counter_dec(&nr_files);
+	file_check_state(f);
+	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+}
+
+/*
+ * Return the total number of open files in the system
 */
-void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
+static int get_nr_files(void)
 {
-	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
-		unsigned long flags;
-		spin_lock_irqsave(&filp_count_lock, flags);
-		files_stat.nr_files++;
-		spin_unlock_irqrestore(&filp_count_lock, flags);
-	}
+	return percpu_counter_read_positive(&nr_files);
 }
 
-void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
+/*
+ * Return the maximum number of open files in the system
+ */
+int get_max_files(void)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&filp_count_lock, flags);
-	files_stat.nr_files--;
-	spin_unlock_irqrestore(&filp_count_lock, flags);
+	return files_stat.max_files;
 }
+EXPORT_SYMBOL_GPL(get_max_files);
 
-static inline void file_free(struct file *f)
+/*
+ * Handle nr_files sysctl
+ */
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	kmem_cache_free(filp_cachep, f);
+	files_stat.nr_files = get_nr_files();
+	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+#else
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
 }
+#endif
 
 /* Find an unused file structure and return a pointer to it.
  * Returns NULL if there are no more free file structures or
  * we run out of memory.
+ *
+ * Be very careful using this.  You are responsible for
+ * getting write access to any mount that you might assign
+ * to this filp, if it is opened for write.  If this is not
+ * done, you will imbalance the mount's writer count
+ * and get a warning at __fput() time.
 */
 struct file *get_empty_filp(void)
 {
+	const struct cred *cred = current_cred();
 	static int old_max;
 	struct file * f;
 
 	/*
 	 * Privileged users can go above max_files
 	 */
-	if (files_stat.nr_files >= files_stat.max_files &&
-				!capable(CAP_SYS_ADMIN))
-		goto over;
+	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+		/*
+		 * percpu_counters are inaccurate.  Do an expensive check before
+		 * we go and fail.
+		 */
+		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
+			goto over;
+	}
 
-	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
 	if (f == NULL)
 		goto fail;
 
-	memset(f, 0, sizeof(*f));
+	percpu_counter_inc(&nr_files);
 	if (security_file_alloc(f))
 		goto fail_sec;
 
-	eventpoll_init_file(f);
-	atomic_set(&f->f_count, 1);
-	f->f_uid = current->fsuid;
-	f->f_gid = current->fsgid;
+	INIT_LIST_HEAD(&f->f_u.fu_list);
+	atomic_long_set(&f->f_count, 1);
 	rwlock_init(&f->f_owner.lock);
+	f->f_cred = get_cred(cred);
+	spin_lock_init(&f->f_lock);
+	eventpoll_init_file(f);
 	/* f->f_version: 0 */
-	INIT_LIST_HEAD(&f->f_list);
 	return f;
 
 over:
 	/* Ran out of filps - report that */
-	if (files_stat.nr_files > old_max) {
+	if (get_nr_files() > old_max) {
 		printk(KERN_INFO "VFS: file-max limit %d reached\n",
-					files_stat.max_files);
-		old_max = files_stat.nr_files;
+					get_max_files());
+		old_max = get_nr_files();
 	}
 	goto fail;
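
The hunk above replaces the global filp_count_lock with a percpu_counter: percpu_counter_read_positive() is a cheap but approximate read (each CPU batches updates locally), while percpu_counter_sum_positive() pays for an exact sum across CPUs. get_empty_filp() only does the exact sum once the cheap read claims the file-max limit has been hit. A minimal userspace sketch of that read-fast/sum-slow pattern in C11 atomics follows; every name in it (pcpu_counter, counter_read_fast, ...) is illustrative, not kernel API, and the kernel's version additionally folds per-CPU deltas into the global value once they exceed a batch threshold, which the sketch omits.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS   4
	#define MAX_FILES 1024

	struct pcpu_counter {
		atomic_long global;          /* folded-in value, cheap to read */
		atomic_long local[NR_CPUS];  /* per-cpu deltas, cheap to update */
	};

	/* Approximate read: may lag by the sum of the per-cpu deltas. */
	static long counter_read_fast(struct pcpu_counter *c)
	{
		return atomic_load_explicit(&c->global, memory_order_relaxed);
	}

	/* Exact read: folds in every per-cpu delta, like percpu_counter_sum(). */
	static long counter_sum_slow(struct pcpu_counter *c)
	{
		long sum = atomic_load(&c->global);

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			sum += atomic_load(&c->local[cpu]);
		return sum;
	}

	/* Mirrors get_empty_filp(): trust the cheap read unless it claims we
	 * are over the limit, and only then pay for the exact sum. */
	static bool may_allocate(struct pcpu_counter *c)
	{
		if (counter_read_fast(c) < MAX_FILES)
			return true;
		return counter_sum_slow(c) < MAX_FILES;
	}

	int main(void)
	{
		struct pcpu_counter c = { 0 };

		atomic_store(&c.local[0], 3);  /* pretend cpu0 opened three files */
		printf("may allocate: %d\n", may_allocate(&c));
		return 0;
	}
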
@@ -108,21 +150,117 @@ fail:
 
 EXPORT_SYMBOL(get_empty_filp);
 
-void fastcall fput(struct file *file)
+/**
+ * alloc_file - allocate and initialize a 'struct file'
+ * @mnt: the vfsmount on which the file will reside
+ * @dentry: the dentry representing the new file
+ * @mode: the mode with which the new file will be opened
+ * @fop: the 'struct file_operations' for the new file
+ *
+ * Use this instead of get_empty_filp() to get a new
+ * 'struct file'.  Do so because of the same initialization
+ * pitfalls listed for init_file().  This is the
+ * preferred interface to init_file().
+ *
+ * If all the callers of init_file() are eliminated, its
+ * code should be moved into this function.
+ */
+struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
+		fmode_t mode, const struct file_operations *fop)
+{
+	struct file *file;
+
+	file = get_empty_filp();
+	if (!file)
+		return NULL;
+
+	init_file(file, mnt, dentry, mode, fop);
+	return file;
+}
+EXPORT_SYMBOL(alloc_file);
+
+/**
+ * init_file - initialize a 'struct file'
+ * @file: the already allocated 'struct file' to initialize
+ * @mnt: the vfsmount on which the file resides
+ * @dentry: the dentry representing this file
+ * @mode: the mode the file is opened with
+ * @fop: the 'struct file_operations' for this file
+ *
+ * Use this instead of setting the members directly.  Doing so
+ * avoids making mistakes like forgetting the mntget() or
+ * forgetting to take a write on the mnt.
+ *
+ * Note: This is a crappy interface.  It is here to make
+ * merging with the existing users of get_empty_filp()
+ * who have complex failure logic easier.  All users
+ * of this should be moving to alloc_file().
+ */
+int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
+	   fmode_t mode, const struct file_operations *fop)
 {
-	if (atomic_dec_and_test(&file->f_count))
+	int error = 0;
+	file->f_path.dentry = dentry;
+	file->f_path.mnt = mntget(mnt);
+	file->f_mapping = dentry->d_inode->i_mapping;
+	file->f_mode = mode;
+	file->f_op = fop;
+
+	/*
+	 * These mounts don't really matter in practice
+	 * for r/o bind mounts.  They aren't userspace-
+	 * visible.  We do this for consistency, and so
+	 * that we can do debugging checks at __fput()
+	 */
+	if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
+		file_take_write(file);
+		error = mnt_want_write(mnt);
+		WARN_ON(error);
+	}
+	return error;
+}
+EXPORT_SYMBOL(init_file);
+
+void fput(struct file *file)
+{
+	if (atomic_long_dec_and_test(&file->f_count))
 		__fput(file);
 }
 
 EXPORT_SYMBOL(fput);
 
+/**
+ * drop_file_write_access - give up ability to write to a file
+ * @file: the file to which we will stop writing
+ *
+ * This is a central place which will give up the ability
+ * to write to @file, along with access to write through
+ * its vfsmount.
+ */
+void drop_file_write_access(struct file *file)
+{
+	struct vfsmount *mnt = file->f_path.mnt;
+	struct dentry *dentry = file->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+
+	put_write_access(inode);
+
+	if (special_file(inode->i_mode))
+		return;
+	if (file_check_writeable(file) != 0)
+		return;
+	mnt_drop_write(mnt);
+	file_release_write(file);
+}
+EXPORT_SYMBOL_GPL(drop_file_write_access);
+
 /* __fput is called from task context when aio completion releases the
  * last use of a struct file *.  Do not use otherwise.
  */
-void fastcall __fput(struct file *file)
+void __fput(struct file *file)
 {
-	struct dentry *dentry = file->f_dentry;
-	struct vfsmount *mnt = file->f_vfsmnt;
+	struct dentry *dentry = file->f_path.dentry;
+	struct vfsmount *mnt = file->f_path.mnt;
 	struct inode *inode = dentry->d_inode;
 
 	might_sleep();
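
The comments in this hunk describe a pairing invariant: a file opened with FMODE_WRITE on a non-special inode holds one writer reference on its vfsmount, taken via mnt_want_write() in init_file() and returned via mnt_drop_write() when the final fput() reaches drop_file_write_access(). A hypothetical in-kernel caller of alloc_file() might look like the sketch below; example_open_internal() and its error handling are illustrative, only alloc_file() itself comes from the patch.

	/* Hypothetical helper: wrap an already-pinned dentry/vfsmount pair in
	 * an open, writable struct file.  The caller's dentry reference is
	 * transferred to the file on success (and released by the dput() in
	 * __fput()); alloc_file() takes the vfsmount reference itself and,
	 * for FMODE_WRITE, the mnt writer reference that
	 * drop_file_write_access() gives back on final fput(). */
	static struct file *example_open_internal(struct vfsmount *mnt,
						  struct dentry *dentry,
						  const struct file_operations *fops)
	{
		struct file *filp;

		filp = alloc_file(mnt, dentry, FMODE_READ | FMODE_WRITE, fops);
		if (!filp) {
			dput(dentry);	/* alloc failed: ref was not transferred */
			return ERR_PTR(-ENFILE);
		}
		return filp;
	}

This bookkeeping is exactly what the "Be very careful using this" warning on get_empty_filp() asks open-coded callers to get right by hand.
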
@@ -135,32 +273,44 @@ void fastcall __fput(struct file *file)
 	eventpoll_release(file);
 	locks_remove_flock(file);
 
+	if (unlikely(file->f_flags & FASYNC)) {
+		if (file->f_op && file->f_op->fasync)
+			file->f_op->fasync(-1, file, 0);
+	}
 	if (file->f_op && file->f_op->release)
 		file->f_op->release(inode, file);
 	security_file_free(file);
-	if (unlikely(inode->i_cdev != NULL))
+	ima_file_free(file);
+	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
 		cdev_put(inode->i_cdev);
 	fops_put(file->f_op);
-	if (file->f_mode & FMODE_WRITE)
-		put_write_access(inode);
+	put_pid(file->f_owner.pid);
 	file_kill(file);
-	file->f_dentry = NULL;
-	file->f_vfsmnt = NULL;
+	if (file->f_mode & FMODE_WRITE)
+		drop_file_write_access(file);
+	file->f_path.dentry = NULL;
+	file->f_path.mnt = NULL;
 	file_free(file);
 	dput(dentry);
 	mntput(mnt);
 }
 
-struct file fastcall *fget(unsigned int fd)
+struct file *fget(unsigned int fd)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
 
-	spin_lock(&files->file_lock);
+	rcu_read_lock();
 	file = fcheck_files(files, fd);
-	if (file)
-		get_file(file);
-	spin_unlock(&files->file_lock);
+	if (file) {
+		if (!atomic_long_inc_not_zero(&file->f_count)) {
+			/* File object ref couldn't be taken */
+			rcu_read_unlock();
+			return NULL;
+		}
+	}
+	rcu_read_unlock();
+
 	return file;
 }
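
fget() above, and fget_light() in the next hunk, now do the descriptor-table lookup under rcu_read_lock() instead of files->file_lock, so a concurrently closing task may drop the last reference while the lookup is in flight. Correctness hinges on atomic_long_inc_not_zero(): a reference is taken only if the count is still non-zero, a count that has reached zero is never resurrected, and the call_rcu() in file_free() guarantees the memory stays valid for the duration of the read-side critical section. A freestanding C11 sketch of the increment-if-not-zero primitive follows; this is illustrative, not the kernel's implementation.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Take a reference only if at least one is still held: a refcount
	 * that has hit zero can never be revived, which is what makes the
	 * RCU lookup safe against a racing final fput(). */
	static bool inc_not_zero(atomic_long *count)
	{
		long old = atomic_load(count);

		while (old != 0) {
			/* On failure, 'old' is reloaded with the current value. */
			if (atomic_compare_exchange_weak(count, &old, old + 1))
				return true;
		}
		return false;
	}
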
@@ -173,7 +323,7 @@ EXPORT_SYMBOL(fget);
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
-struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
+struct file *fget_light(unsigned int fd, int *fput_needed)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
 
@@ -182,21 +332,25 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 	if (likely((atomic_read(&files->count) == 1))) {
 		file = fcheck_files(files, fd);
 	} else {
-		spin_lock(&files->file_lock);
+		rcu_read_lock();
 		file = fcheck_files(files, fd);
 		if (file) {
-			get_file(file);
-			*fput_needed = 1;
+			if (atomic_long_inc_not_zero(&file->f_count))
+				*fput_needed = 1;
+			else
+				/* Didn't get the reference, someone's freed */
+				file = NULL;
 		}
-		spin_unlock(&files->file_lock);
+		rcu_read_unlock();
 	}
+
 	return file;
 }
 
 
 void put_filp(struct file *file)
 {
-	if (atomic_dec_and_test(&file->f_count)) {
+	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
 		file_kill(file);
 		file_free(file);
@@ -208,28 +362,27 @@ void file_move(struct file *file, struct list_head *list)
 	if (!list)
 		return;
 	file_list_lock();
-	list_move(&file->f_list, list);
+	list_move(&file->f_u.fu_list, list);
 	file_list_unlock();
 }
 
 void file_kill(struct file *file)
 {
-	if (!list_empty(&file->f_list)) {
+	if (!list_empty(&file->f_u.fu_list)) {
 		file_list_lock();
-		list_del_init(&file->f_list);
+		list_del_init(&file->f_u.fu_list);
 		file_list_unlock();
 	}
 }
 
 int fs_may_remount_ro(struct super_block *sb)
 {
-	struct list_head *p;
+	struct file *file;
 
 	/* Check that no files are currently opened for writing. */
 	file_list_lock();
-	list_for_each(p, &sb->s_files) {
-		struct file *file = list_entry(p, struct file, f_list);
-		struct inode *inode = file->f_dentry->d_inode;
+	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
+		struct inode *inode = file->f_path.dentry->d_inode;
 
 		/* File with pending delete? */
 		if (inode->i_nlink == 0)
@@ -249,7 +402,12 @@ too_bad:
 void __init files_init(unsigned long mempages)
 {
 	int n;
-	/* One file with associated inode and dcache is very roughly 1K.
+
+	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+
+	/*
+	 * One file with associated inode and dcache is very roughly 1K.
 	 * Per default don't use more than 10% of our memory for files.
 	 */
 
@@ -257,4 +415,6 @@ void __init files_init(unsigned long mempages)
 	files_stat.max_files = n;
 	if (files_stat.max_files < NR_FILE)
 		files_stat.max_files = NR_FILE;
+	files_defer_init();
+	percpu_counter_init(&nr_files, 0);
 }
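
For scale: files_init() budgets roughly 1K per open file and at most 10% of memory. The computation sitting between the last two hunks is unchanged and therefore not shown in the diff; in kernels of this era it is n = (mempages * (PAGE_SIZE / 1024)) / 10. A standalone worked example of the sizing rule, plain userspace arithmetic rather than kernel code:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define NR_FILE   8192UL	/* floor applied by files_init() */

	int main(void)
	{
		unsigned long mempages = (1UL << 30) / PAGE_SIZE;  /* 1 GiB of RAM */
		/* ~1K per file, capped at 10% of memory */
		unsigned long n = (mempages * (PAGE_SIZE / 1024)) / 10;

		if (n < NR_FILE)
			n = NR_FILE;
		printf("max_files for 1 GiB: %lu\n", n);	/* prints 104857 */
		return 0;
	}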