diff --git a/fs/file_table.c b/fs/file_table.c
index c3a5e2f..bbeeac6 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -8,102 +8,135 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/security.h>
 #include <linux/eventpoll.h>
 #include <linux/rcupdate.h>
 #include <linux/mount.h>
+#include <linux/capability.h>
 #include <linux/cdev.h>
 #include <linux/fsnotify.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
+
+#include <asm/atomic.h>
 
 /* sysctl tunables... */
 struct files_stat_struct files_stat = {
 	.max_files = NR_FILE
 };
-EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
 
 /* public. Not pretty! */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
 
-static DEFINE_SPINLOCK(filp_count_lock);
+/* SLAB cache for file structures */
+static struct kmem_cache *filp_cachep __read_mostly;
 
-/* slab constructors and destructors are called from arbitrary
- * context and must be fully threaded - use a local spinlock
- * to protect files_stat.nr_files
- */
-void filp_ctor(void *objp, struct kmem_cache *cachep, unsigned long cflags)
+static struct percpu_counter nr_files __cacheline_aligned_in_smp;
+
+static inline void file_free_rcu(struct rcu_head *head)
 {
-	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
-		unsigned long flags;
-		spin_lock_irqsave(&filp_count_lock, flags);
-		files_stat.nr_files++;
-		spin_unlock_irqrestore(&filp_count_lock, flags);
-	}
+	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+
+	put_cred(f->f_cred);
+	kmem_cache_free(filp_cachep, f);
 }
 
-void filp_dtor(void *objp, struct kmem_cache *cachep, unsigned long dflags)
+static inline void file_free(struct file *f)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&filp_count_lock, flags);
-	files_stat.nr_files--;
-	spin_unlock_irqrestore(&filp_count_lock, flags);
+	percpu_counter_dec(&nr_files);
+	file_check_state(f);
+	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
 }
 
-static inline void file_free_rcu(struct rcu_head *head)
+/*
+ * Return the total number of open files in the system
+ */
+static int get_nr_files(void)
 {
-	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
-	kmem_cache_free(filp_cachep, f);
+	return percpu_counter_read_positive(&nr_files);
 }
 
-static inline void file_free(struct file *f)
+/*
+ * Return the maximum number of open files in the system
+ */
+int get_max_files(void)
 {
-	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+	return files_stat.max_files;
 }
+EXPORT_SYMBOL_GPL(get_max_files);
+
+/*
+ * Handle the nr_files sysctl
+ */
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+                     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	files_stat.nr_files = get_nr_files();
+	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+#else
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+                     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
+#endif
 
 /* Find an unused file structure and return a pointer to it.
- * Returns NULL if there are no more free file structures or
+ * Returns NULL if there are no more free file structures or
  * we run out of memory.
+ *
+ * Be very careful using this.  You are responsible for
+ * getting write access to any mount that you might assign
+ * to this filp, if it is opened for write.  If this is not
+ * done, you will unbalance the mount's writer count
+ * and get a warning at __fput() time.
  */
 struct file *get_empty_filp(void)
 {
+	const struct cred *cred = current_cred();
 	static int old_max;
 	struct file * f;
 
 	/*
 	 * Privileged users can go above max_files
 	 */
-	if (files_stat.nr_files >= files_stat.max_files &&
-				!capable(CAP_SYS_ADMIN))
-		goto over;
+	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+		/*
+		 * percpu_counters are inaccurate.  Do an expensive check before
+		 * we go and fail.
+		 */
+		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
+			goto over;
+	}
 
-	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
 	if (f == NULL)
 		goto fail;
 
-	memset(f, 0, sizeof(*f));
+	percpu_counter_inc(&nr_files);
 	if (security_file_alloc(f))
 		goto fail_sec;
 
-	eventpoll_init_file(f);
-	atomic_set(&f->f_count, 1);
-	f->f_uid = current->fsuid;
-	f->f_gid = current->fsgid;
+	INIT_LIST_HEAD(&f->f_u.fu_list);
+	atomic_long_set(&f->f_count, 1);
 	rwlock_init(&f->f_owner.lock);
+	f->f_cred = get_cred(cred);
+	eventpoll_init_file(f);
 	/* f->f_version: 0 */
-	INIT_LIST_HEAD(&f->f_u.fu_list);
 	return f;
 
 over:
 	/* Ran out of filps - report that */
-	if (files_stat.nr_files > old_max) {
+	if (get_nr_files() > old_max) {
 		printk(KERN_INFO "VFS: file-max limit %d reached\n",
-					files_stat.max_files);
-		old_max = files_stat.nr_files;
+					get_max_files());
+		old_max = get_nr_files();
 	}
 	goto fail;
 
@@ -115,21 +148,118 @@ fail:
 
 EXPORT_SYMBOL(get_empty_filp);
 
-void fastcall fput(struct file *file)
+/**
+ * alloc_file - allocate and initialize a 'struct file'
+ * @mnt: the vfsmount on which the file will reside
+ * @dentry: the dentry representing the new file
+ * @mode: the mode with which the new file will be opened
+ * @fop: the 'struct file_operations' for the new file
+ *
+ * Use this instead of get_empty_filp() to get a new
+ * 'struct file'.  Do so to avoid the same initialization
+ * pitfalls listed for init_file().  This is the
+ * preferred interface to init_file().
+ *
+ * If all the callers of init_file() are eliminated, its
+ * code should be moved into this function.
+ */
+struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
+		fmode_t mode, const struct file_operations *fop)
 {
-	if (rcuref_dec_and_test(&file->f_count))
+	struct file *file;
+
+	file = get_empty_filp();
+	if (!file)
+		return NULL;
+
+	init_file(file, mnt, dentry, mode, fop);
+	return file;
+}
+EXPORT_SYMBOL(alloc_file);
+
+/**
+ * init_file - initialize a 'struct file'
+ * @file: the already allocated 'struct file' to be initialized
+ * @mnt: the vfsmount on which the file resides
+ * @dentry: the dentry representing this file
+ * @mode: the mode the file is opened with
+ * @fop: the 'struct file_operations' for this file
+ *
+ * Use this instead of setting the members directly.  Doing so
+ * avoids making mistakes like forgetting the mntget() or
+ * forgetting to take a write on the mnt.
+ *
+ * Note: This is a crappy interface.  It is here to make
+ * merging with the existing users of get_empty_filp(),
+ * who have complex failure logic, easier.  All users
+ * of this should be moving to alloc_file().
+ */
+int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
+	   fmode_t mode, const struct file_operations *fop)
+{
+	int error = 0;
+	file->f_path.dentry = dentry;
+	file->f_path.mnt = mntget(mnt);
+	file->f_mapping = dentry->d_inode->i_mapping;
+	file->f_mode = mode;
+	file->f_op = fop;
+
+	/*
+	 * These mounts don't really matter in practice
+	 * for r/o bind mounts.  They aren't userspace-
+	 * visible.  We do this for consistency, and so
+	 * that we can do debugging checks at __fput()
+	 */
+	if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
+		file_take_write(file);
+		error = mnt_want_write(mnt);
+		WARN_ON(error);
+	}
+	return error;
+}
+EXPORT_SYMBOL(init_file);
+
+void fput(struct file *file)
+{
+	if (atomic_long_dec_and_test(&file->f_count))
 		__fput(file);
 }
 
 EXPORT_SYMBOL(fput);
 
+/**
+ * drop_file_write_access - give up ability to write to a file
+ * @file: the file to which we will stop writing
+ *
+ * This is a central place which will give up the ability
+ * to write to @file, along with access to write through
+ * its vfsmount.
+ */
+void drop_file_write_access(struct file *file)
+{
+	struct vfsmount *mnt = file->f_path.mnt;
+	struct dentry *dentry = file->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+
+	put_write_access(inode);
+
+	if (special_file(inode->i_mode))
+		return;
+	if (file_check_writeable(file) != 0)
+		return;
+	mnt_drop_write(mnt);
+	file_release_write(file);
+}
+EXPORT_SYMBOL_GPL(drop_file_write_access);
+
 /* __fput is called from task context when aio completion releases the
  * last use of a struct file *.  Do not use otherwise.
  */
-void fastcall __fput(struct file *file)
+void __fput(struct file *file)
 {
-	struct dentry *dentry = file->f_dentry;
-	struct vfsmount *mnt = file->f_vfsmnt;
+	struct dentry *dentry = file->f_path.dentry;
+	struct vfsmount *mnt = file->f_path.mnt;
 	struct inode *inode = dentry->d_inode;
 
 	might_sleep();
@@ -142,23 +272,28 @@ void fastcall __fput(struct file *file)
 	eventpoll_release(file);
 	locks_remove_flock(file);
 
+	if (unlikely(file->f_flags & FASYNC)) {
+		if (file->f_op && file->f_op->fasync)
+			file->f_op->fasync(-1, file, 0);
+	}
 	if (file->f_op && file->f_op->release)
 		file->f_op->release(inode, file);
 	security_file_free(file);
-	if (unlikely(inode->i_cdev != NULL))
+	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
 		cdev_put(inode->i_cdev);
 	fops_put(file->f_op);
-	if (file->f_mode & FMODE_WRITE)
-		put_write_access(inode);
+	put_pid(file->f_owner.pid);
 	file_kill(file);
-	file->f_dentry = NULL;
-	file->f_vfsmnt = NULL;
+	if (file->f_mode & FMODE_WRITE)
+		drop_file_write_access(file);
+	file->f_path.dentry = NULL;
+	file->f_path.mnt = NULL;
 	file_free(file);
 	dput(dentry);
 	mntput(mnt);
 }
 
-struct file fastcall *fget(unsigned int fd)
+struct file *fget(unsigned int fd)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
@@ -166,7 +301,7 @@ struct file fastcall *fget(unsigned int fd)
 	rcu_read_lock();
 	file = fcheck_files(files, fd);
 	if (file) {
-		if (!rcuref_inc_lf(&file->f_count)) {
+		if (!atomic_long_inc_not_zero(&file->f_count)) {
 			/* File object ref couldn't be taken */
 			rcu_read_unlock();
 			return NULL;
@@ -186,7 +321,7 @@ EXPORT_SYMBOL(fget);
  * and a flag is returned to be passed to the corresponding fput_light().
  * There must be no cloning between an fget_light/fput_light pair.
  */
-struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
+struct file *fget_light(unsigned int fd, int *fput_needed)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
@@ -198,7 +333,7 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 	rcu_read_lock();
 	file = fcheck_files(files, fd);
 	if (file) {
-		if (rcuref_inc_lf(&file->f_count))
+		if (atomic_long_inc_not_zero(&file->f_count))
 			*fput_needed = 1;
 		else
 			/* Didn't get the reference, someone's freed */
@@ -213,7 +348,7 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 
 void put_filp(struct file *file)
 {
-	if (rcuref_dec_and_test(&file->f_count)) {
+	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
 		file_kill(file);
 		file_free(file);
@@ -240,13 +375,12 @@ void file_kill(struct file *file)
 
 int fs_may_remount_ro(struct super_block *sb)
 {
-	struct list_head *p;
+	struct file *file;
 
 	/* Check that no files are currently opened for writing. */
 	file_list_lock();
-	list_for_each(p, &sb->s_files) {
-		struct file *file = list_entry(p, struct file, f_u.fu_list);
-		struct inode *inode = file->f_dentry->d_inode;
+	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
+		struct inode *inode = file->f_path.dentry->d_inode;
 
 		/* File with pending delete? */
 		if (inode->i_nlink == 0)
@@ -266,7 +400,12 @@ too_bad:
 void __init files_init(unsigned long mempages)
 {
 	int n;
-	/* One file with associated inode and dcache is very roughly 1K.
+
+	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+
+	/*
+	 * One file with associated inode and dcache is very roughly 1K.
 	 * By default, don't use more than 10% of our memory for files.
 	 */
 
@@ -275,4 +414,5 @@
 	if (files_stat.max_files < NR_FILE)
 		files_stat.max_files = NR_FILE;
 	files_defer_init();
+	percpu_counter_init(&nr_files, 0);
 }
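---

The core of this patch is the switch from the spinlock-protected files_stat.nr_files
bookkeeping to a per-CPU counter: percpu_counter_inc()/percpu_counter_dec() touch
mostly CPU-local state, percpu_counter_read_positive() is a cheap but approximate
read, and get_empty_filp() falls back to the expensive, exact
percpu_counter_sum_positive() only when the approximate value claims the limit has
been hit. The userspace program below is a minimal sketch of that pattern, not the
kernel implementation; counter_t, NSLOTS, BATCH, and the helper names are invented
stand-ins for illustration.

#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 4        /* stand-in for the number of CPUs */
#define BATCH  4        /* local drift tolerated per slot before flushing */

typedef struct {
        atomic_long count;              /* shared total; may lag behind */
        atomic_long delta[NSLOTS];      /* per-"CPU" unflushed increments */
} counter_t;

/* Fast path: bump a local slot, folding into the shared total only rarely. */
static void counter_inc(counter_t *c, int cpu)
{
        long d = atomic_fetch_add(&c->delta[cpu % NSLOTS], 1) + 1;

        if (d >= BATCH) {
                atomic_fetch_sub(&c->delta[cpu % NSLOTS], d);
                atomic_fetch_add(&c->count, d);
        }
}

/* Cheap, approximate read (cf. percpu_counter_read_positive()). */
static long counter_read(counter_t *c)
{
        long v = atomic_load(&c->count);
        return v < 0 ? 0 : v;
}

/* Expensive, accurate read (cf. percpu_counter_sum_positive()). */
static long counter_sum(counter_t *c)
{
        long v = atomic_load(&c->count);
        for (int i = 0; i < NSLOTS; i++)
                v += atomic_load(&c->delta[i]);
        return v < 0 ? 0 : v;
}

int main(void)
{
        counter_t nr_files = {0};
        const long max_files = 10;      /* toy stand-in for files_stat.max_files */

        for (int i = 0; i < 16; i++) {
                /* Mirror get_empty_filp(): cheap check first, exact
                 * confirmation only when we are about to fail. */
                if (counter_read(&nr_files) >= max_files &&
                    counter_sum(&nr_files) >= max_files) {
                        printf("open %2d: over file-max limit\n", i);
                        continue;
                }
                counter_inc(&nr_files, i % 2);  /* pretend two busy CPUs */
                printf("open %2d: approx=%ld exact=%ld\n",
                       i, counter_read(&nr_files), counter_sum(&nr_files));
        }
        return 0;
}

Because each slot tolerates up to BATCH unflushed increments, the approximate read
can under-report by as much as NSLOTS * BATCH; the run above shows "approx" lagging
"exact" between flushes. That inaccuracy is exactly why the patch re-checks with
percpu_counter_sum_positive() before reporting that the file-max limit was reached.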