X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fproc%2Finode.c;h=445a02bcaab31d88963e1d0c7deeeb25572efb97;hb=e071041be037eca208b62b84469a06bdfc692bea;hp=5ac781801ae7efaf005d635ee26a55e50177ea75;hpb=4b6a9316fab51af611dc8671f296734089f6a22a;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 5ac7818..445a02b 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -10,94 +10,56 @@ #include #include #include +#include +#include #include #include #include #include #include +#include #include #include #include "internal.h" -static inline struct proc_dir_entry * de_get(struct proc_dir_entry *de) -{ - if (de) - atomic_inc(&de->count); - return de; -} - -/* - * Decrements the use count and checks for deferred deletion. - */ -static void de_put(struct proc_dir_entry *de) -{ - if (de) { - lock_kernel(); - if (!atomic_read(&de->count)) { - printk("de_put: entry %s already free!\n", de->name); - unlock_kernel(); - return; - } - - if (atomic_dec_and_test(&de->count)) { - if (de->deleted) { - printk("de_put: deferred delete of %s\n", - de->name); - free_proc_entry(de); - } - } - unlock_kernel(); - } -} - -/* - * Decrement the use count of the proc_dir_entry. - */ static void proc_delete_inode(struct inode *inode) { struct proc_dir_entry *de; - struct task_struct *tsk; truncate_inode_pages(&inode->i_data, 0); - /* Let go of any associated process */ - tsk = PROC_I(inode)->task; - if (tsk) - put_task_struct(tsk); + /* Stop tracking associated processes */ + put_pid(PROC_I(inode)->pid); /* Let go of any associated proc directory entry */ de = PROC_I(inode)->pde; - if (de) { - if (de->owner) - module_put(de->owner); - de_put(de); - } + if (de) + pde_put(de); + if (PROC_I(inode)->sysctl) + sysctl_head_put(PROC_I(inode)->sysctl); clear_inode(inode); } struct vfsmount *proc_mnt; -static void proc_read_inode(struct inode * inode) -{ - inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; -} - -static kmem_cache_t * proc_inode_cachep; +static struct kmem_cache * proc_inode_cachep; static struct inode *proc_alloc_inode(struct super_block *sb) { struct proc_inode *ei; struct inode *inode; - ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL); + ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); if (!ei) return NULL; - ei->task = NULL; - ei->type = 0; + ei->pid = NULL; + ei->fd = 0; ei->op.proc_get_link = NULL; ei->pde = NULL; + ei->sysctl = NULL; + ei->sysctl_entry = NULL; inode = &ei->vfs_inode; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; return inode; @@ -108,63 +70,367 @@ static void proc_destroy_inode(struct inode *inode) kmem_cache_free(proc_inode_cachep, PROC_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo) { struct proc_inode *ei = (struct proc_inode *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - inode_init_once(&ei->vfs_inode); + inode_init_once(&ei->vfs_inode); } - -int __init proc_init_inodecache(void) + +void __init proc_init_inodecache(void) { proc_inode_cachep = kmem_cache_create("proc_inode_cache", sizeof(struct proc_inode), - 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, - init_once, NULL); - if (proc_inode_cachep == NULL) - return -ENOMEM; - return 0; + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD|SLAB_PANIC), + init_once); } -static int proc_remount(struct super_block *sb, int *flags, char *data) -{ - *flags |= MS_NODIRATIME; - return 0; -} - -static struct super_operations 
proc_sops = { +static const struct super_operations proc_sops = { .alloc_inode = proc_alloc_inode, .destroy_inode = proc_destroy_inode, - .read_inode = proc_read_inode, .drop_inode = generic_delete_inode, .delete_inode = proc_delete_inode, .statfs = simple_statfs, - .remount_fs = proc_remount, }; -struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, - struct proc_dir_entry *de) +static void __pde_users_dec(struct proc_dir_entry *pde) { - struct inode * inode; + pde->pde_users--; + if (pde->pde_unload_completion && pde->pde_users == 0) + complete(pde->pde_unload_completion); +} + +void pde_users_dec(struct proc_dir_entry *pde) +{ + spin_lock(&pde->pde_unload_lock); + __pde_users_dec(pde); + spin_unlock(&pde->pde_unload_lock); +} + +static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + loff_t rv = -EINVAL; + loff_t (*llseek)(struct file *, loff_t, int); + + spin_lock(&pde->pde_unload_lock); + /* + * remove_proc_entry() is going to delete PDE (as part of module + * cleanup sequence). No new callers into module allowed. + */ + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + /* + * Bump refcount so that remove_proc_entry will wail for ->llseek to + * complete. + */ + pde->pde_users++; + /* + * Save function pointer under lock, to protect against ->proc_fops + * NULL'ifying right after ->pde_unload_lock is dropped. + */ + llseek = pde->proc_fops->llseek; + spin_unlock(&pde->pde_unload_lock); + + if (!llseek) + llseek = default_llseek; + rv = llseek(file, offset, whence); + + pde_users_dec(pde); + return rv; +} + +static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + ssize_t rv = -EIO; + ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + read = pde->proc_fops->read; + spin_unlock(&pde->pde_unload_lock); + + if (read) + rv = read(file, buf, count, ppos); + + pde_users_dec(pde); + return rv; +} + +static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + ssize_t rv = -EIO; + ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + write = pde->proc_fops->write; + spin_unlock(&pde->pde_unload_lock); + + if (write) + rv = write(file, buf, count, ppos); + + pde_users_dec(pde); + return rv; +} + +static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + unsigned int rv = DEFAULT_POLLMASK; + unsigned int (*poll)(struct file *, struct poll_table_struct *); + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + poll = pde->proc_fops->poll; + spin_unlock(&pde->pde_unload_lock); + + if (poll) + rv = poll(file, pts); + + pde_users_dec(pde); + return rv; +} + +static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + long rv = -ENOTTY; + long 
(*unlocked_ioctl)(struct file *, unsigned int, unsigned long); + int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long); + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + unlocked_ioctl = pde->proc_fops->unlocked_ioctl; + ioctl = pde->proc_fops->ioctl; + spin_unlock(&pde->pde_unload_lock); + + if (unlocked_ioctl) { + rv = unlocked_ioctl(file, cmd, arg); + if (rv == -ENOIOCTLCMD) + rv = -EINVAL; + } else if (ioctl) { + lock_kernel(); + rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg); + unlock_kernel(); + } + + pde_users_dec(pde); + return rv; +} + +#ifdef CONFIG_COMPAT +static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + long rv = -ENOTTY; + long (*compat_ioctl)(struct file *, unsigned int, unsigned long); + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + compat_ioctl = pde->proc_fops->compat_ioctl; + spin_unlock(&pde->pde_unload_lock); + + if (compat_ioctl) + rv = compat_ioctl(file, cmd, arg); + + pde_users_dec(pde); + return rv; +} +#endif + +static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); + int rv = -EIO; + int (*mmap)(struct file *, struct vm_area_struct *); + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + mmap = pde->proc_fops->mmap; + spin_unlock(&pde->pde_unload_lock); + + if (mmap) + rv = mmap(file, vma); + + pde_users_dec(pde); + return rv; +} + +static int proc_reg_open(struct inode *inode, struct file *file) +{ + struct proc_dir_entry *pde = PDE(inode); + int rv = 0; + int (*open)(struct inode *, struct file *); + int (*release)(struct inode *, struct file *); + struct pde_opener *pdeo; /* - * Increment the use count so the dir entry can't disappear. + * What for, you ask? Well, we can have open, rmmod, remove_proc_entry + * sequence. ->release won't be called because ->proc_fops will be + * cleared. Depending on complexity of ->release, consequences vary. + * + * We can't wait for mercy when close will be done for real, it's + * deadlockable: rmmod foo release + * by hand in remove_proc_entry(). For this, save opener's credentials + * for later. */ - de_get(de); + pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL); + if (!pdeo) + return -ENOMEM; + + spin_lock(&pde->pde_unload_lock); + if (!pde->proc_fops) { + spin_unlock(&pde->pde_unload_lock); + kfree(pdeo); + return -EINVAL; + } + pde->pde_users++; + open = pde->proc_fops->open; + release = pde->proc_fops->release; + spin_unlock(&pde->pde_unload_lock); + + if (open) + rv = open(inode, file); + + spin_lock(&pde->pde_unload_lock); + if (rv == 0 && release) { + /* To know what to release. */ + pdeo->inode = inode; + pdeo->file = file; + /* Strictly for "too late" ->release in proc_reg_release(). 
*/ + pdeo->release = release; + list_add(&pdeo->lh, &pde->pde_openers); + } else + kfree(pdeo); + __pde_users_dec(pde); + spin_unlock(&pde->pde_unload_lock); + return rv; +} + +static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde, + struct inode *inode, struct file *file) +{ + struct pde_opener *pdeo; + + list_for_each_entry(pdeo, &pde->pde_openers, lh) { + if (pdeo->inode == inode && pdeo->file == file) + return pdeo; + } + return NULL; +} + +static int proc_reg_release(struct inode *inode, struct file *file) +{ + struct proc_dir_entry *pde = PDE(inode); + int rv = 0; + int (*release)(struct inode *, struct file *); + struct pde_opener *pdeo; + + spin_lock(&pde->pde_unload_lock); + pdeo = find_pde_opener(pde, inode, file); + if (!pde->proc_fops) { + /* + * Can't simply exit, __fput() will think that everything is OK, + * and move on to freeing struct file. remove_proc_entry() will + * find slacker in opener's list and will try to do non-trivial + * things with struct file. Therefore, remove opener from list. + * + * But if opener is removed from list, who will ->release it? + */ + if (pdeo) { + list_del(&pdeo->lh); + spin_unlock(&pde->pde_unload_lock); + rv = pdeo->release(inode, file); + kfree(pdeo); + } else + spin_unlock(&pde->pde_unload_lock); + return rv; + } + pde->pde_users++; + release = pde->proc_fops->release; + if (pdeo) { + list_del(&pdeo->lh); + kfree(pdeo); + } + spin_unlock(&pde->pde_unload_lock); + + if (release) + rv = release(inode, file); - WARN_ON(de && de->deleted); + pde_users_dec(pde); + return rv; +} + +static const struct file_operations proc_reg_file_ops = { + .llseek = proc_reg_llseek, + .read = proc_reg_read, + .write = proc_reg_write, + .poll = proc_reg_poll, + .unlocked_ioctl = proc_reg_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = proc_reg_compat_ioctl, +#endif + .mmap = proc_reg_mmap, + .open = proc_reg_open, + .release = proc_reg_release, +}; + +#ifdef CONFIG_COMPAT +static const struct file_operations proc_reg_file_ops_no_compat = { + .llseek = proc_reg_llseek, + .read = proc_reg_read, + .write = proc_reg_write, + .poll = proc_reg_poll, + .unlocked_ioctl = proc_reg_unlocked_ioctl, + .mmap = proc_reg_mmap, + .open = proc_reg_open, + .release = proc_reg_release, +}; +#endif - if (de != NULL && !try_module_get(de->owner)) - goto out_mod; +struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, + struct proc_dir_entry *de) +{ + struct inode * inode; - inode = iget(sb, ino); + inode = iget_locked(sb, ino); if (!inode) - goto out_ino; + return NULL; + if (inode->i_state & I_NEW) { + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + PROC_I(inode)->fd = 0; + PROC_I(inode)->pde = de; - PROC_I(inode)->pde = de; - if (de) { if (de->mode) { inode->i_mode = de->mode; inode->i_uid = de->uid; @@ -176,31 +442,37 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, inode->i_nlink = de->nlink; if (de->proc_iops) inode->i_op = de->proc_iops; - if (de->proc_fops) - inode->i_fop = de->proc_fops; - } - + if (de->proc_fops) { + if (S_ISREG(inode->i_mode)) { +#ifdef CONFIG_COMPAT + if (!de->proc_fops->compat_ioctl) + inode->i_fop = + &proc_reg_file_ops_no_compat; + else +#endif + inode->i_fop = &proc_reg_file_ops; + } else { + inode->i_fop = de->proc_fops; + } + } + unlock_new_inode(inode); + } else + pde_put(de); return inode; - -out_ino: - if (de != NULL) - module_put(de->owner); -out_mod: - de_put(de); - return NULL; } -int proc_fill_super(struct super_block *s, void *data, int silent) +int 
proc_fill_super(struct super_block *s) { struct inode * root_inode; - s->s_flags |= MS_NODIRATIME; + s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = PROC_SUPER_MAGIC; s->s_op = &proc_sops; s->s_time_gran = 1; + pde_get(&proc_root); root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); if (!root_inode) goto out_no_root; @@ -214,6 +486,6 @@ int proc_fill_super(struct super_block *s, void *data, int silent) out_no_root: printk("proc_read_super: get root inode failed\n"); iput(root_inode); + pde_put(&proc_root); return -ENOMEM; } -MODULE_LICENSE("GPL");
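
Note on the locking pattern introduced above: every proc_reg_* wrapper follows the same sequence. Take pde_unload_lock, bail out if ->proc_fops has already been cleared by remove_proc_entry(), bump pde_users so removal will wait, snapshot the method pointer while still holding the lock, drop the lock, call the method, then decrement pde_users (waking the waiter on pde_unload_completion when the count reaches zero). The sketch below is a plain userspace pthreads analogue of that sequence, not kernel code; the names (struct entry, entry_read, entry_remove) are invented for illustration only.

	/*
	 * Userspace analogue of the pde_users / pde_unload_completion scheme.
	 * Hypothetical names; initialise the mutex and condvar with
	 * PTHREAD_MUTEX_INITIALIZER / PTHREAD_COND_INITIALIZER.
	 */
	#include <pthread.h>
	#include <stddef.h>
	#include <sys/types.h>

	struct entry {
		pthread_mutex_t lock;		/* plays the role of pde_unload_lock */
		pthread_cond_t unload_done;	/* plays the role of pde_unload_completion */
		int users;			/* plays the role of pde_users */
		int unloading;			/* set once removal has started */
		ssize_t (*read)(char *buf, size_t len);	/* method that may go away */
	};

	/* Caller side: mirrors the shape of proc_reg_read(). */
	static ssize_t entry_read(struct entry *e, char *buf, size_t len)
	{
		ssize_t (*read)(char *, size_t);
		ssize_t rv = -1;

		pthread_mutex_lock(&e->lock);
		if (e->unloading) {		/* like !pde->proc_fops */
			pthread_mutex_unlock(&e->lock);
			return rv;
		}
		e->users++;			/* removal must now wait for us */
		read = e->read;			/* snapshot pointer under the lock */
		pthread_mutex_unlock(&e->lock);

		if (read)
			rv = read(buf, len);	/* call without holding the lock */

		pthread_mutex_lock(&e->lock);
		if (--e->users == 0)		/* like __pde_users_dec() */
			pthread_cond_signal(&e->unload_done);
		pthread_mutex_unlock(&e->lock);
		return rv;
	}

	/* Removal side: mirrors what remove_proc_entry() does with pde_users. */
	static void entry_remove(struct entry *e)
	{
		pthread_mutex_lock(&e->lock);
		e->unloading = 1;		/* no new callers past this point */
		e->read = NULL;
		while (e->users > 0)		/* wait for in-flight calls to drain */
			pthread_cond_wait(&e->unload_done, &e->lock);
		pthread_mutex_unlock(&e->lock);
		/* now safe to free the entry / let the module code go away */
	}

The point the wrappers rely on is that the method pointer is read while the lock is held, because ->proc_fops may be NULL'ed the moment the lock is dropped; the use count is what lets remove_proc_entry() wait out calls that are already in flight, and the pde_openers list handled in proc_reg_open()/proc_reg_release() covers the remaining case of files still open when the entry disappears.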