dentries: Extract common code to remove dentry from lru
diff --git a/fs/exec.c b/fs/exec.c
index 070ddf1..a44b142 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -112,14 +112,14 @@ asmlinkage long sys_uselib(const char __user * library)
                goto out;
 
        error = -EINVAL;
-       if (!S_ISREG(nd.dentry->d_inode->i_mode))
+       if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
                goto exit;
 
        error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
        if (error)
                goto exit;
 
-       file = nameidata_to_filp(&nd, O_RDONLY);
+       file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;
@@ -148,7 +148,7 @@ out:
        return error;
 exit:
        release_open_intent(&nd);
-       path_release(&nd);
+       path_put(&nd.path);
        goto out;
 }
 
@@ -234,7 +234,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_start = vma->vm_end - PAGE_SIZE;
 
        vma->vm_flags = VM_STACK_FLAGS;
-       vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        err = insert_vm_struct(mm, vma);
        if (err) {
                up_write(&mm->mmap_sem);
@@ -652,13 +652,14 @@ struct file *open_exec(const char *name)
        file = ERR_PTR(err);
 
        if (!err) {
-               struct inode *inode = nd.dentry->d_inode;
+               struct inode *inode = nd.path.dentry->d_inode;
                file = ERR_PTR(-EACCES);
                if (S_ISREG(inode->i_mode)) {
                        int err = vfs_permission(&nd, MAY_EXEC);
                        file = ERR_PTR(err);
                        if (!err) {
-                               file = nameidata_to_filp(&nd, O_RDONLY);
+                               file = nameidata_to_filp(&nd,
+                                                       O_RDONLY|O_LARGEFILE);
                                if (!IS_ERR(file)) {
                                        err = deny_write_access(file);
                                        if (err) {
@@ -671,7 +672,7 @@ out:
                        }
                }
                release_open_intent(&nd);
-               path_release(&nd);
+               path_put(&nd.path);
        }
        goto out;
 }
@@ -760,7 +761,7 @@ static int de_thread(struct task_struct *tsk)
         */
        read_lock(&tasklist_lock);
        spin_lock_irq(lock);
-       if (sig->flags & SIGNAL_GROUP_EXIT) {
+       if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
@@ -775,34 +776,16 @@ static int de_thread(struct task_struct *tsk)
         * Reparenting needs write_lock on tasklist_lock,
         * so it is safe to do it under read_lock.
         */
-       if (unlikely(tsk->group_leader == child_reaper(tsk)))
-               tsk->nsproxy->pid_ns->child_reaper = tsk;
+       if (unlikely(tsk->group_leader == task_child_reaper(tsk)))
+               task_active_pid_ns(tsk)->child_reaper = tsk;
 
+       sig->group_exit_task = tsk;
        zap_other_threads(tsk);
        read_unlock(&tasklist_lock);
 
-       /*
-        * Account for the thread group leader hanging around:
-        */
-       count = 1;
-       if (!thread_group_leader(tsk)) {
-               count = 2;
-               /*
-                * The SIGALRM timer survives the exec, but needs to point
-                * at us as the new group leader now.  We have a race with
-                * a timer firing now getting the old leader, so we need to
-                * synchronize with any firing (by calling del_timer_sync)
-                * before we can safely let the old group leader die.
-                */
-               sig->tsk = tsk;
-               spin_unlock_irq(lock);
-               if (hrtimer_cancel(&sig->real_timer))
-                       hrtimer_restart(&sig->real_timer);
-               spin_lock_irq(lock);
-       }
-
+       /* Account for the thread group leader hanging around: */
+       count = thread_group_leader(tsk) ? 1 : 2;
        sig->notify_count = count;
-       sig->group_exit_task = tsk;
        while (atomic_read(&sig->count) > count) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(lock);
@@ -841,8 +824,8 @@ static int de_thread(struct task_struct *tsk)
                 */
                tsk->start_time = leader->start_time;
 
-               BUG_ON(leader->tgid != tsk->tgid);
-               BUG_ON(tsk->pid == tsk->tgid);
+               BUG_ON(!same_thread_group(leader, tsk));
+               BUG_ON(has_group_leader_pid(tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
@@ -857,7 +840,7 @@ static int de_thread(struct task_struct *tsk)
                 */
                detach_pid(tsk, PIDTYPE_PID);
                tsk->pid = leader->pid;
-               attach_pid(tsk, PIDTYPE_PID,  find_pid(tsk->pid));
+               attach_pid(tsk, PIDTYPE_PID,  task_pid(leader));
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);
                list_replace_rcu(&leader->tasks, &tsk->tasks);
@@ -871,15 +854,10 @@ static int de_thread(struct task_struct *tsk)
                leader->exit_state = EXIT_DEAD;
 
                write_unlock_irq(&tasklist_lock);
-        }
+       }
 
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
-       /*
-        * There may be one thread left which is just exiting,
-        * but it's safe to stop telling the group to kill themselves.
-        */
-       sig->flags = 0;
 
 no_thread_group:
        exit_itimers(sig);
@@ -947,12 +925,13 @@ static void flush_old_files(struct files_struct * files)
        spin_unlock(&files->file_lock);
 }
 
-void get_task_comm(char *buf, struct task_struct *tsk)
+char *get_task_comm(char *buf, struct task_struct *tsk)
 {
        /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
        strncpy(buf, tsk->comm, sizeof(tsk->comm));
        task_unlock(tsk);
+       return buf;
 }
 
 void set_task_comm(struct task_struct *tsk, char *buf)
@@ -1188,7 +1167,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 {
        int try,retval;
        struct linux_binfmt *fmt;
-#ifdef __alpha__
+#if defined(__alpha__) && defined(CONFIG_ARCH_SUPPORTS_AOUT)
        /* handle /sbin/loader.. */
        {
            struct exec * eh = (struct exec *) bprm->buf;
@@ -1433,7 +1412,7 @@ static int format_corename(char *corename, const char *pattern, long signr)
                        case 'p':
                                pid_in_pattern = 1;
                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", current->tgid);
+                                             "%d", task_tgid_vnr(current));
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
@@ -1513,7 +1492,7 @@ static int format_corename(char *corename, const char *pattern, long signr)
        if (!ispipe && !pid_in_pattern
             && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
                rc = snprintf(out_ptr, out_end - out_ptr,
-                             ".%d", current->tgid);
+                             ".%d", task_tgid_vnr(current));
                if (rc > out_end - out_ptr)
                        goto out;
                out_ptr += rc;
@@ -1548,7 +1527,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        int err = -EAGAIN;
 
        spin_lock_irq(&tsk->sighand->siglock);
-       if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+       if (!signal_group_exit(tsk->signal)) {
                tsk->signal->group_exit_code = exit_code;
                zap_process(tsk);
                err = 0;
@@ -1692,7 +1671,10 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        down_write(&mm->mmap_sem);
-       if (!get_dumpable(mm)) {
+       /*
+        * If another thread got here first, or we are not dumpable, bail out.
+        */
+       if (mm->core_waiters || !get_dumpable(mm)) {
                up_write(&mm->mmap_sem);
                goto fail;
        }
@@ -1706,7 +1688,6 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
                flag = O_EXCL;          /* Stop rewrite attacks */
                current->fsuid = 0;     /* Dump root private */
        }
-       set_dumpable(mm, 0);
 
        retval = coredump_wait(exit_code);
        if (retval < 0)
@@ -1778,6 +1759,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
           but keep the previous behaviour for now. */
        if (!ispipe && !S_ISREG(inode->i_mode))
                goto close_fail;
+       /*
+        * Dont allow local users get cute and trick others to coredump
+        * into their pre-created files:
+        */
+       if (inode->i_uid != current->fsuid)
+               goto close_fail;
        if (!file->f_op)
                goto close_fail;
        if (!file->f_op->write)