X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fexec.c;h=929b58004b7eace02d784a2a4fbba17fd02b0533;hb=47be12e4eec84c1846f29af64fe25a396b57a026;hp=7f325df5e0140a05444e74114aec91325ee12355;hpb=0840a90d943bcde2fbfeabd3c256236eed2273cd;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/exec.c b/fs/exec.c index 7f325df..929b580 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -24,11 +24,12 @@ #include #include -#include -#include +#include +#include #include #include #include +#include #include #include #include @@ -37,34 +38,30 @@ #include #include #include -#include #include #include #include #include #include -#include #include #include #include -#include #include #include #include +#include +#include +#include #include #include #include - -#ifdef CONFIG_KMOD -#include -#endif +#include "internal.h" int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; int suid_dumpable = 0; -EXPORT_SYMBOL(suid_dumpable); /* The maximal length of core_pattern is also specified in sysctl.c */ static LIST_HEAD(formats); @@ -102,29 +99,42 @@ static inline void put_binfmt(struct linux_binfmt * fmt) * * Also note that we take the address to load from from the file itself. */ -asmlinkage long sys_uselib(const char __user * library) +SYSCALL_DEFINE1(uselib, const char __user *, library) { - struct file * file; + struct file *file; struct nameidata nd; - int error; - - error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC); + char *tmp = getname(library); + int error = PTR_ERR(tmp); + + if (!IS_ERR(tmp)) { + error = path_lookup_open(AT_FDCWD, tmp, + LOOKUP_FOLLOW, &nd, + FMODE_READ|FMODE_EXEC); + putname(tmp); + } if (error) goto out; error = -EINVAL; - if (!S_ISREG(nd.dentry->d_inode->i_mode)) + if (!S_ISREG(nd.path.dentry->d_inode->i_mode)) goto exit; - error = vfs_permission(&nd, MAY_READ | MAY_EXEC); + error = -EACCES; + if (nd.path.mnt->mnt_flags & MNT_NOEXEC) + goto exit; + + error = inode_permission(nd.path.dentry->d_inode, + MAY_READ | MAY_EXEC | MAY_OPEN); if (error) goto exit; - file = nameidata_to_filp(&nd, O_RDONLY); + file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE); error = PTR_ERR(file); if (IS_ERR(file)) goto out; + fsnotify_open(file->f_path.dentry); + error = -ENOEXEC; if(file->f_op) { struct linux_binfmt * fmt; @@ -149,7 +159,7 @@ out: return error; exit: release_open_intent(&nd); - path_release(&nd); + path_put(&nd.path); goto out; } @@ -174,8 +184,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, return NULL; if (write) { - struct rlimit *rlim = current->signal->rlim; unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; + struct rlimit *rlim; + + /* + * We've historically supported up to 32 pages (ARG_MAX) + * of argument strings even with small stacks + */ + if (size <= ARG_MAX) + return page; /* * Limit to 1/4-th the stack size for the argv+env strings. @@ -184,6 +201,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * - the program will have a reasonable amount of stack left * to work from. 
*/ + rlim = current->signal->rlim; if (size > rlim[RLIMIT_STACK].rlim_cur / 4) { put_page(page); return NULL; @@ -214,13 +232,13 @@ static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, static int __bprm_mm_init(struct linux_binprm *bprm) { - int err = -ENOMEM; + int err; struct vm_area_struct *vma = NULL; struct mm_struct *mm = bprm->mm; bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) - goto err; + return -ENOMEM; down_write(&mm->mmap_sem); vma->vm_mm = mm; @@ -233,28 +251,20 @@ static int __bprm_mm_init(struct linux_binprm *bprm) */ vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; - vma->vm_flags = VM_STACK_FLAGS; - vma->vm_page_prot = protection_map[vma->vm_flags & 0x7]; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); err = insert_vm_struct(mm, vma); - if (err) { - up_write(&mm->mmap_sem); + if (err) goto err; - } mm->stack_vm = mm->total_vm = 1; up_write(&mm->mmap_sem); - bprm->p = vma->vm_end - sizeof(void *); - return 0; - err: - if (vma) { - bprm->vma = NULL; - kmem_cache_free(vm_area_cachep, vma); - } - + up_write(&mm->mmap_sem); + bprm->vma = NULL; + kmem_cache_free(vm_area_cachep, vma); return err; } @@ -370,7 +380,7 @@ static int count(char __user * __user * argv, int max) if (!p) break; argv++; - if(++i > max) + if (i++ >= max) return -E2BIG; cond_resched(); } @@ -529,7 +539,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) /* * when the old and new regions overlap clear from new_end. */ - free_pgd_range(&tlb, new_end, old_end, new_end, + free_pgd_range(tlb, new_end, old_end, new_end, vma->vm_next ? vma->vm_next->vm_start : 0); } else { /* @@ -538,7 +548,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) * have constraints on va-space that make this illegal (IA64) - * for the others its just a little faster. */ - free_pgd_range(&tlb, old_start, old_end, new_end, + free_pgd_range(tlb, old_start, old_end, new_end, vma->vm_next ? 
vma->vm_next->vm_start : 0); } tlb_finish_mmu(tlb, new_end, old_end); @@ -598,7 +608,7 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->exec -= stack_shift; down_write(&mm->mmap_sem); - vm_flags = vma->vm_flags; + vm_flags = VM_STACK_FLAGS; /* * Adjust stack execute permissions; explicitly enable for @@ -646,37 +656,45 @@ EXPORT_SYMBOL(setup_arg_pages); struct file *open_exec(const char *name) { struct nameidata nd; - int err; struct file *file; + int err; - err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC); - file = ERR_PTR(err); - - if (!err) { - struct inode *inode = nd.dentry->d_inode; - file = ERR_PTR(-EACCES); - if (S_ISREG(inode->i_mode)) { - int err = vfs_permission(&nd, MAY_EXEC); - file = ERR_PTR(err); - if (!err) { - file = nameidata_to_filp(&nd, O_RDONLY); - if (!IS_ERR(file)) { - err = deny_write_access(file); - if (err) { - fput(file); - file = ERR_PTR(err); - } - } -out: - return file; - } - } - release_open_intent(&nd); - path_release(&nd); + err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, + FMODE_READ|FMODE_EXEC); + if (err) + goto out; + + err = -EACCES; + if (!S_ISREG(nd.path.dentry->d_inode->i_mode)) + goto out_path_put; + + if (nd.path.mnt->mnt_flags & MNT_NOEXEC) + goto out_path_put; + + err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN); + if (err) + goto out_path_put; + + file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE); + if (IS_ERR(file)) + return file; + + fsnotify_open(file->f_path.dentry); + + err = deny_write_access(file); + if (err) { + fput(file); + goto out; } - goto out; -} + return file; + + out_path_put: + release_open_intent(&nd); + path_put(&nd.path); + out: + return ERR_PTR(err); +} EXPORT_SYMBOL(open_exec); int kernel_read(struct file *file, unsigned long offset, @@ -711,12 +729,10 @@ static int exec_mmap(struct mm_struct *mm) * Make sure that if there is a core dump in progress * for the old mm, we get out and die instead of going * through with the exec. We must hold mmap_sem around - * checking core_waiters and changing tsk->mm. The - * core-inducing thread will increment core_waiters for - * each thread whose ->mm == old_mm. + * checking core_state and changing tsk->mm. */ down_read(&old_mm->mmap_sem); - if (unlikely(old_mm->core_waiters)) { + if (unlikely(old_mm->core_state)) { up_read(&old_mm->mmap_sem); return -EINTR; } @@ -731,6 +747,7 @@ static int exec_mmap(struct mm_struct *mm) if (old_mm) { up_read(&old_mm->mmap_sem); BUG_ON(active_mm != old_mm); + mm_update_next_owner(old_mm); mmput(old_mm); return 0; } @@ -747,84 +764,37 @@ static int exec_mmap(struct mm_struct *mm) static int de_thread(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; - struct sighand_struct *newsighand, *oldsighand = tsk->sighand; + struct sighand_struct *oldsighand = tsk->sighand; spinlock_t *lock = &oldsighand->siglock; - struct task_struct *leader = NULL; int count; - /* - * If we don't share sighandlers, then we aren't sharing anything - * and we can just re-use it all. - */ - if (atomic_read(&oldsighand->count) <= 1) { - exit_itimers(sig); - return 0; - } - - newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); - if (!newsighand) - return -ENOMEM; - if (thread_group_empty(tsk)) goto no_thread_group; /* * Kill all other threads in the thread group. - * We must hold tasklist_lock to call zap_other_threads. 
*/ - read_lock(&tasklist_lock); spin_lock_irq(lock); - if (sig->flags & SIGNAL_GROUP_EXIT) { + if (signal_group_exit(sig)) { /* * Another group action in progress, just * return so that the signal is processed. */ spin_unlock_irq(lock); - read_unlock(&tasklist_lock); - kmem_cache_free(sighand_cachep, newsighand); return -EAGAIN; } - - /* - * child_reaper ignores SIGKILL, change it now. - * Reparenting needs write_lock on tasklist_lock, - * so it is safe to do it under read_lock. - */ - if (unlikely(tsk->group_leader == child_reaper(tsk))) - tsk->nsproxy->pid_ns->child_reaper = tsk; - + sig->group_exit_task = tsk; zap_other_threads(tsk); - read_unlock(&tasklist_lock); - /* - * Account for the thread group leader hanging around: - */ - count = 1; - if (!thread_group_leader(tsk)) { - count = 2; - /* - * The SIGALRM timer survives the exec, but needs to point - * at us as the new group leader now. We have a race with - * a timer firing now getting the old leader, so we need to - * synchronize with any firing (by calling del_timer_sync) - * before we can safely let the old group leader die. - */ - sig->tsk = tsk; - spin_unlock_irq(lock); - if (hrtimer_cancel(&sig->real_timer)) - hrtimer_restart(&sig->real_timer); - spin_lock_irq(lock); - } + /* Account for the thread group leader hanging around: */ + count = thread_group_leader(tsk) ? 1 : 2; + sig->notify_count = count; while (atomic_read(&sig->count) > count) { - sig->group_exit_task = tsk; - sig->notify_count = count; __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(lock); schedule(); spin_lock_irq(lock); } - sig->group_exit_task = NULL; - sig->notify_count = 0; spin_unlock_irq(lock); /* @@ -833,14 +803,17 @@ static int de_thread(struct task_struct *tsk) * and to assume its PID: */ if (!thread_group_leader(tsk)) { - /* - * Wait for the thread group leader to be a zombie. - * It should already be zombie at this point, most - * of the time. - */ - leader = tsk->group_leader; - while (leader->exit_state != EXIT_ZOMBIE) - yield(); + struct task_struct *leader = tsk->group_leader; + + sig->notify_count = -1; /* for exit_notify() */ + for (;;) { + write_lock_irq(&tasklist_lock); + if (likely(leader->exit_state)) + break; + __set_current_state(TASK_UNINTERRUPTIBLE); + write_unlock_irq(&tasklist_lock); + schedule(); + } /* * The only record we have of the real-time age of a @@ -854,10 +827,8 @@ static int de_thread(struct task_struct *tsk) */ tsk->start_time = leader->start_time; - write_lock_irq(&tasklist_lock); - - BUG_ON(leader->tgid != tsk->tgid); - BUG_ON(tsk->pid == tsk->tgid); + BUG_ON(!same_thread_group(leader, tsk)); + BUG_ON(has_group_leader_pid(tsk)); /* * An exec() starts a new thread group with the * TGID of the previous thread group. Rehash the @@ -872,7 +843,7 @@ static int de_thread(struct task_struct *tsk) */ detach_pid(tsk, PIDTYPE_PID); tsk->pid = leader->pid; - attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid)); + attach_pid(tsk, PIDTYPE_PID, task_pid(leader)); transfer_pid(leader, tsk, PIDTYPE_PGID); transfer_pid(leader, tsk, PIDTYPE_SID); list_replace_rcu(&leader->tasks, &tsk->tasks); @@ -884,32 +855,28 @@ static int de_thread(struct task_struct *tsk) BUG_ON(leader->exit_state != EXIT_ZOMBIE); leader->exit_state = EXIT_DEAD; - write_unlock_irq(&tasklist_lock); - } - /* - * There may be one thread left which is just exiting, - * but it's safe to stop telling the group to kill themselves. 
- */ - sig->flags = 0; + release_task(leader); + } + + sig->group_exit_task = NULL; + sig->notify_count = 0; no_thread_group: exit_itimers(sig); - if (leader) - release_task(leader); + flush_itimer_signals(); - if (atomic_read(&oldsighand->count) == 1) { + if (atomic_read(&oldsighand->count) != 1) { + struct sighand_struct *newsighand; /* - * Now that we nuked the rest of the thread group, - * it turns out we are not sharing sighand any more either. - * So we can just keep it. - */ - kmem_cache_free(sighand_cachep, newsighand); - } else { - /* - * Move our state over to newsighand and switch it in. + * This ->sighand is shared with the CLONE_SIGHAND + * but not CLONE_THREAD task, switch to the new one. */ + newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); + if (!newsighand) + return -ENOMEM; + atomic_set(&newsighand->count, 1); memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action)); @@ -961,12 +928,13 @@ static void flush_old_files(struct files_struct * files) spin_unlock(&files->file_lock); } -void get_task_comm(char *buf, struct task_struct *tsk) +char *get_task_comm(char *buf, struct task_struct *tsk) { /* buf must be at least sizeof(tsk->comm) in size */ task_lock(tsk); strncpy(buf, tsk->comm, sizeof(tsk->comm)); task_unlock(tsk); + return buf; } void set_task_comm(struct task_struct *tsk, char *buf) @@ -980,7 +948,6 @@ int flush_old_exec(struct linux_binprm * bprm) { char * name; int i, ch, retval; - struct files_struct *files; char tcomm[sizeof(current->comm)]; /* @@ -991,30 +958,21 @@ int flush_old_exec(struct linux_binprm * bprm) if (retval) goto out; - /* - * Make sure we have private file handles. Ask the - * fork helper to do the work for us and the exit - * helper to do the cleanup of the old one. - */ - files = current->files; /* refcounted so safe to hold */ - retval = unshare_files(); - if (retval) - goto out; + set_mm_exe_file(bprm->mm, bprm->file); + /* * Release all of the old mmap stuff */ retval = exec_mmap(bprm->mm); if (retval) - goto mmap_failed; + goto out; bprm->mm = NULL; /* We're using it now */ /* This is the point of no return */ - put_files_struct(files); - current->sas_ss_sp = current->sas_ss_size = 0; - if (current->euid == current->uid && current->egid == current->gid) + if (current_euid() == current_uid() && current_egid() == current_gid()) set_dumpable(current->mm, 1); else set_dumpable(current->mm, suid_dumpable); @@ -1041,16 +999,17 @@ int flush_old_exec(struct linux_binprm * bprm) */ current->mm->task_size = TASK_SIZE; - if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) { - suid_keys(current); - set_dumpable(current->mm, suid_dumpable); + /* install the new credentials */ + if (bprm->cred->uid != current_euid() || + bprm->cred->gid != current_egid()) { current->pdeath_signal = 0; } else if (file_permission(bprm->file, MAY_READ) || - (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) { - suid_keys(current); + bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) { set_dumpable(current->mm, suid_dumpable); } + current->personality &= ~bprm->per_clear; + /* An exec changes our domain. 
We are no longer part of the thread group */ @@ -1061,21 +1020,72 @@ int flush_old_exec(struct linux_binprm * bprm) return 0; -mmap_failed: - reset_files_struct(current, files); out: return retval; } EXPORT_SYMBOL(flush_old_exec); +/* + * install the new credentials for this executable + */ +void install_exec_creds(struct linux_binprm *bprm) +{ + security_bprm_committing_creds(bprm); + + commit_creds(bprm->cred); + bprm->cred = NULL; + + /* cred_exec_mutex must be held at least to this point to prevent + * ptrace_attach() from altering our determination of the task's + * credentials; any time after this it may be unlocked */ + + security_bprm_committed_creds(bprm); +} +EXPORT_SYMBOL(install_exec_creds); + +/* + * determine how safe it is to execute the proposed program + * - the caller must hold current->cred_exec_mutex to protect against + * PTRACE_ATTACH + */ +void check_unsafe_exec(struct linux_binprm *bprm, struct files_struct *files) +{ + struct task_struct *p = current, *t; + unsigned long flags; + unsigned n_fs, n_files, n_sighand; + + bprm->unsafe = tracehook_unsafe_exec(p); + + n_fs = 1; + n_files = 1; + n_sighand = 1; + lock_task_sighand(p, &flags); + for (t = next_thread(p); t != p; t = next_thread(t)) { + if (t->fs == p->fs) + n_fs++; + if (t->files == files) + n_files++; + n_sighand++; + } + + if (atomic_read(&p->fs->count) > n_fs || + atomic_read(&p->files->count) > n_files || + atomic_read(&p->sighand->count) > n_sighand) + bprm->unsafe |= LSM_UNSAFE_SHARE; + + unlock_task_sighand(p, &flags); +} + /* * Fill the binprm structure from the inode. * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes + * + * This may be called multiple times for binary chains (scripts for example). */ int prepare_binprm(struct linux_binprm *bprm) { - int mode; + umode_t mode; struct inode * inode = bprm->file->f_path.dentry->d_inode; int retval; @@ -1083,14 +1093,15 @@ int prepare_binprm(struct linux_binprm *bprm) if (bprm->file->f_op == NULL) return -EACCES; - bprm->e_uid = current->euid; - bprm->e_gid = current->egid; + /* clear any previous set[ug]id data from a previous binary */ + bprm->cred->euid = current_euid(); + bprm->cred->egid = current_egid(); - if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) { + if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) { /* Set-uid? */ if (mode & S_ISUID) { - current->personality &= ~PER_CLEAR_ON_SETID; - bprm->e_uid = inode->i_uid; + bprm->per_clear |= PER_CLEAR_ON_SETID; + bprm->cred->euid = inode->i_uid; } /* Set-gid? */ @@ -1100,57 +1111,23 @@ int prepare_binprm(struct linux_binprm *bprm) * executable. 
*/ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { - current->personality &= ~PER_CLEAR_ON_SETID; - bprm->e_gid = inode->i_gid; + bprm->per_clear |= PER_CLEAR_ON_SETID; + bprm->cred->egid = inode->i_gid; } } /* fill in binprm security blob */ - retval = security_bprm_set(bprm); + retval = security_bprm_set_creds(bprm); if (retval) return retval; + bprm->cred_prepared = 1; - memset(bprm->buf,0,BINPRM_BUF_SIZE); - return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE); + memset(bprm->buf, 0, BINPRM_BUF_SIZE); + return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE); } EXPORT_SYMBOL(prepare_binprm); -static int unsafe_exec(struct task_struct *p) -{ - int unsafe = 0; - if (p->ptrace & PT_PTRACED) { - if (p->ptrace & PT_PTRACE_CAP) - unsafe |= LSM_UNSAFE_PTRACE_CAP; - else - unsafe |= LSM_UNSAFE_PTRACE; - } - if (atomic_read(&p->fs->count) > 1 || - atomic_read(&p->files->count) > 1 || - atomic_read(&p->sighand->count) > 1) - unsafe |= LSM_UNSAFE_SHARE; - - return unsafe; -} - -void compute_creds(struct linux_binprm *bprm) -{ - int unsafe; - - if (bprm->e_uid != current->uid) { - suid_keys(current); - current->pdeath_signal = 0; - } - exec_keys(current); - - task_lock(current); - unsafe = unsafe_exec(current); - security_bprm_apply_creds(bprm, unsafe); - task_unlock(current); - security_bprm_post_apply_creds(bprm); -} -EXPORT_SYMBOL(compute_creds); - /* * Arguments are '\0' separated strings found at the location bprm->p * points to; chop off the first by relocating brpm->p to right after @@ -1200,43 +1177,10 @@ EXPORT_SYMBOL(remove_arg_zero); */ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) { + unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; -#ifdef __alpha__ - /* handle /sbin/loader.. */ - { - struct exec * eh = (struct exec *) bprm->buf; - - if (!bprm->loader && eh->fh.f_magic == 0x183 && - (eh->fh.f_flags & 0x3000) == 0x3000) - { - struct file * file; - unsigned long loader; - allow_write_access(bprm->file); - fput(bprm->file); - bprm->file = NULL; - - loader = bprm->vma->vm_end - sizeof(void *); - - file = open_exec("/sbin/loader"); - retval = PTR_ERR(file); - if (IS_ERR(file)) - return retval; - - /* Remember if the application is TASO. */ - bprm->sh_bang = eh->ah.entry < 0x100000000UL; - - bprm->file = file; - bprm->loader = loader; - retval = prepare_binprm(bprm); - if (retval<0) - return retval; - /* should call search_binary_handler recursively here, - but it does not matter */ - } - } -#endif retval = security_bprm_check(bprm); if (retval) return retval; @@ -1260,7 +1204,15 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) continue; read_unlock(&binfmt_lock); retval = fn(bprm, regs); + /* + * Restore the depth counter to its starting value + * in this call, so we don't have to rely on every + * load_binary function to restore it on return. 
+ */ + bprm->recursion_depth = depth; if (retval >= 0) { + if (depth == 0) + tracehook_report_exec(fmt, bprm, regs); put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) @@ -1282,8 +1234,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) read_unlock(&binfmt_lock); if (retval != -ENOEXEC || bprm->mm == NULL) { break; -#ifdef CONFIG_KMOD - }else{ +#ifdef CONFIG_MODULES + } else { #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && @@ -1299,6 +1251,14 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) EXPORT_SYMBOL(search_binary_handler); +void free_bprm(struct linux_binprm *bprm) +{ + free_arg_pages(bprm); + if (bprm->cred) + abort_creds(bprm->cred); + kfree(bprm); +} + /* * sys_execve() executes a new program. */ @@ -1309,18 +1269,32 @@ int do_execve(char * filename, { struct linux_binprm *bprm; struct file *file; - unsigned long env_p; + struct files_struct *displaced; int retval; + retval = unshare_files(&displaced); + if (retval) + goto out_ret; + retval = -ENOMEM; bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); if (!bprm) - goto out_ret; + goto out_files; + + retval = mutex_lock_interruptible(¤t->cred_exec_mutex); + if (retval < 0) + goto out_free; + + retval = -ENOMEM; + bprm->cred = prepare_exec_creds(); + if (!bprm->cred) + goto out_unlock; + check_unsafe_exec(bprm, displaced); file = open_exec(filename); retval = PTR_ERR(file); if (IS_ERR(file)) - goto out_kfree; + goto out_unlock; sched_exec(); @@ -1334,14 +1308,10 @@ int do_execve(char * filename, bprm->argc = count(argv, MAX_ARG_STRINGS); if ((retval = bprm->argc) < 0) - goto out_mm; + goto out; bprm->envc = count(envp, MAX_ARG_STRINGS); if ((retval = bprm->envc) < 0) - goto out_mm; - - retval = security_bprm_alloc(bprm); - if (retval) goto out; retval = prepare_binprm(bprm); @@ -1357,28 +1327,24 @@ int do_execve(char * filename, if (retval < 0) goto out; - env_p = bprm->p; retval = copy_strings(bprm->argc, argv, bprm); if (retval < 0) goto out; - bprm->argv_len = env_p - bprm->p; + current->flags &= ~PF_KTHREAD; retval = search_binary_handler(bprm,regs); - if (retval >= 0) { - /* execve success */ - free_arg_pages(bprm); - security_bprm_free(bprm); - acct_update_integrals(current); - kfree(bprm); - return retval; - } + if (retval < 0) + goto out; -out: - free_arg_pages(bprm); - if (bprm->security) - security_bprm_free(bprm); + /* execve succeeded */ + mutex_unlock(¤t->cred_exec_mutex); + acct_update_integrals(current); + free_bprm(bprm); + if (displaced) + put_files_struct(displaced); + return retval; -out_mm: +out: if (bprm->mm) mmput (bprm->mm); @@ -1387,9 +1353,16 @@ out_file: allow_write_access(bprm->file); fput(bprm->file); } -out_kfree: - kfree(bprm); +out_unlock: + mutex_unlock(¤t->cred_exec_mutex); + +out_free: + free_bprm(bprm); + +out_files: + if (displaced) + reset_files_struct(displaced); out_ret: return retval; } @@ -1414,17 +1387,15 @@ EXPORT_SYMBOL(set_binfmt); * name into corename, which must have space for at least * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 
*/ -static int format_corename(char *corename, const char *pattern, long signr) +static int format_corename(char *corename, long signr) { - const char *pat_ptr = pattern; + const struct cred *cred = current_cred(); + const char *pat_ptr = core_pattern; + int ispipe = (*pat_ptr == '|'); char *out_ptr = corename; char *const out_end = corename + CORENAME_MAX_SIZE; int rc; int pid_in_pattern = 0; - int ispipe = 0; - - if (*pattern == '|') - ispipe = 1; /* Repeat as long as we have more pattern to process and more output space */ @@ -1447,7 +1418,7 @@ static int format_corename(char *corename, const char *pattern, long signr) case 'p': pid_in_pattern = 1; rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current->tgid); + "%d", task_tgid_vnr(current)); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1455,7 +1426,7 @@ static int format_corename(char *corename, const char *pattern, long signr) /* uid */ case 'u': rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current->uid); + "%d", cred->uid); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1463,7 +1434,7 @@ static int format_corename(char *corename, const char *pattern, long signr) /* gid */ case 'g': rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current->gid); + "%d", cred->gid); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1524,10 +1495,9 @@ static int format_corename(char *corename, const char *pattern, long signr) * If core_pattern does not include a %p (as is the default) * and core_uses_pid is set, then .%pid will be appended to * the filename. Do not do this for piped commands. */ - if (!ispipe && !pid_in_pattern - && (core_uses_pid || atomic_read(¤t->mm->mm_users) != 1)) { + if (!ispipe && !pid_in_pattern && core_uses_pid) { rc = snprintf(out_ptr, out_end - out_ptr, - ".%d", current->tgid); + ".%d", task_tgid_vnr(current)); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1537,9 +1507,10 @@ out: return ispipe; } -static void zap_process(struct task_struct *start) +static int zap_process(struct task_struct *start) { struct task_struct *t; + int nr = 0; start->signal->flags = SIGNAL_GROUP_EXIT; start->signal->group_stop_count = 0; @@ -1547,72 +1518,99 @@ static void zap_process(struct task_struct *start) t = start; do { if (t != current && t->mm) { - t->mm->core_waiters++; sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); + nr++; } - } while ((t = next_thread(t)) != start); + } while_each_thread(start, t); + + return nr; } static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, - int exit_code) + struct core_state *core_state, int exit_code) { struct task_struct *g, *p; unsigned long flags; - int err = -EAGAIN; + int nr = -EAGAIN; spin_lock_irq(&tsk->sighand->siglock); - if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) { + if (!signal_group_exit(tsk->signal)) { + mm->core_state = core_state; tsk->signal->group_exit_code = exit_code; - zap_process(tsk); - err = 0; + nr = zap_process(tsk); } spin_unlock_irq(&tsk->sighand->siglock); - if (err) - return err; + if (unlikely(nr < 0)) + return nr; - if (atomic_read(&mm->mm_users) == mm->core_waiters + 1) + if (atomic_read(&mm->mm_users) == nr + 1) goto done; - + /* + * We should find and kill all tasks which use this mm, and we should + * count them correctly into ->nr_threads. We don't take tasklist + * lock, but this is safe wrt: + * + * fork: + * None of sub-threads can fork after zap_process(leader). 
All + * processes which were created before this point should be + * visible to zap_threads() because copy_process() adds the new + * process to the tail of init_task.tasks list, and lock/unlock + * of ->siglock provides a memory barrier. + * + * do_exit: + * The caller holds mm->mmap_sem. This means that the task which + * uses this mm can't pass exit_mm(), so it can't exit or clear + * its ->mm. + * + * de_thread: + * It does list_replace_rcu(&leader->tasks, ¤t->tasks), + * we must see either old or new leader, this does not matter. + * However, it can change p->sighand, so lock_task_sighand(p) + * must be used. Since p->mm != NULL and we hold ->mmap_sem + * it can't fail. + * + * Note also that "g" can be the old leader with ->mm == NULL + * and already unhashed and thus removed from ->thread_group. + * This is OK, __unhash_process()->list_del_rcu() does not + * clear the ->next pointer, we will find the new leader via + * next_thread(). + */ rcu_read_lock(); for_each_process(g) { if (g == tsk->group_leader) continue; - + if (g->flags & PF_KTHREAD) + continue; p = g; do { if (p->mm) { - if (p->mm == mm) { - /* - * p->sighand can't disappear, but - * may be changed by de_thread() - */ + if (unlikely(p->mm == mm)) { lock_task_sighand(p, &flags); - zap_process(p); + nr += zap_process(p); unlock_task_sighand(p, &flags); } break; } - } while ((p = next_thread(p)) != g); + } while_each_thread(g, p); } rcu_read_unlock(); done: - return mm->core_waiters; + atomic_set(&core_state->nr_threads, nr); + return nr; } -static int coredump_wait(int exit_code) +static int coredump_wait(int exit_code, struct core_state *core_state) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; - struct completion startup_done; struct completion *vfork_done; int core_waiters; - init_completion(&mm->core_done); - init_completion(&startup_done); - mm->core_startup_done = &startup_done; - - core_waiters = zap_threads(tsk, mm, exit_code); + init_completion(&core_state->startup); + core_state->dumper.task = tsk; + core_state->dumper.next = NULL; + core_waiters = zap_threads(tsk, mm, core_state, exit_code); up_write(&mm->mmap_sem); if (unlikely(core_waiters < 0)) @@ -1629,12 +1627,32 @@ static int coredump_wait(int exit_code) } if (core_waiters) - wait_for_completion(&startup_done); + wait_for_completion(&core_state->startup); fail: - BUG_ON(mm->core_waiters); return core_waiters; } +static void coredump_finish(struct mm_struct *mm) +{ + struct core_thread *curr, *next; + struct task_struct *task; + + next = mm->core_state->dumper.next; + while ((curr = next) != NULL) { + next = curr->next; + task = curr->task; + /* + * see exit_mm(), curr->task must not see + * ->task == NULL before we read ->next. + */ + smp_mb(); + curr->task = NULL; + wake_up_process(task); + } + + mm->core_state = NULL; +} + /* * set_dumpable converts traditional three-value dumpable to two flags and * stores them into mm->flags. It modifies lower two bits of mm->flags, but @@ -1675,7 +1693,6 @@ void set_dumpable(struct mm_struct *mm, int value) break; } } -EXPORT_SYMBOL_GPL(set_dumpable); int get_dumpable(struct mm_struct *mm) { @@ -1685,15 +1702,17 @@ int get_dumpable(struct mm_struct *mm) return (ret >= 2) ? 
2 : ret; } -int do_coredump(long signr, int exit_code, struct pt_regs * regs) +void do_coredump(long signr, int exit_code, struct pt_regs *regs) { + struct core_state core_state; char corename[CORENAME_MAX_SIZE + 1]; struct mm_struct *mm = current->mm; struct linux_binfmt * binfmt; struct inode * inode; struct file * file; + const struct cred *old_cred; + struct cred *cred; int retval = 0; - int fsuid = current->fsuid; int flag = 0; int ispipe = 0; unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; @@ -1706,9 +1725,20 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) binfmt = current->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; + + cred = prepare_creds(); + if (!cred) { + retval = -ENOMEM; + goto fail; + } + down_write(&mm->mmap_sem); - if (!get_dumpable(mm)) { + /* + * If another thread got here first, or we are not dumpable, bail out. + */ + if (mm->core_state || !get_dumpable(mm)) { up_write(&mm->mmap_sem); + put_cred(cred); goto fail; } @@ -1719,13 +1749,16 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) */ if (get_dumpable(mm) == 2) { /* Setuid core dump mode */ flag = O_EXCL; /* Stop rewrite attacks */ - current->fsuid = 0; /* Dump root private */ + cred->fsuid = 0; /* Dump root private */ } - set_dumpable(mm, 0); - retval = coredump_wait(exit_code); - if (retval < 0) + retval = coredump_wait(exit_code, &core_state); + if (retval < 0) { + put_cred(cred); goto fail; + } + + old_cred = override_creds(cred); /* * Clear any false indication of pending signals that might @@ -1738,7 +1771,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) * uses lock_kernel() */ lock_kernel(); - ispipe = format_corename(corename, core_pattern, signr); + ispipe = format_corename(corename, signr); unlock_kernel(); /* * Don't bother to check the RLIMIT_CORE value if core_pattern points @@ -1753,6 +1786,11 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) if (ispipe) { helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc); + if (!helper_argv) { + printk(KERN_WARNING "%s failed to allocate memory\n", + __func__); + goto fail_unlock; + } /* Terminate the string before the first option */ delimit = strchr(corename, ' '); if (delimit) @@ -1793,6 +1831,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) but keep the previous behaviour for now. */ if (!ispipe && !S_ISREG(inode->i_mode)) goto close_fail; + /* + * Dont allow local users get cute and trick others to coredump + * into their pre-created files: + */ + if (inode->i_uid != current_fsuid()) + goto close_fail; if (!file->f_op) goto close_fail; if (!file->f_op->write) @@ -1810,8 +1854,9 @@ fail_unlock: if (helper_argv) argv_free(helper_argv); - current->fsuid = fsuid; - complete_all(&mm->core_done); + revert_creds(old_cred); + put_cred(cred); + coredump_finish(mm); fail: - return retval; + return; }
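
A note on the get_arg_page() hunk near the top of this diff: the new code first honors the historical 32-page ARG_MAX allowance, and only past that caps the combined argv+env area at a quarter of RLIMIT_STACK, so large-but-legal argument lists keep working while the new program still gets most of its stack. A minimal userspace sketch of the same policy follows; arg_size_ok() and ARG_MAX_BYTES are illustrative names, not kernel symbols, and a fixed 4096-byte page stands in for PAGE_SIZE.

#include <stdbool.h>
#include <sys/resource.h>

#define ARG_MAX_BYTES (32UL * 4096)   /* the historical 32 pages of args */

/* Illustrative helper, not a kernel function: accept the argument area
 * if it fits the historical ARG_MAX, otherwise cap it at a quarter of
 * the current stack limit so the new program keeps room to run. */
static bool arg_size_ok(unsigned long size)
{
        struct rlimit rlim;

        if (size <= ARG_MAX_BYTES)
                return true;
        if (getrlimit(RLIMIT_STACK, &rlim) != 0)
                return false;
        return size <= rlim.rlim_cur / 4;
}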
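
The one-line change in count() keeps the same walk over the argument vector: read pointers until the NULL terminator, bailing out once more than max entries have been seen. A direct userspace analog (minus the get_user() and cond_resched() kernel details) is sketched below; count_args() is an invented name.

#include <errno.h>
#include <stddef.h>

/* Userspace analog of count(): return the number of entries in a
 * NULL-terminated vector, or -E2BIG once more than max are seen. */
static int count_args(char *const argv[], int max)
{
        int i = 0;

        if (argv) {
                while (argv[i]) {
                        if (i++ >= max)
                                return -E2BIG;
                }
        }
        return i;
}

With this shape exactly max entries are accepted and the (max+1)-th triggers -E2BIG, matching the bound the hunk enforces.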
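
The reworked de_thread() replaces the old yield() polling on the zombie leader with proper sleeps: it publishes the expected survivor count in sig->notify_count, then repeatedly sets TASK_UNINTERRUPTIBLE, drops the siglock, and calls schedule() until sig->count drains (and later sleeps under tasklist_lock until the old leader reaches an exit state). Below is a loose pthread rendering of that lock/check/sleep/recheck handshake, with a condition variable standing in for the scheduler wakeup; struct group and both function names are invented for illustration, and the exit-side bookkeeping is collapsed into one decrement.

#include <pthread.h>

struct group {
        pthread_mutex_t lock;   /* analog of the siglock */
        pthread_cond_t  gone;   /* analog of the schedule()/wakeup pairing */
        int             live;   /* analog of sig->count */
};

/* The exec'ing thread: wait until only the accounted-for threads remain
 * (count is 1 when we are already the leader, 2 otherwise). */
static void wait_for_group_exit(struct group *g, int count)
{
        pthread_mutex_lock(&g->lock);
        while (g->live > count)
                pthread_cond_wait(&g->gone, &g->lock);
        pthread_mutex_unlock(&g->lock);
}

/* Each dying thread: drop the count and kick the waiter, a stand-in for
 * the sig->notify_count handshake on the kernel's exit path. */
static void group_thread_exit(struct group *g)
{
        pthread_mutex_lock(&g->lock);
        g->live--;
        pthread_cond_signal(&g->gone);
        pthread_mutex_unlock(&g->lock);
}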
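
format_corename() now reads the core_pattern global directly and takes the uid/gid from current_cred() (and the pid from task_tgid_vnr()) instead of raw task fields, but the expansion loop is unchanged: walk the pattern, expand %-specifiers with snprintf() into a fixed CORENAME_MAX_SIZE buffer with bounds checks, and report whether the pattern starts with '|' (a pipe to a helper). A cut-down userspace analog handling only %p, %u, %g and %% is sketched here; expand_core_pattern() is an invented name and getpid()/getuid()/getgid() stand in for the task and cred fields.

#include <stdio.h>
#include <unistd.h>

#define CORENAME_MAX_SIZE 128

/* Simplified analog of format_corename(): expand a few %-specifiers
 * into out[] with bounds checking; return 1 if the pattern is a pipe. */
static int expand_core_pattern(char *out, const char *pattern)
{
        const char *p = pattern;
        char *o = out;
        char *const end = out + CORENAME_MAX_SIZE;
        int ispipe = (*p == '|');
        int rc;

        while (*p && o < end - 1) {
                if (*p != '%') {
                        *o++ = *p++;
                        continue;
                }
                switch (*++p) {
                case '\0':                      /* trailing '%': drop it */
                        goto done;
                case 'p':
                        rc = snprintf(o, end - o, "%d", (int)getpid());
                        break;
                case 'u':
                        rc = snprintf(o, end - o, "%d", (int)getuid());
                        break;
                case 'g':
                        rc = snprintf(o, end - o, "%d", (int)getgid());
                        break;
                case '%':
                        rc = snprintf(o, end - o, "%%");
                        break;
                default:                        /* unknown: ignore it */
                        rc = 0;
                        break;
                }
                if (rc < 0 || rc >= end - o)    /* truncated: stop here */
                        goto done;
                o += rc;
                p++;
        }
done:
        *o = '\0';
        return ispipe;
}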
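
Finally, the core-dump rework replaces the old mm->core_done completion with a struct core_state: each thread that hits exit_mm() while a dump is in progress links a core_thread record into core_state->dumper and sleeps, and coredump_finish() walks that list, clears curr->task behind an smp_mb() (so a sleeper never reads a stale ->next), and wakes each task. Below is a simplified pthread rendering of that park/release protocol; the mutex takes over the barrier's job, a per-waiter condvar stands in for wake_up_process(), and all names are illustrative.

#include <pthread.h>
#include <stddef.h>

struct core_thread {
        struct core_thread *next;
        int                 parked;  /* analog of ->task being non-NULL */
        pthread_cond_t      wake;    /* analog of wake_up_process() */
};

struct core_state {
        pthread_mutex_t     lock;
        struct core_thread *waiters; /* analog of the dumper.next list */
};

/* Analog of the exit_mm() side: link ourselves in and park until the
 * dumper releases us. */
static void core_park(struct core_state *cs, struct core_thread *self)
{
        pthread_mutex_lock(&cs->lock);
        pthread_cond_init(&self->wake, NULL);
        self->parked = 1;
        self->next = cs->waiters;
        cs->waiters = self;
        while (self->parked)
                pthread_cond_wait(&self->wake, &cs->lock);
        pthread_mutex_unlock(&cs->lock);
}

/* Analog of coredump_finish(): detach the list, then release each
 * waiter; holding the mutex gives the ordering smp_mb() provides. */
static void core_finish(struct core_state *cs)
{
        struct core_thread *curr, *next;

        pthread_mutex_lock(&cs->lock);
        next = cs->waiters;
        cs->waiters = NULL;
        while ((curr = next) != NULL) {
                next = curr->next;
                curr->parked = 0;
                pthread_cond_signal(&curr->wake);
        }
        pthread_mutex_unlock(&cs->lock);
}

Note how core_finish() mirrors the while ((curr = next) != NULL) loop shape in the hunk above: ->next is read before the waiter is released, because a released waiter may free its record immediately.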