#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
-#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
+#include <linux/tracehook.h>
+#include <linux/kmod.h>
+#include <linux/fsnotify.h>
+#include <linux/fs_struct.h>
+#include <linux/pipe_fs_i.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-#ifdef __alpha__
-/* for /sbin/loader handling in search_binary_handler() */
-#include <linux/a.out.h>
-#endif
+#include "internal.h"
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
+unsigned int core_pipe_limit;
int suid_dumpable = 0;
/* The maximal length of core_pattern is also specified in sysctl.c */
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
-int register_binfmt(struct linux_binfmt * fmt)
+int __register_binfmt(struct linux_binfmt * fmt, int insert)
{
if (!fmt)
return -EINVAL;
write_lock(&binfmt_lock);
- list_add(&fmt->lh, &formats);
+ insert ? list_add(&fmt->lh, &formats) :
+ list_add_tail(&fmt->lh, &formats);
write_unlock(&binfmt_lock);
return 0;
}
-EXPORT_SYMBOL(register_binfmt);
+EXPORT_SYMBOL(__register_binfmt);
void unregister_binfmt(struct linux_binfmt * fmt)
{
*
* Also note that we take the address to load from from the file itself.
*/
-asmlinkage long sys_uselib(const char __user * library)
+SYSCALL_DEFINE1(uselib, const char __user *, library)
{
- struct file * file;
- struct nameidata nd;
- int error;
+ struct file *file;
+ char *tmp = getname(library);
+ int error = PTR_ERR(tmp);
- error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
- if (error)
+ if (IS_ERR(tmp))
+ goto out;
+
+ file = do_filp_open(AT_FDCWD, tmp,
+ O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+ MAY_READ | MAY_EXEC | MAY_OPEN);
+ putname(tmp);
+ error = PTR_ERR(file);
+ if (IS_ERR(file))
goto out;
error = -EINVAL;
- if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
+ if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
goto exit;
- error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
- if (error)
+ error = -EACCES;
+ if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
- error = PTR_ERR(file);
- if (IS_ERR(file))
- goto out;
+ fsnotify_open(file->f_path.dentry);
error = -ENOEXEC;
if(file->f_op) {
}
read_unlock(&binfmt_lock);
}
+exit:
fput(file);
out:
return error;
-exit:
- release_open_intent(&nd);
- path_put(&nd.path);
- goto out;
}
#ifdef CONFIG_MMU
* to work from.
*/
rlim = current->signal->rlim;
- if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
put_page(page);
return NULL;
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
- int err = -ENOMEM;
+ int err;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm;
bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma)
- goto err;
+ return -ENOMEM;
down_write(&mm->mmap_sem);
vma->vm_mm = mm;
*/
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
-
vma->vm_flags = VM_STACK_FLAGS;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
err = insert_vm_struct(mm, vma);
- if (err) {
- up_write(&mm->mmap_sem);
+ if (err)
goto err;
- }
mm->stack_vm = mm->total_vm = 1;
up_write(&mm->mmap_sem);
-
bprm->p = vma->vm_end - sizeof(void *);
-
return 0;
-
err:
- if (vma) {
- bprm->vma = NULL;
- kmem_cache_free(vm_area_cachep, vma);
- }
-
+ up_write(&mm->mmap_sem);
+ bprm->vma = NULL;
+ kmem_cache_free(vm_area_cachep, vma);
return err;
}
if (!p)
break;
argv++;
- if(++i > max)
+ if (i++ >= max)
return -E2BIG;
cond_resched();
}
/*
* cover the whole range: [new_start, old_end)
*/
- vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
+ if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
+ return -ENOMEM;
/*
* move the page tables downwards, on failure we rely on
tlb_finish_mmu(tlb, new_end, old_end);
/*
- * shrink the vma to just the new range.
+ * Shrink the vma to just the new range. Always succeeds.
*/
vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
return 0;
}
-#define EXTRA_STACK_VM_PAGES 20 /* random */
-
/*
* Finalizes the stack vm_area_struct. The flags and permissions are updated,
* the stack is optionally relocated, and some extra space is added.
struct vm_area_struct *prev = NULL;
unsigned long vm_flags;
unsigned long stack_base;
+ unsigned long stack_size;
+ unsigned long stack_expand;
+ unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size to 1GB */
- stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
+ stack_base = rlimit_max(RLIMIT_STACK);
if (stack_base > (1 << 30))
stack_base = 1 << 30;
/* Move stack pages down in memory. */
if (stack_shift) {
ret = shift_arg_pages(vma, stack_shift);
- if (ret) {
- up_write(&mm->mmap_sem);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
}
+ stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
+ stack_size = vma->vm_end - vma->vm_start;
+ /*
+ * Align this down to a page boundary as expand_stack
+ * will align it up.
+ */
+ rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
- stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_start + rlim_stack;
+ else
+ stack_base = vma->vm_end + stack_expand;
#else
- stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_end - rlim_stack;
+ else
+ stack_base = vma->vm_start - stack_expand;
#endif
ret = expand_stack(vma, stack_base);
if (ret)
out_unlock:
up_write(&mm->mmap_sem);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
struct file *open_exec(const char *name)
{
- struct nameidata nd;
- int err;
struct file *file;
+ int err;
+
+ file = do_filp_open(AT_FDCWD, name,
+ O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+ MAY_EXEC | MAY_OPEN);
+ if (IS_ERR(file))
+ goto out;
+
+ err = -EACCES;
+ if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ goto exit;
+
+ if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+ goto exit;
+
+ fsnotify_open(file->f_path.dentry);
+
+ err = deny_write_access(file);
+ if (err)
+ goto exit;
- err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
- file = ERR_PTR(err);
-
- if (!err) {
- struct inode *inode = nd.path.dentry->d_inode;
- file = ERR_PTR(-EACCES);
- if (S_ISREG(inode->i_mode)) {
- int err = vfs_permission(&nd, MAY_EXEC);
- file = ERR_PTR(err);
- if (!err) {
- file = nameidata_to_filp(&nd,
- O_RDONLY|O_LARGEFILE);
- if (!IS_ERR(file)) {
- err = deny_write_access(file);
- if (err) {
- fput(file);
- file = ERR_PTR(err);
- }
- }
out:
- return file;
- }
- }
- release_open_intent(&nd);
- path_put(&nd.path);
- }
- goto out;
-}
+ return file;
+exit:
+ fput(file);
+ return ERR_PTR(err);
+}
EXPORT_SYMBOL(open_exec);
-int kernel_read(struct file *file, unsigned long offset,
- char *addr, unsigned long count)
+int kernel_read(struct file *file, loff_t offset,
+ char *addr, unsigned long count)
{
mm_segment_t old_fs;
loff_t pos = offset;
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
+ sync_mm_rss(tsk, old_mm);
mm_release(tsk, old_mm);
if (old_mm) {
* Make sure that if there is a core dump in progress
* for the old mm, we get out and die instead of going
* through with the exec. We must hold mmap_sem around
- * checking core_waiters and changing tsk->mm. The
- * core-inducing thread will increment core_waiters for
- * each thread whose ->mm == old_mm.
+ * checking core_state and changing tsk->mm.
*/
down_read(&old_mm->mmap_sem);
- if (unlikely(old_mm->core_waiters)) {
+ if (unlikely(old_mm->core_state)) {
up_read(&old_mm->mmap_sem);
return -EINTR;
}
tsk->active_mm = mm;
activate_mm(active_mm, mm);
task_unlock(tsk);
- mm_update_next_owner(old_mm);
arch_pick_mmap_layout(mm);
if (old_mm) {
up_read(&old_mm->mmap_sem);
BUG_ON(active_mm != old_mm);
+ mm_update_next_owner(old_mm);
mmput(old_mm);
return 0;
}
struct signal_struct *sig = tsk->signal;
struct sighand_struct *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
- struct task_struct *leader = NULL;
int count;
if (thread_group_empty(tsk))
* and to assume its PID:
*/
if (!thread_group_leader(tsk)) {
- leader = tsk->group_leader;
+ struct task_struct *leader = tsk->group_leader;
sig->notify_count = -1; /* for exit_notify() */
for (;;) {
schedule();
}
- if (unlikely(task_child_reaper(tsk) == leader))
- task_active_pid_ns(tsk)->child_reaper = tsk;
/*
* The only record we have of the real-time age of a
* process, regardless of execs it's done, is start_time.
attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
+
list_replace_rcu(&leader->tasks, &tsk->tasks);
+ list_replace_init(&leader->sibling, &tsk->sibling);
tsk->group_leader = tsk;
leader->group_leader = tsk;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
-
write_unlock_irq(&tasklist_lock);
+
+ release_task(leader);
}
sig->group_exit_task = NULL;
sig->notify_count = 0;
no_thread_group:
+ if (current->mm)
+ setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
+
exit_itimers(sig);
flush_itimer_signals();
- if (leader)
- release_task(leader);
if (atomic_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
void set_task_comm(struct task_struct *tsk, char *buf)
{
task_lock(tsk);
+
+ /*
+ * Threads may access current->comm without holding
+ * the task lock, so write the string carefully.
+ * Readers without a lock may see incomplete new
+ * names but are safe from non-terminating string reads.
+ */
+ memset(tsk->comm, 0, TASK_COMM_LEN);
+ wmb();
strlcpy(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
+ perf_event_comm(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
- char * name;
- int i, ch, retval;
- char tcomm[sizeof(current->comm)];
+ int retval;
/*
* Make sure we have a private signal table and that
bprm->mm = NULL; /* We're using it now */
+ current->flags &= ~PF_RANDOMIZE;
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+ return 0;
+
+out:
+ return retval;
+}
+EXPORT_SYMBOL(flush_old_exec);
+
+void setup_new_exec(struct linux_binprm * bprm)
+{
+ int i, ch;
+ char * name;
+ char tcomm[sizeof(current->comm)];
+
+ arch_pick_mmap_layout(current->mm);
+
/* This is the point of no return */
current->sas_ss_sp = current->sas_ss_size = 0;
- if (current->euid == current->uid && current->egid == current->gid)
+ if (current_euid() == current_uid() && current_egid() == current_gid())
set_dumpable(current->mm, 1);
else
set_dumpable(current->mm, suid_dumpable);
tcomm[i] = '\0';
set_task_comm(current, tcomm);
- current->flags &= ~PF_RANDOMIZE;
- flush_thread();
-
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
*/
current->mm->task_size = TASK_SIZE;
- if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
- suid_keys(current);
- set_dumpable(current->mm, suid_dumpable);
+ /* install the new credentials */
+ if (bprm->cred->uid != current_euid() ||
+ bprm->cred->gid != current_egid()) {
current->pdeath_signal = 0;
} else if (file_permission(bprm->file, MAY_READ) ||
- (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
- suid_keys(current);
+ bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
set_dumpable(current->mm, suid_dumpable);
}
+ /*
+ * Flush performance counters when crossing a
+ * security domain:
+ */
+ if (!get_dumpable(current->mm))
+ perf_event_exit_task(current);
+
/* An exec changes our domain. We are no longer part of the thread
group */
flush_signal_handlers(current, 0);
flush_old_files(current->files);
+}
+EXPORT_SYMBOL(setup_new_exec);
- return 0;
+/*
+ * Prepare credentials and lock ->cred_guard_mutex.
+ * install_exec_creds() commits the new creds and drops the lock.
+ * Or, if exec fails before, free_bprm() should release ->cred and
+ * unlock.
+ */
+int prepare_bprm_creds(struct linux_binprm *bprm)
+{
+ if (mutex_lock_interruptible(&current->cred_guard_mutex))
+ return -ERESTARTNOINTR;
-out:
- return retval;
+ bprm->cred = prepare_exec_creds();
+ if (likely(bprm->cred))
+ return 0;
+
+ mutex_unlock(&current->cred_guard_mutex);
+ return -ENOMEM;
}
-EXPORT_SYMBOL(flush_old_exec);
+void free_bprm(struct linux_binprm *bprm)
+{
+ free_arg_pages(bprm);
+ if (bprm->cred) {
+ mutex_unlock(&current->cred_guard_mutex);
+ abort_creds(bprm->cred);
+ }
+ kfree(bprm);
+}
+
+/*
+ * install the new credentials for this executable
+ */
+void install_exec_creds(struct linux_binprm *bprm)
+{
+ security_bprm_committing_creds(bprm);
+
+ commit_creds(bprm->cred);
+ bprm->cred = NULL;
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+ * ptrace_attach() from altering our determination of the task's
+ * credentials; any time after this it may be unlocked.
+ */
+ security_bprm_committed_creds(bprm);
+ mutex_unlock(&current->cred_guard_mutex);
+}
+EXPORT_SYMBOL(install_exec_creds);
+
+/*
+ * determine how safe it is to execute the proposed program
+ * - the caller must hold current->cred_guard_mutex to protect against
+ * PTRACE_ATTACH
+ */
+int check_unsafe_exec(struct linux_binprm *bprm)
+{
+ struct task_struct *p = current, *t;
+ unsigned n_fs;
+ int res = 0;
+
+ bprm->unsafe = tracehook_unsafe_exec(p);
+
+ n_fs = 1;
+ write_lock(&p->fs->lock);
+ rcu_read_lock();
+ for (t = next_thread(p); t != p; t = next_thread(t)) {
+ if (t->fs == p->fs)
+ n_fs++;
+ }
+ rcu_read_unlock();
+
+ if (p->fs->users > n_fs) {
+ bprm->unsafe |= LSM_UNSAFE_SHARE;
+ } else {
+ res = -EAGAIN;
+ if (!p->fs->in_exec) {
+ p->fs->in_exec = 1;
+ res = 1;
+ }
+ }
+ write_unlock(&p->fs->lock);
+
+ return res;
+}
/*
* Fill the binprm structure from the inode.
* Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+ *
+ * This may be called multiple times for binary chains (scripts for example).
*/
int prepare_binprm(struct linux_binprm *bprm)
{
- int mode;
+ umode_t mode;
struct inode * inode = bprm->file->f_path.dentry->d_inode;
int retval;
if (bprm->file->f_op == NULL)
return -EACCES;
- bprm->e_uid = current->euid;
- bprm->e_gid = current->egid;
+ /* clear any previous set[ug]id data from a previous binary */
+ bprm->cred->euid = current_euid();
+ bprm->cred->egid = current_egid();
- if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
+ if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
/* Set-uid? */
if (mode & S_ISUID) {
- current->personality &= ~PER_CLEAR_ON_SETID;
- bprm->e_uid = inode->i_uid;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ bprm->cred->euid = inode->i_uid;
}
/* Set-gid? */
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
- current->personality &= ~PER_CLEAR_ON_SETID;
- bprm->e_gid = inode->i_gid;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ bprm->cred->egid = inode->i_gid;
}
}
/* fill in binprm security blob */
- retval = security_bprm_set(bprm);
+ retval = security_bprm_set_creds(bprm);
if (retval)
return retval;
+ bprm->cred_prepared = 1;
- memset(bprm->buf,0,BINPRM_BUF_SIZE);
- return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
+ memset(bprm->buf, 0, BINPRM_BUF_SIZE);
+ return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
EXPORT_SYMBOL(prepare_binprm);
-static int unsafe_exec(struct task_struct *p)
-{
- int unsafe = 0;
- if (p->ptrace & PT_PTRACED) {
- if (p->ptrace & PT_PTRACE_CAP)
- unsafe |= LSM_UNSAFE_PTRACE_CAP;
- else
- unsafe |= LSM_UNSAFE_PTRACE;
- }
- if (atomic_read(&p->fs->count) > 1 ||
- atomic_read(&p->files->count) > 1 ||
- atomic_read(&p->sighand->count) > 1)
- unsafe |= LSM_UNSAFE_SHARE;
-
- return unsafe;
-}
-
-void compute_creds(struct linux_binprm *bprm)
-{
- int unsafe;
-
- if (bprm->e_uid != current->uid) {
- suid_keys(current);
- current->pdeath_signal = 0;
- }
- exec_keys(current);
-
- task_lock(current);
- unsafe = unsafe_exec(current);
- security_bprm_apply_creds(bprm, unsafe);
- task_unlock(current);
- security_bprm_post_apply_creds(bprm);
-}
-EXPORT_SYMBOL(compute_creds);
-
/*
* Arguments are '\0' separated strings found at the location bprm->p
* points to; chop off the first by relocating brpm->p to right after
*/
int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
{
+ unsigned int depth = bprm->recursion_depth;
int try,retval;
struct linux_binfmt *fmt;
-#ifdef __alpha__
- /* handle /sbin/loader.. */
- {
- struct exec * eh = (struct exec *) bprm->buf;
-
- if (!bprm->loader && eh->fh.f_magic == 0x183 &&
- (eh->fh.f_flags & 0x3000) == 0x3000)
- {
- struct file * file;
- unsigned long loader;
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
- loader = bprm->vma->vm_end - sizeof(void *);
-
- file = open_exec("/sbin/loader");
- retval = PTR_ERR(file);
- if (IS_ERR(file))
- return retval;
-
- /* Remember if the application is TASO. */
- bprm->sh_bang = eh->ah.entry < 0x100000000UL;
-
- bprm->file = file;
- bprm->loader = loader;
- retval = prepare_binprm(bprm);
- if (retval<0)
- return retval;
- /* should call search_binary_handler recursively here,
- but it does not matter */
- }
- }
-#endif
retval = security_bprm_check(bprm);
if (retval)
return retval;
continue;
read_unlock(&binfmt_lock);
retval = fn(bprm, regs);
+ /*
+ * Restore the depth counter to its starting value
+ * in this call, so we don't have to rely on every
+ * load_binary function to restore it on return.
+ */
+ bprm->recursion_depth = depth;
if (retval >= 0) {
+ if (depth == 0)
+ tracehook_report_exec(fmt, bprm, regs);
put_binfmt(fmt);
allow_write_access(bprm->file);
if (bprm->file)
read_unlock(&binfmt_lock);
if (retval != -ENOEXEC || bprm->mm == NULL) {
break;
-#ifdef CONFIG_KMOD
- }else{
+#ifdef CONFIG_MODULES
+ } else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
if (printable(bprm->buf[0]) &&
printable(bprm->buf[1]) &&
EXPORT_SYMBOL(search_binary_handler);
-void free_bprm(struct linux_binprm *bprm)
-{
- free_arg_pages(bprm);
- kfree(bprm);
-}
-
/*
* sys_execve() executes a new program.
*/
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
+ bool clear_in_exec;
int retval;
retval = unshare_files(&displaced);
if (!bprm)
goto out_files;
+ retval = prepare_bprm_creds(bprm);
+ if (retval)
+ goto out_free;
+
+ retval = check_unsafe_exec(bprm);
+ if (retval < 0)
+ goto out_free;
+ clear_in_exec = retval;
+ current->in_execve = 1;
+
file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
- goto out_kfree;
+ goto out_unmark;
sched_exec();
bprm->argc = count(argv, MAX_ARG_STRINGS);
if ((retval = bprm->argc) < 0)
- goto out_mm;
+ goto out;
bprm->envc = count(envp, MAX_ARG_STRINGS);
if ((retval = bprm->envc) < 0)
- goto out_mm;
-
- retval = security_bprm_alloc(bprm);
- if (retval)
goto out;
retval = prepare_binprm(bprm);
if (retval < 0)
goto out;
+ current->flags &= ~PF_KTHREAD;
retval = search_binary_handler(bprm,regs);
- if (retval >= 0) {
- /* execve success */
- security_bprm_free(bprm);
- acct_update_integrals(current);
- free_bprm(bprm);
- if (displaced)
- put_files_struct(displaced);
- return retval;
- }
+ if (retval < 0)
+ goto out;
-out:
- if (bprm->security)
- security_bprm_free(bprm);
+ /* execve succeeded */
+ current->fs->in_exec = 0;
+ current->in_execve = 0;
+ acct_update_integrals(current);
+ free_bprm(bprm);
+ if (displaced)
+ put_files_struct(displaced);
+ return retval;
-out_mm:
+out:
if (bprm->mm)
mmput (bprm->mm);
allow_write_access(bprm->file);
fput(bprm->file);
}
-out_kfree:
+
+out_unmark:
+ if (clear_in_exec)
+ current->fs->in_exec = 0;
+ current->in_execve = 0;
+
+out_free:
free_bprm(bprm);
out_files:
return retval;
}
-int set_binfmt(struct linux_binfmt *new)
+void set_binfmt(struct linux_binfmt *new)
{
- struct linux_binfmt *old = current->binfmt;
+ struct mm_struct *mm = current->mm;
- if (new) {
- if (!try_module_get(new->module))
- return -1;
- }
- current->binfmt = new;
- if (old)
- module_put(old->module);
- return 0;
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
+
+ mm->binfmt = new;
+ if (new)
+ __module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static int format_corename(char *corename, const char *pattern, long signr)
+static int format_corename(char *corename, long signr)
{
- const char *pat_ptr = pattern;
+ const struct cred *cred = current_cred();
+ const char *pat_ptr = core_pattern;
+ int ispipe = (*pat_ptr == '|');
char *out_ptr = corename;
char *const out_end = corename + CORENAME_MAX_SIZE;
int rc;
int pid_in_pattern = 0;
- int ispipe = 0;
-
- if (*pattern == '|')
- ispipe = 1;
/* Repeat as long as we have more pattern to process and more output
space */
/* uid */
case 'u':
rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", current->uid);
+ "%d", cred->uid);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
/* gid */
case 'g':
rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", current->gid);
+ "%d", cred->gid);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
/* core limit size */
case 'c':
rc = snprintf(out_ptr, out_end - out_ptr,
- "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
+ "%lu", rlimit(RLIMIT_CORE));
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
* If core_pattern does not include a %p (as is the default)
* and core_uses_pid is set, then .%pid will be appended to
* the filename. Do not do this for piped commands. */
- if (!ispipe && !pid_in_pattern
- && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
+ if (!ispipe && !pid_in_pattern && core_uses_pid) {
rc = snprintf(out_ptr, out_end - out_ptr,
".%d", task_tgid_vnr(current));
if (rc > out_end - out_ptr)
return ispipe;
}
-static void zap_process(struct task_struct *start)
+static int zap_process(struct task_struct *start, int exit_code)
{
struct task_struct *t;
+ int nr = 0;
start->signal->flags = SIGNAL_GROUP_EXIT;
+ start->signal->group_exit_code = exit_code;
start->signal->group_stop_count = 0;
t = start;
do {
if (t != current && t->mm) {
- t->mm->core_waiters++;
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
+ nr++;
}
- } while ((t = next_thread(t)) != start);
+ } while_each_thread(start, t);
+
+ return nr;
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
- int exit_code)
+ struct core_state *core_state, int exit_code)
{
struct task_struct *g, *p;
unsigned long flags;
- int err = -EAGAIN;
+ int nr = -EAGAIN;
spin_lock_irq(&tsk->sighand->siglock);
if (!signal_group_exit(tsk->signal)) {
- tsk->signal->group_exit_code = exit_code;
- zap_process(tsk);
- err = 0;
+ mm->core_state = core_state;
+ nr = zap_process(tsk, exit_code);
}
spin_unlock_irq(&tsk->sighand->siglock);
- if (err)
- return err;
+ if (unlikely(nr < 0))
+ return nr;
- if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+ if (atomic_read(&mm->mm_users) == nr + 1)
goto done;
-
+ /*
+ * We should find and kill all tasks which use this mm, and we should
+ * count them correctly into ->nr_threads. We don't take tasklist
+ * lock, but this is safe wrt:
+ *
+ * fork:
+ * None of sub-threads can fork after zap_process(leader). All
+ * processes which were created before this point should be
+ * visible to zap_threads() because copy_process() adds the new
+ * process to the tail of init_task.tasks list, and lock/unlock
+ * of ->siglock provides a memory barrier.
+ *
+ * do_exit:
+ * The caller holds mm->mmap_sem. This means that the task which
+ * uses this mm can't pass exit_mm(), so it can't exit or clear
+ * its ->mm.
+ *
+ * de_thread:
+ * It does list_replace_rcu(&leader->tasks, &current->tasks),
+ * we must see either old or new leader, this does not matter.
+ * However, it can change p->sighand, so lock_task_sighand(p)
+ * must be used. Since p->mm != NULL and we hold ->mmap_sem
+ * it can't fail.
+ *
+ * Note also that "g" can be the old leader with ->mm == NULL
+ * and already unhashed and thus removed from ->thread_group.
+ * This is OK, __unhash_process()->list_del_rcu() does not
+ * clear the ->next pointer, we will find the new leader via
+ * next_thread().
+ */
rcu_read_lock();
for_each_process(g) {
if (g == tsk->group_leader)
continue;
-
+ if (g->flags & PF_KTHREAD)
+ continue;
p = g;
do {
if (p->mm) {
- if (p->mm == mm) {
- /*
- * p->sighand can't disappear, but
- * may be changed by de_thread()
- */
+ if (unlikely(p->mm == mm)) {
lock_task_sighand(p, &flags);
- zap_process(p);
+ nr += zap_process(p, exit_code);
unlock_task_sighand(p, &flags);
}
break;
}
- } while ((p = next_thread(p)) != g);
+ } while_each_thread(g, p);
}
rcu_read_unlock();
done:
- return mm->core_waiters;
+ atomic_set(&core_state->nr_threads, nr);
+ return nr;
}
-static int coredump_wait(int exit_code)
+static int coredump_wait(int exit_code, struct core_state *core_state)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- struct completion startup_done;
struct completion *vfork_done;
int core_waiters;
- init_completion(&mm->core_done);
- init_completion(&startup_done);
- mm->core_startup_done = &startup_done;
-
- core_waiters = zap_threads(tsk, mm, exit_code);
+ init_completion(&core_state->startup);
+ core_state->dumper.task = tsk;
+ core_state->dumper.next = NULL;
+ core_waiters = zap_threads(tsk, mm, core_state, exit_code);
up_write(&mm->mmap_sem);
if (unlikely(core_waiters < 0))
}
if (core_waiters)
- wait_for_completion(&startup_done);
+ wait_for_completion(&core_state->startup);
fail:
- BUG_ON(mm->core_waiters);
return core_waiters;
}
+static void coredump_finish(struct mm_struct *mm)
+{
+ struct core_thread *curr, *next;
+ struct task_struct *task;
+
+ next = mm->core_state->dumper.next;
+ while ((curr = next) != NULL) {
+ next = curr->next;
+ task = curr->task;
+ /*
+ * see exit_mm(), curr->task must not see
+ * ->task == NULL before we read ->next.
+ */
+ smp_mb();
+ curr->task = NULL;
+ wake_up_process(task);
+ }
+
+ mm->core_state = NULL;
+}
+
/*
* set_dumpable converts traditional three-value dumpable to two flags and
* stores them into mm->flags. It modifies lower two bits of mm->flags, but
}
}
-int get_dumpable(struct mm_struct *mm)
+static int __get_dumpable(unsigned long mm_flags)
{
int ret;
- ret = mm->flags & 0x3;
+ ret = mm_flags & MMF_DUMPABLE_MASK;
return (ret >= 2) ? 2 : ret;
}
-int do_coredump(long signr, int exit_code, struct pt_regs * regs)
+int get_dumpable(struct mm_struct *mm)
+{
+ return __get_dumpable(mm->flags);
+}
+
+static void wait_for_dump_helpers(struct file *file)
{
+ struct pipe_inode_info *pipe;
+
+ pipe = file->f_path.dentry->d_inode->i_pipe;
+
+ pipe_lock(pipe);
+ pipe->readers++;
+ pipe->writers--;
+
+ while ((pipe->readers > 1) && (!signal_pending(current))) {
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ pipe_wait(pipe);
+ }
+
+ pipe->readers--;
+ pipe->writers++;
+ pipe_unlock(pipe);
+}
+
+
+void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+{
+ struct core_state core_state;
char corename[CORENAME_MAX_SIZE + 1];
struct mm_struct *mm = current->mm;
struct linux_binfmt * binfmt;
struct inode * inode;
- struct file * file;
+ const struct cred *old_cred;
+ struct cred *cred;
int retval = 0;
- int fsuid = current->fsuid;
int flag = 0;
int ispipe = 0;
- unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
char **helper_argv = NULL;
int helper_argc = 0;
- char *delimit;
+ int dump_count = 0;
+ static atomic_t core_dump_count = ATOMIC_INIT(0);
+ struct coredump_params cprm = {
+ .signr = signr,
+ .regs = regs,
+ .limit = rlimit(RLIMIT_CORE),
+ /*
+ * We must use the same mm->flags while dumping core to avoid
+ * inconsistency of bit flags, since this flag is not protected
+ * by any locks.
+ */
+ .mm_flags = mm->flags,
+ };
audit_core_dumps(signr);
- binfmt = current->binfmt;
+ binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
+
+ cred = prepare_creds();
+ if (!cred) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+
down_write(&mm->mmap_sem);
/*
* If another thread got here first, or we are not dumpable, bail out.
*/
- if (mm->core_waiters || !get_dumpable(mm)) {
+ if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
up_write(&mm->mmap_sem);
+ put_cred(cred);
goto fail;
}
* process nor do we know its entire history. We only know it
* was tainted so we dump it as root in mode 2.
*/
- if (get_dumpable(mm) == 2) { /* Setuid core dump mode */
+ if (__get_dumpable(cprm.mm_flags) == 2) {
+ /* Setuid core dump mode */
flag = O_EXCL; /* Stop rewrite attacks */
- current->fsuid = 0; /* Dump root private */
+ cred->fsuid = 0; /* Dump root private */
}
- retval = coredump_wait(exit_code);
- if (retval < 0)
+ retval = coredump_wait(exit_code, &core_state);
+ if (retval < 0) {
+ put_cred(cred);
goto fail;
+ }
+
+ old_cred = override_creds(cred);
/*
* Clear any false indication of pending signals that might
* uses lock_kernel()
*/
lock_kernel();
- ispipe = format_corename(corename, core_pattern, signr);
+ ispipe = format_corename(corename, signr);
unlock_kernel();
- /*
- * Don't bother to check the RLIMIT_CORE value if core_pattern points
- * to a pipe. Since we're not writing directly to the filesystem
- * RLIMIT_CORE doesn't really apply, as no actual core file will be
- * created unless the pipe reader choses to write out the core file
- * at which point file size limits and permissions will be imposed
- * as it does with any other process
- */
- if ((!ispipe) && (core_limit < binfmt->min_coredump))
+
+ if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
goto fail_unlock;
if (ispipe) {
- helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
- /* Terminate the string before the first option */
- delimit = strchr(corename, ' ');
- if (delimit)
- *delimit = '\0';
- delimit = strrchr(helper_argv[0], '/');
- if (delimit)
- delimit++;
- else
- delimit = helper_argv[0];
- if (!strcmp(delimit, current->comm)) {
- printk(KERN_NOTICE "Recursive core dump detected, "
- "aborting\n");
+ if (cprm.limit == 0) {
+ /*
+ * Normally core limits are irrelevant to pipes, since
+ * we're not writing to the file system, but we use
+ * cprm.limit of 0 here as a special value. Any
+ * non-zero limit gets set to RLIM_INFINITY below, but
+ * a limit of 0 skips the dump. This is a consistent
+ * way to catch recursive crashes. We can still crash
+ * if the core_pattern binary sets RLIMIT_CORE = !0
+ * but it runs as root, and can do lots of stupid things.
+ * Note that we use task_tgid_vnr here to grab the pid
+ * of the process group leader. That way we get the
+ * right pid if a thread in a multi-threaded
+ * core_pattern process dies.
+ */
+ printk(KERN_WARNING
+ "Process %d(%s) has RLIMIT_CORE set to 0\n",
+ task_tgid_vnr(current), current->comm);
+ printk(KERN_WARNING "Aborting core\n");
goto fail_unlock;
}
- core_limit = RLIM_INFINITY;
+ dump_count = atomic_inc_return(&core_dump_count);
+ if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+ printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+ task_tgid_vnr(current), current->comm);
+ printk(KERN_WARNING "Skipping core dump\n");
+ goto fail_dropcount;
+ }
+
+ helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
+ if (!helper_argv) {
+ printk(KERN_WARNING "%s failed to allocate memory\n",
+ __func__);
+ goto fail_dropcount;
+ }
+
+ cprm.limit = RLIM_INFINITY;
/* SIGPIPE can happen, but it's just never processed */
- if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
- &file)) {
+ if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
+ &cprm.file)) {
printk(KERN_INFO "Core dump to %s pipe failed\n",
corename);
- goto fail_unlock;
+ goto fail_dropcount;
}
} else
- file = filp_open(corename,
+ cprm.file = filp_open(corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
- if (IS_ERR(file))
- goto fail_unlock;
- inode = file->f_path.dentry->d_inode;
+ if (IS_ERR(cprm.file))
+ goto fail_dropcount;
+ inode = cprm.file->f_path.dentry->d_inode;
if (inode->i_nlink > 1)
goto close_fail; /* multiple links - don't dump */
- if (!ispipe && d_unhashed(file->f_path.dentry))
+ if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
goto close_fail;
/* AK: actually i see no reason to not allow this for named pipes etc.,
/*
* Dont allow local users get cute and trick others to coredump
* into their pre-created files:
+ * Note, this is not relevant for pipes
*/
- if (inode->i_uid != current->fsuid)
+ if (!ispipe && (inode->i_uid != current_fsuid()))
goto close_fail;
- if (!file->f_op)
+ if (!cprm.file->f_op)
goto close_fail;
- if (!file->f_op->write)
+ if (!cprm.file->f_op->write)
goto close_fail;
- if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
+ if (!ispipe &&
+ do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
goto close_fail;
- retval = binfmt->core_dump(signr, regs, file, core_limit);
+ retval = binfmt->core_dump(&cprm);
if (retval)
current->signal->group_exit_code |= 0x80;
close_fail:
- filp_close(file, NULL);
+ if (ispipe && core_pipe_limit)
+ wait_for_dump_helpers(cprm.file);
+ filp_close(cprm.file, NULL);
+fail_dropcount:
+ if (dump_count)
+ atomic_dec(&core_dump_count);
fail_unlock:
if (helper_argv)
argv_free(helper_argv);
- current->fsuid = fsuid;
- complete_all(&mm->core_done);
+ revert_creds(old_cred);
+ put_cred(cred);
+ coredump_finish(mm);
fail:
- return retval;
+ return;
}