X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fbinfmt_elf.c;h=535e763ab1a61e56ef5d25d725dc4f39eb899a49;hb=292ec4cf3536a5ed8809e8823341b203e497bbaf;hp=88d180306cf942a9f65672ab57b5aad8f46722e0;hpb=83914441f94c6f2cd468ca97365f6c34f418706e;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 88d1803..535e763 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -12,8 +12,6 @@ #include #include #include -#include -#include #include #include #include @@ -21,23 +19,19 @@ #include #include #include -#include -#include #include -#include #include #include #include #include -#include #include #include #include #include -#include #include #include #include +#include #include #include #include @@ -51,8 +45,8 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, * If we don't support core dumping, then supply a NULL so we * don't even try. */ -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) -static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); +#ifdef CONFIG_ELF_CORE +static int elf_core_dump(struct coredump_params *cprm); #else #define elf_core_dump NULL #endif @@ -131,6 +125,15 @@ static int padzero(unsigned long elf_bss) #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; }) #endif +#ifndef ELF_BASE_PLATFORM +/* + * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture. + * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value + * will be copied to the user stack in the same manner as AT_PLATFORM. + */ +#define ELF_BASE_PLATFORM NULL +#endif + static int create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, unsigned long load_addr, unsigned long interp_load_addr) @@ -142,11 +145,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, elf_addr_t __user *envp; elf_addr_t __user *sp; elf_addr_t __user *u_platform; + elf_addr_t __user *u_base_platform; + elf_addr_t __user *u_rand_bytes; const char *k_platform = ELF_PLATFORM; + const char *k_base_platform = ELF_BASE_PLATFORM; + unsigned char k_rand_bytes[16]; int items; elf_addr_t *elf_info; int ei_index = 0; - struct task_struct *tsk = current; + const struct cred *cred = current_cred(); struct vm_area_struct *vma; /* @@ -172,6 +179,28 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, return -EFAULT; } + /* + * If this architecture has a "base" platform capability + * string, copy it to userspace. + */ + u_base_platform = NULL; + if (k_base_platform) { + size_t len = strlen(k_base_platform) + 1; + + u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len); + if (__copy_to_user(u_base_platform, k_base_platform, len)) + return -EFAULT; + } + + /* + * Generate 16 random bytes for userspace PRNG seeding. 
+ */ + get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); + u_rand_bytes = (elf_addr_t __user *) + STACK_ALLOC(p, sizeof(k_rand_bytes)); + if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) + return -EFAULT; + /* Create the ELF interpreter info */ elf_info = (elf_addr_t *)current->mm->saved_auxv; /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */ @@ -199,16 +228,21 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, NEW_AUX_ENT(AT_BASE, interp_load_addr); NEW_AUX_ENT(AT_FLAGS, 0); NEW_AUX_ENT(AT_ENTRY, exec->e_entry); - NEW_AUX_ENT(AT_UID, tsk->uid); - NEW_AUX_ENT(AT_EUID, tsk->euid); - NEW_AUX_ENT(AT_GID, tsk->gid); - NEW_AUX_ENT(AT_EGID, tsk->egid); + NEW_AUX_ENT(AT_UID, cred->uid); + NEW_AUX_ENT(AT_EUID, cred->euid); + NEW_AUX_ENT(AT_GID, cred->gid); + NEW_AUX_ENT(AT_EGID, cred->egid); NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); + NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes); NEW_AUX_ENT(AT_EXECFN, bprm->exec); if (k_platform) { NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform); } + if (k_base_platform) { + NEW_AUX_ENT(AT_BASE_PLATFORM, + (elf_addr_t)(unsigned long)u_base_platform); + } if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) { NEW_AUX_ENT(AT_EXECFD, bprm->interp_data); } @@ -468,22 +502,22 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, } } - /* - * Now fill out the bss section. First pad the last page up - * to the page boundary, and then perform a mmap to make sure - * that there are zero-mapped pages up to and including the - * last bss page. - */ - if (padzero(elf_bss)) { - error = -EFAULT; - goto out_close; - } + if (last_bss > elf_bss) { + /* + * Now fill out the bss section. First pad the last page up + * to the page boundary, and then perform a mmap to make sure + * that there are zero-mapped pages up to and including the + * last bss page. 
+ */ + if (padzero(elf_bss)) { + error = -EFAULT; + goto out_close; + } - /* What we have mapped so far */ - elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); + /* What we have mapped so far */ + elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); - /* Map the last of the bss segment */ - if (last_bss > elf_bss) { + /* Map the last of the bss segment */ down_write(¤t->mm->mmap_sem); error = do_brk(elf_bss, last_bss - elf_bss); up_write(¤t->mm->mmap_sem); @@ -536,7 +570,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata; unsigned long elf_bss, elf_brk; - int elf_exec_fileno; int retval, i; unsigned int size; unsigned long elf_entry; @@ -591,12 +624,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto out_free_ph; } - retval = get_unused_fd(); - if (retval < 0) - goto out_free_ph; - get_file(bprm->file); - fd_install(elf_exec_fileno = retval, bprm->file); - elf_ppnt = elf_phdata; elf_bss = 0; elf_brk = 0; @@ -615,13 +642,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) retval = -ENOEXEC; if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2) - goto out_free_file; + goto out_free_ph; retval = -ENOMEM; elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL); if (!elf_interpreter) - goto out_free_file; + goto out_free_ph; retval = kernel_read(bprm->file, elf_ppnt->p_offset, elf_interpreter, @@ -636,27 +663,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') goto out_free_interp; - /* - * The early SET_PERSONALITY here is so that the lookup - * for the interpreter happens in the namespace of the - * to-be-execed image. SET_PERSONALITY can select an - * alternate root. - * - * However, SET_PERSONALITY is NOT allowed to switch - * this task into the new images's memory mapping - * policy - that is, TASK_SIZE must still evaluate to - * that which is appropriate to the execing application. - * This is because exit_mmap() needs to have TASK_SIZE - * evaluate to the size of the old image. - * - * So if (say) a 64-bit application is execing a 32-bit - * application it is the architecture's responsibility - * to defer changing the value of TASK_SIZE until the - * switch really is going to happen - do this in - * flush_thread(). - akpm - */ - SET_PERSONALITY(loc->elf_ex, 0); - interpreter = open_exec(elf_interpreter); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) @@ -704,9 +710,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* Verify the interpreter has a valid arch */ if (!elf_check_arch(&loc->interp_elf_ex)) goto out_free_dentry; - } else { - /* Executables without an interpreter also need a personality */ - SET_PERSONALITY(loc->elf_ex, 0); } /* Flush all traces of the currently running executable */ @@ -720,13 +723,14 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. */ - SET_PERSONALITY(loc->elf_ex, 0); + SET_PERSONALITY(loc->elf_ex); if (elf_read_implies_exec(loc->elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current->flags |= PF_RANDOMIZE; - arch_pick_mmap_layout(current->mm); + + setup_new_exec(bprm); /* Do this so that we can load the interpreter, if need be. 
We will change some of these later */ @@ -741,7 +745,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) current->mm->start_stack = bprm->p; - /* Now we do a little grungy work by mmaping the ELF image into + /* Now we do a little grungy work by mmapping the ELF image into the correct location in memory. */ for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { @@ -916,19 +920,17 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) kfree(elf_phdata); - sys_close(elf_exec_fileno); - set_binfmt(&elf_format); #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES - retval = arch_setup_additional_pages(bprm, executable_stack); + retval = arch_setup_additional_pages(bprm, !!elf_interpreter); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out; } #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; retval = create_elf_tables(bprm, &loc->elf_ex, load_addr, interp_load_addr); @@ -975,12 +977,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) #endif start_thread(regs, elf_entry, bprm->p); - if (unlikely(current->ptrace & PT_PTRACED)) { - if (current->ptrace & PT_TRACE_EXEC) - ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); - else - send_sig(SIGTRAP, current, 0); - } retval = 0; out: kfree(loc); @@ -994,8 +990,6 @@ out_free_dentry: fput(interpreter); out_free_interp: kfree(elf_interpreter); -out_free_file: - sys_close(elf_exec_fileno); out_free_ph: kfree(elf_phdata); goto out; @@ -1085,48 +1079,13 @@ out: return error; } -/* - * Note that some platforms still use traditional core dumps and not - * the ELF core dump. Each platform can select it as appropriate. - */ -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) - +#ifdef CONFIG_ELF_CORE /* * ELF core dumper * * Modelled on fs/exec.c:aout_core_dump() * Jeremy Fitzhardinge */ -/* - * These are the only things you should do on a core-file: use only these - * functions to write out all the necessary info. - */ -static int dump_write(struct file *file, const void *addr, int nr) -{ - return file->f_op->write(file, addr, nr, &file->f_pos) == nr; -} - -static int dump_seek(struct file *file, loff_t off) -{ - if (file->f_op->llseek && file->f_op->llseek != no_llseek) { - if (file->f_op->llseek(file, off, SEEK_CUR) < 0) - return 0; - } else { - char *buf = (char *)get_zeroed_page(GFP_KERNEL); - if (!buf) - return 0; - while (off > 0) { - unsigned long n = off; - if (n > PAGE_SIZE) - n = PAGE_SIZE; - if (!dump_write(file, buf, n)) - return 0; - off -= n; - } - free_page((unsigned long)buf); - } - return 1; -} /* * Decide what to dump of a segment, part, all or none. @@ -1134,16 +1093,24 @@ static int dump_seek(struct file *file, loff_t off) static unsigned long vma_dump_size(struct vm_area_struct *vma, unsigned long mm_flags) { +#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) + /* The vma can be set up to tell us the answer directly. */ if (vma->vm_flags & VM_ALWAYSDUMP) goto whole; + /* Hugetlb memory check */ + if (vma->vm_flags & VM_HUGETLB) { + if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED)) + goto whole; + if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) + goto whole; + } + /* Do not dump I/O mapped devices or special mappings */ if (vma->vm_flags & (VM_IO | VM_RESERVED)) return 0; -#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) - /* By default, dump shared memory if mapped from an anonymous file. 
*/ if (vma->vm_flags & VM_SHARED) { if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ? @@ -1166,9 +1133,11 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, * check for an ELF header. If we find one, dump the first page to * aid in determining what was mapped here. */ - if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) { + if (FILTER(ELF_HEADERS) && + vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) { u32 __user *header = (u32 __user *) vma->vm_start; u32 word; + mm_segment_t fs = get_fs(); /* * Doing it this way gets the constant folded by GCC. */ @@ -1181,7 +1150,15 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, magic.elfmag[EI_MAG1] = ELFMAG1; magic.elfmag[EI_MAG2] = ELFMAG2; magic.elfmag[EI_MAG3] = ELFMAG3; - if (get_user(word, header) == 0 && word == magic.cmp) + /* + * Switch to the user "segment" for get_user(), + * then put back what elf_core_dump() had in place. + */ + set_fs(USER_DS); + if (unlikely(get_user(word, header))) + word = 0; + set_fs(fs); + if (word == magic.cmp) return PAGE_SIZE; } @@ -1243,13 +1220,6 @@ static int writenote(struct memelfnote *men, struct file *file, } #undef DUMP_WRITE -#define DUMP_WRITE(addr, nr) \ - if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ - goto end_coredump; -#define DUMP_SEEK(off) \ - if (!dump_seek(file, (off))) \ - goto end_coredump; - static void fill_elf_header(struct elfhdr *elf, int segs, u16 machine, u32 flags, u8 osabi) { @@ -1306,25 +1276,22 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; prstatus->pr_sigpend = p->pending.signal.sig[0]; prstatus->pr_sighold = p->blocked.sig[0]; + rcu_read_lock(); + prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); prstatus->pr_pid = task_pid_vnr(p); - prstatus->pr_ppid = task_pid_vnr(p->real_parent); prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { + struct task_cputime cputime; + /* - * This is the record for the group leader. Add in the - * cumulative times of previous dead threads. This total - * won't include the time of each live thread whose state - * is included in the core dump. The final total reported - * to our parent process when it calls wait4 will include - * those sums as well as the little bit more time it takes - * this and each other thread to finish dying after the - * core dump synchronization phase. + * This is the record for the group leader. It shows the + * group-wide total, not its individual thread total. 
*/ - cputime_to_timeval(cputime_add(p->utime, p->signal->utime), - &prstatus->pr_utime); - cputime_to_timeval(cputime_add(p->stime, p->signal->stime), - &prstatus->pr_stime); + thread_group_cputime(p, &cputime); + cputime_to_timeval(cputime.utime, &prstatus->pr_utime); + cputime_to_timeval(cputime.stime, &prstatus->pr_stime); } else { cputime_to_timeval(p->utime, &prstatus->pr_utime); cputime_to_timeval(p->stime, &prstatus->pr_stime); @@ -1336,6 +1303,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus, static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, struct mm_struct *mm) { + const struct cred *cred; unsigned int i, len; /* first copy the parameters from user space */ @@ -1352,8 +1320,10 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_psargs[i] = ' '; psinfo->pr_psargs[len] = 0; + rcu_read_lock(); + psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); psinfo->pr_pid = task_pid_vnr(p); - psinfo->pr_ppid = task_pid_vnr(p->real_parent); psinfo->pr_pgrp = task_pgrp_vnr(p); psinfo->pr_sid = task_session_vnr(p); @@ -1363,8 +1333,11 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_zomb = psinfo->pr_sname == 'Z'; psinfo->pr_nice = task_nice(p); psinfo->pr_flag = p->flags; - SET_UID(psinfo->pr_uid, p->uid); - SET_GID(psinfo->pr_gid, p->gid); + rcu_read_lock(); + cred = __task_cred(p); + SET_UID(psinfo->pr_uid, cred->uid); + SET_GID(psinfo->pr_gid, cred->gid); + rcu_read_unlock(); strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); return 0; @@ -1485,11 +1458,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, info->thread = NULL; psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); - fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); - if (psinfo == NULL) return 0; + fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); + /* * Figure out how many notes we're going to need for each thread. 
*/ @@ -1677,42 +1650,52 @@ struct elf_note_info { int numnote; }; -static int fill_note_info(struct elfhdr *elf, int phdrs, - struct elf_note_info *info, - long signr, struct pt_regs *regs) +static int elf_note_info_init(struct elf_note_info *info) { -#define NUM_NOTES 6 - struct list_head *t; - - info->notes = NULL; - info->prstatus = NULL; - info->psinfo = NULL; - info->fpu = NULL; -#ifdef ELF_CORE_COPY_XFPREGS - info->xfpu = NULL; -#endif + memset(info, 0, sizeof(*info)); INIT_LIST_HEAD(&info->thread_list); - info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), - GFP_KERNEL); + /* Allocate space for six ELF notes */ + info->notes = kmalloc(6 * sizeof(struct memelfnote), GFP_KERNEL); if (!info->notes) return 0; info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL); if (!info->psinfo) - return 0; + goto notes_free; info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL); if (!info->prstatus) - return 0; + goto psinfo_free; info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); if (!info->fpu) - return 0; + goto prstatus_free; #ifdef ELF_CORE_COPY_XFPREGS info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); if (!info->xfpu) - return 0; + goto fpu_free; #endif + return 1; +#ifdef ELF_CORE_COPY_XFPREGS + fpu_free: + kfree(info->fpu); +#endif + prstatus_free: + kfree(info->prstatus); + psinfo_free: + kfree(info->psinfo); + notes_free: + kfree(info->notes); + return 0; +} + +static int fill_note_info(struct elfhdr *elf, int phdrs, + struct elf_note_info *info, + long signr, struct pt_regs *regs) +{ + struct list_head *t; + + if (!elf_note_info_init(info)) + return 0; - info->thread_status_size = 0; if (signr) { struct core_thread *ct; struct elf_thread_status *ets; @@ -1772,8 +1755,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, #endif return 1; - -#undef NUM_NOTES } static size_t get_note_info_size(struct elf_note_info *info) @@ -1857,6 +1838,34 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, return gate_vma; } +static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, + elf_addr_t e_shoff, int segs) +{ + elf->e_shoff = e_shoff; + elf->e_shentsize = sizeof(*shdr4extnum); + elf->e_shnum = 1; + elf->e_shstrndx = SHN_UNDEF; + + memset(shdr4extnum, 0, sizeof(*shdr4extnum)); + + shdr4extnum->sh_type = SHT_NULL; + shdr4extnum->sh_size = elf->e_shnum; + shdr4extnum->sh_link = elf->e_shstrndx; + shdr4extnum->sh_info = segs; +} + +static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma, + unsigned long mm_flags) +{ + struct vm_area_struct *vma; + size_t size = 0; + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) + size += vma_dump_size(vma, mm_flags); + return size; +} + /* * Actual dumper * @@ -1864,7 +1873,7 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, * and then they are actually written out. If we run out of core limit * we just truncate. 
*/ -static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit) +static int elf_core_dump(struct coredump_params *cprm) { int has_dumped = 0; mm_segment_t fs; @@ -1873,8 +1882,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un struct vm_area_struct *vma, *gate_vma; struct elfhdr *elf = NULL; loff_t offset = 0, dataoff, foffset; - unsigned long mm_flags; struct elf_note_info info; + struct elf_phdr *phdr4note = NULL; + struct elf_shdr *shdr4extnum = NULL; + Elf_Half e_phnum; + elf_addr_t e_shoff; /* * We no longer stop all VM operations. @@ -1892,22 +1904,30 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un elf = kmalloc(sizeof(*elf), GFP_KERNEL); if (!elf) goto out; - + /* + * The number of segs are recored into ELF header as 16bit value. + * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. + */ segs = current->mm->map_count; -#ifdef ELF_CORE_EXTRA_PHDRS - segs += ELF_CORE_EXTRA_PHDRS; -#endif + segs += elf_core_extra_phdrs(); gate_vma = get_gate_vma(current); if (gate_vma != NULL) segs++; + /* for notes section */ + segs++; + + /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid + * this, kernel supports extended numbering. Have a look at + * include/linux/elf.h for further information. */ + e_phnum = segs > PN_XNUM ? PN_XNUM : segs; + /* * Collect all the non-memory information about the process for the * notes. This also sets up the file header. */ - if (!fill_note_info(elf, segs + 1, /* including notes section */ - &info, signr, regs)) + if (!fill_note_info(elf, e_phnum, &info, cprm->signr, cprm->regs)) goto cleanup; has_dumped = 1; @@ -1916,31 +1936,47 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un fs = get_fs(); set_fs(KERNEL_DS); - DUMP_WRITE(elf, sizeof(*elf)); offset += sizeof(*elf); /* Elf header */ - offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */ + offset += segs * sizeof(struct elf_phdr); /* Program headers */ foffset = offset; /* Write notes phdr entry */ { - struct elf_phdr phdr; size_t sz = get_note_info_size(&info); sz += elf_coredump_extra_notes_size(); - fill_elf_note_phdr(&phdr, sz, offset); + phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL); + if (!phdr4note) + goto end_coredump; + + fill_elf_note_phdr(phdr4note, sz, offset); offset += sz; - DUMP_WRITE(&phdr, sizeof(phdr)); } dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); - /* - * We must use the same mm->flags while dumping core to avoid - * inconsistency between the program headers and bodies, otherwise an - * unusable core file can be generated. 
- */ - mm_flags = current->mm->flags; + offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags); + offset += elf_core_extra_data_size(); + e_shoff = offset; + + if (e_phnum == PN_XNUM) { + shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL); + if (!shdr4extnum) + goto end_coredump; + fill_extnum_info(elf, shdr4extnum, e_shoff, segs); + } + + offset = dataoff; + + size += sizeof(*elf); + if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf))) + goto end_coredump; + + size += sizeof(*phdr4note); + if (size > cprm->limit + || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note))) + goto end_coredump; /* Write program headers for segments dump */ for (vma = first_vma(current, gate_vma); vma != NULL; @@ -1951,7 +1987,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un phdr.p_offset = offset; phdr.p_vaddr = vma->vm_start; phdr.p_paddr = 0; - phdr.p_filesz = vma_dump_size(vma, mm_flags); + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags); phdr.p_memsz = vma->vm_end - vma->vm_start; offset += phdr.p_filesz; phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; @@ -1961,77 +1997,76 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un phdr.p_flags |= PF_X; phdr.p_align = ELF_EXEC_PAGESIZE; - DUMP_WRITE(&phdr, sizeof(phdr)); + size += sizeof(phdr); + if (size > cprm->limit + || !dump_write(cprm->file, &phdr, sizeof(phdr))) + goto end_coredump; } -#ifdef ELF_CORE_WRITE_EXTRA_PHDRS - ELF_CORE_WRITE_EXTRA_PHDRS; -#endif + if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit)) + goto end_coredump; /* write out the notes section */ - if (!write_note_info(&info, file, &foffset)) + if (!write_note_info(&info, cprm->file, &foffset)) goto end_coredump; - if (elf_coredump_extra_notes_write(file, &foffset)) + if (elf_coredump_extra_notes_write(cprm->file, &foffset)) goto end_coredump; /* Align to page */ - DUMP_SEEK(dataoff - foffset); + if (!dump_seek(cprm->file, dataoff - foffset)) + goto end_coredump; for (vma = first_vma(current, gate_vma); vma != NULL; vma = next_vma(vma, gate_vma)) { unsigned long addr; unsigned long end; - end = vma->vm_start + vma_dump_size(vma, mm_flags); + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags); for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { struct page *page; - struct vm_area_struct *tmp_vma; - - if (get_user_pages(current, current->mm, addr, 1, 0, 1, - &page, &tmp_vma) <= 0) { - DUMP_SEEK(PAGE_SIZE); - } else { - if (page == ZERO_PAGE(0)) { - if (!dump_seek(file, PAGE_SIZE)) { - page_cache_release(page); - goto end_coredump; - } - } else { - void *kaddr; - flush_cache_page(tmp_vma, addr, - page_to_pfn(page)); - kaddr = kmap(page); - if ((size += PAGE_SIZE) > limit || - !dump_write(file, kaddr, - PAGE_SIZE)) { - kunmap(page); - page_cache_release(page); - goto end_coredump; - } - kunmap(page); - } + int stop; + + page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); + stop = ((size += PAGE_SIZE) > cprm->limit) || + !dump_write(cprm->file, kaddr, + PAGE_SIZE); + kunmap(page); page_cache_release(page); - } + } else + stop = !dump_seek(cprm->file, PAGE_SIZE); + if (stop) + goto end_coredump; } } -#ifdef ELF_CORE_WRITE_EXTRA_DATA - ELF_CORE_WRITE_EXTRA_DATA; -#endif + if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit)) + goto end_coredump; + + if (e_phnum == PN_XNUM) { + size += sizeof(*shdr4extnum); + if (size > cprm->limit + || !dump_write(cprm->file, shdr4extnum, + sizeof(*shdr4extnum))) + goto end_coredump; + } 
end_coredump: set_fs(fs); cleanup: free_note_info(&info); + kfree(shdr4extnum); + kfree(phdr4note); kfree(elf); out: return has_dumped; } -#endif /* USE_ELF_CORE_DUMP */ +#endif /* CONFIG_ELF_CORE */ static int __init init_elf_binfmt(void) {
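
---

The create_elf_tables() hunks above add two auxiliary-vector entries visible to userspace: AT_RANDOM, which points at 16 kernel-generated random bytes placed on the new process's stack, and AT_BASE_PLATFORM, which carries the architecture's "real" platform string when the arch defines ELF_BASE_PLATFORM. As a rough illustration (not part of the patch), a program built against a glibc that provides getauxval() -- 2.16 or later -- could read these entries as sketched below; AT_BASE_PLATFORM is guarded because older userspace headers may not define it.

/*
 * Illustrative only -- not part of the patch above.  Reads the aux-vector
 * entries that create_elf_tables() now exports.  Assumes glibc >= 2.16
 * for getauxval(); AT_BASE_PLATFORM is guarded because older userspace
 * headers may not define it.
 */
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_RANDOM: address of the 16 random bytes copied to the stack. */
	const unsigned char *rnd = (const unsigned char *)getauxval(AT_RANDOM);
	if (rnd) {
		printf("AT_RANDOM:        ");
		for (int i = 0; i < 16; i++)
			printf("%02x", rnd[i]);
		printf("\n");
	}

	/* AT_PLATFORM is long-standing; AT_BASE_PLATFORM is only present
	 * when the architecture defines ELF_BASE_PLATFORM. */
	const char *plat = (const char *)getauxval(AT_PLATFORM);
	if (plat)
		printf("AT_PLATFORM:      %s\n", plat);
#ifdef AT_BASE_PLATFORM
	const char *base = (const char *)getauxval(AT_BASE_PLATFORM);
	if (base)
		printf("AT_BASE_PLATFORM: %s\n", base);
#endif
	return 0;
}

Architectures that do not define ELF_BASE_PLATFORM simply omit the entry, so a reader has to tolerate getauxval() returning 0 for it.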
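The elf_core_dump() changes also introduce extended program-header numbering: when a dump would need more than PN_XNUM (0xffff) segments, e_phnum is capped at PN_XNUM and fill_extnum_info() stores the real count in sh_info of section header 0. A minimal sketch (again illustrative, not from the patch) of how a core-file reader would recover the true count -- Elf64 only, with error handling reduced to the bare minimum -- might look like this:

/*
 * Illustrative only -- a userspace reader's view of the extended
 * numbering written by fill_extnum_info().  Elf64 is assumed and error
 * handling is minimal; PN_XNUM is provided by recent <elf.h> headers,
 * with a fallback definition for older ones.
 */
#include <stdio.h>
#include <elf.h>

#ifndef PN_XNUM
#define PN_XNUM 0xffff
#endif

static size_t core_phnum(FILE *f)
{
	Elf64_Ehdr ehdr;
	Elf64_Shdr shdr0;

	if (fread(&ehdr, sizeof(ehdr), 1, f) != 1)
		return 0;
	if (ehdr.e_phnum != PN_XNUM)
		return ehdr.e_phnum;		/* ordinary case */

	/* Extended numbering: the real count is in sh_info of the first
	 * (index 0) section header, where fill_extnum_info() put it. */
	if (fseek(f, (long)ehdr.e_shoff, SEEK_SET) != 0 ||
	    fread(&shdr0, sizeof(shdr0), 1, f) != 1)
		return 0;
	return shdr0.sh_info;
}

int main(int argc, char **argv)
{
	FILE *f;

	if (argc != 2)
		return 1;
	f = fopen(argv[1], "rb");
	if (!f)
		return 1;
	printf("program headers: %zu\n", core_phnum(f));
	fclose(f);
	return 0;
}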