/*
 * fs/proc/kcore.c kernel ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 * ELF version written by David Howells <David.Howells@nexor.co.uk>
 * Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 * Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 * Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>
#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;
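/*
 * Register a kernel virtual address range that should be exported as a
 * PT_LOAD segment of /proc/kcore.
 */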
void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}
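/*
 * Example (hypothetical caller, not in this file): an architecture could
 * expose its module mapping area the same way, e.g.
 *
 *	static struct kcore_list kcore_modules;
 *	kclist_add(&kcore_modules, (void *)MODULES_VADDR,
 *		   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
 */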
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}
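/* Free every entry on a private list of kcore_list items. */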
static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *tmp, *pos;
	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}
/*
 * Replace all KCORE_RAM information with the passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		list_splice(list, &garbage);
	kcore_need_update = 0;
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}
#ifdef CONFIG_HIGHMEM
/*
 * When CONFIG_HIGHMEM is set, assume [0...max_low_pfn) is one continuous
 * range of memory: the holes in lowmem are not as big as in the !HIGHMEM
 * case.  (HIGHMEM is special because part of memory is _invisible_ to the
 * kernel's direct mapping.)
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	__kcore_update_ram(&head);
	return ret;
}
#else /* !CONFIG_HIGHMEM */
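/*
 * walk_system_ram_range() callback: build a KCORE_RAM entry for each
 * System RAM range, clipped to the portion that is actually direct-mapped.
 */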
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
	ent->size = nr_pages << PAGE_SHIFT;

	/* Sanity check: can happen on a 32-bit arch, maybe. */
	if (ent->addr < (unsigned long) __va(0))
		goto free_out;

	/* Cut off any part that is not mapped (taken from the ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/* Cut off at VMALLOC_START when the vmalloc() area sits above the direct map. */
	if (VMALLOC_START > (unsigned long)__va(0)) {
		if (ent->addr > VMALLOC_START)
			goto free_out;
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);
	return 0;
free_out:
	kfree(ent);
	return 1;
}
static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long node_end;
		node_end = NODE_DATA(nid)->node_start_pfn +
			NODE_DATA(nid)->node_spanned_pages;
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */
/*****************************************************************************/
/*
 * determine the size of an ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup((strlen(en->name) + 1), 4);
	sz += roundup(en->datasz, 4);
	return sz;
} /* end notesize() */
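/*
 * For example, the "CORE"/NT_PRSTATUS note comes to sizeof(struct elf_note)
 * (12 bytes) + 8 (the 5-byte "CORE\0" name rounded up to a multiple of 4)
 * + roundup(sizeof(struct elf_prstatus), 4).
 */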
/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */
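/*
 * The buffer ends up holding the standard ELF note layout: the elf_note
 * header, the NUL-terminated name padded to a 4-byte boundary, then the
 * descriptor data, likewise padded.
 */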
/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI]	= ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;
	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= 0;
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}
	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name	= CORE_STR;
	notes[0].type	= NT_PRSTATUS;
	notes[0].datasz	= sizeof(struct elf_prstatus);
	notes[0].data	= &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz, elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;
	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char *elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);
	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (is_vmalloc_addr((void *)start)) {
			char *elf_buf;

			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			vread(elf_buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distinguish between a fault on the
				 * source and a fault on the destination; when
				 * that happens we clear the tail of the buffer
				 * too and hope it will trigger the EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n, n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (kcore_need_update)
		kcore_update_ram();
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
};
#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}
#endif
static struct kcore_list kcore_vmalloc;
#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, the kernel text is mapped via a special segment instead of
 * the direct-map area, so it needs its own KCORE_TEXT entry.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, 0);
	/* Other special areas, e.g. the module area, are arch specific. */

	return 0;
}
module_init(proc_kcore_init);
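/*
 * Example use (not part of this file): a debugger can treat the resulting
 * core image as a live view of kernel memory, e.g.
 *
 *	gdb /path/to/vmlinux /proc/kcore
 */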