kcore: more fixes for init
/*
 *      fs/proc/kcore.c kernel ELF core dumper
 *
 *      Modelled on fs/exec.c:aout_core_dump()
 *      Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *      ELF version written by David Howells <David.Howells@nexor.co.uk>
 *      Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *      Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *      Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
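
/*
 * These macros translate between a kernel virtual address and its file
 * offset within the dump's data area (the part following the page-aligned
 * ELF header buffer).  With the defaults, the byte at data offset 0x1000
 * is the byte at virtual address PAGE_OFFSET + 0x1000; architectures whose
 * kernel mapping does not start at PAGE_OFFSET can override both macros.
 */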

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        write_lock(&kclist_lock);
        list_add_tail(&new->list, &kclist_head);
        write_unlock(&kclist_lock);
}
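
/*
 * Example caller: proc_kcore_init() below registers the vmalloc area with
 *
 *      kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
 *                 VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
 *
 * Entries are only ever unlinked by __kcore_update_ram(), and only the
 * KCORE_RAM/KCORE_VMEMMAP ones, so anything else registered here must stay
 * allocated for the lifetime of the system.
 */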

static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }
        *elf_buflen =   sizeof(struct elfhdr) +
                        (*nphdr + 2)*sizeof(struct elf_phdr) +
                        3 * ((sizeof(struct elf_note)) +
                             roundup(sizeof(CORE_STR), 4)) +
                        roundup(sizeof(struct elf_prstatus), 4) +
                        roundup(sizeof(struct elf_prpsinfo), 4) +
                        roundup(sizeof(struct task_struct), 4);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return size + *elf_buflen;
}
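
/*
 * Worked example for the sums above: with two registered areas, *nphdr is
 * 3 (one PT_NOTE plus one PT_LOAD each).  The header buffer covers the ELF
 * header, the program headers and the three notes, rounded up to a full
 * page; everything past that page-aligned boundary is the memory image
 * itself, which ends at the largest kc_vaddr_to_offset(addr + size) of
 * any registered area.
 */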

static void free_kclist_ents(struct list_head *head)
{
        struct kcore_list *tmp, *pos;

        list_for_each_entry_safe(pos, tmp, head, list) {
                list_del(&pos->list);
                kfree(pos);
        }
}

/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP entries with the passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
        struct kcore_list *tmp, *pos;
        LIST_HEAD(garbage);

        write_lock(&kclist_lock);
        if (kcore_need_update) {
                list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                        if (pos->type == KCORE_RAM
                                || pos->type == KCORE_VMEMMAP)
                                list_move(&pos->list, &garbage);
                }
                list_splice_tail(list, &kclist_head);
        } else
                list_splice(list, &garbage);
        kcore_need_update = 0;
        write_unlock(&kclist_lock);

        free_kclist_ents(&garbage);
}
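
/*
 * Note the free-outside-the-lock pattern above: stale entries are only
 * moved to the local "garbage" list while kclist_lock is held, and are
 * kfree()d after the lock is dropped, so no allocation or free ever
 * happens inside the write-locked section.
 */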

#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM we can treat [0...max_low_pfn) as a single
 * continuous range, because holes in low memory are small compared with
 * the !HIGHMEM case, where all of memory is direct-mapped.
 * (HIGHMEM is special because part of memory is _invisible_ to the
 * kernel's direct mapping.)
 */
static int kcore_update_ram(void)
{
        LIST_HEAD(head);
        struct kcore_list *ent;
        int ret = 0;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, &head);
        __kcore_update_ram(&head);
        return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from the given system RAM pfn and register it */
int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = ALIGN(end, PAGE_SIZE);
        /* overlap check (needed because we had to align to page boundaries) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
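
/*
 * In other words: for the KCORE_RAM entry passed in, the KCORE_VMEMMAP
 * entry added above spans the struct page array for its pages, i.e.
 * roughly nr_pages * sizeof(struct page) bytes starting at
 * pfn_to_page(pfn), widened to page boundaries and clipped against
 * KCORE_VMEMMAP ranges already on the list.
 */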
#else
int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
        ent->size = nr_pages << PAGE_SHIFT;

        /* Sanity check: can happen on some 32-bit arches */
        if (ent->addr < (unsigned long) __va(0))
                goto free_out;

        /* cut off the unmapped area; taken from ppc32 code */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /* cut when the vmalloc() area is higher than the direct-map area */
        if (VMALLOC_START > (unsigned long)__va(0)) {
                if (ent->addr > VMALLOC_START)
                        goto free_out;
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}
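
/*
 * Contract with walk_system_ram_range(): a non-zero return from this
 * callback stops the walk, and kcore_update_ram() below then throws the
 * partial list away.  Both the sanity-check bail-outs and the allocation
 * failure above therefore abort the whole update rather than skipping a
 * single range.
 */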

static int kcore_update_ram(void)
{
        int nid, ret;
        unsigned long end_pfn;
        LIST_HEAD(head);

        /* Not initialized yet: update now */
        /* find out "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_HIGH_MEMORY) {
                unsigned long node_end;
                node_end  = NODE_DATA(nid)->node_start_pfn +
                        NODE_DATA(nid)->node_spanned_pages;
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
        if (ret) {
                free_kclist_ents(&head);
                return -ENOMEM;
        }
        __kcore_update_ram(&head);
        return ret;
}
#endif /* CONFIG_HIGHMEM */

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup((strlen(en->name) + 1), 4);
        sz += roundup(en->datasz, 4);

        return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
        struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);

        /* XXX - cast from long long to long to avoid need for libgcc.a */
        bufp = (char*) roundup((unsigned long)bufp,4);
        DUMP_WRITE(men->data, men->datasz);
        bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

        return bufp;
} /* end storenote() */
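
/*
 * Layout produced by storenote() for each note, matching notesize():
 *
 *      struct elf_note         n_namesz, n_descsz, n_type
 *      name                    n_namesz bytes, padded to a 4-byte boundary
 *      data                    n_descsz bytes, padded to a 4-byte boundary
 */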

/*
 * store an ELF core-dump header in the supplied buffer
 * nphdr is the number of elf_phdr structures to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        struct elf_prpsinfo prpsinfo;   /* NT_PRPSINFO */
        struct elf_phdr *nhdr, *phdr;
        struct elfhdr *elf;
        struct memelfnote notes[3];
        off_t offset = 0;
        struct kcore_list *m;

        /* setup ELF header */
        elf = (struct elfhdr *) bufp;
        bufp += sizeof(struct elfhdr);
        offset += sizeof(struct elfhdr);
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS]  = ELF_CLASS;
        elf->e_ident[EI_DATA]   = ELF_DATA;
        elf->e_ident[EI_VERSION]= EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type     = ET_CORE;
        elf->e_machine  = ELF_ARCH;
        elf->e_version  = EV_CURRENT;
        elf->e_entry    = 0;
        elf->e_phoff    = sizeof(struct elfhdr);
        elf->e_shoff    = 0;
        elf->e_flags    = ELF_CORE_EFLAGS;
        elf->e_ehsize   = sizeof(struct elfhdr);
        elf->e_phentsize= sizeof(struct elf_phdr);
        elf->e_phnum    = nphdr;
        elf->e_shentsize= 0;
        elf->e_shnum    = 0;
        elf->e_shstrndx = 0;

        /* setup ELF PT_NOTE program header */
        nhdr = (struct elf_phdr *) bufp;
        bufp += sizeof(struct elf_phdr);
        offset += sizeof(struct elf_phdr);
        nhdr->p_type    = PT_NOTE;
        nhdr->p_offset  = 0;
        nhdr->p_vaddr   = 0;
        nhdr->p_paddr   = 0;
        nhdr->p_filesz  = 0;
        nhdr->p_memsz   = 0;
        nhdr->p_flags   = 0;
        nhdr->p_align   = 0;

        /* setup ELF PT_LOAD program header for every area */
        list_for_each_entry(m, &kclist_head, list) {
                phdr = (struct elf_phdr *) bufp;
                bufp += sizeof(struct elf_phdr);
                offset += sizeof(struct elf_phdr);

                phdr->p_type    = PT_LOAD;
                phdr->p_flags   = PF_R|PF_W|PF_X;
                phdr->p_offset  = kc_vaddr_to_offset(m->addr) + dataoff;
                phdr->p_vaddr   = (size_t)m->addr;
                phdr->p_paddr   = 0;
                phdr->p_filesz  = phdr->p_memsz = m->size;
                phdr->p_align   = PAGE_SIZE;
        }

        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */
        nhdr->p_offset  = offset;

        /* set up the process status */
        notes[0].name = CORE_STR;
        notes[0].type = NT_PRSTATUS;
        notes[0].datasz = sizeof(struct elf_prstatus);
        notes[0].data = &prstatus;

        memset(&prstatus, 0, sizeof(struct elf_prstatus));

        nhdr->p_filesz  = notesize(&notes[0]);
        bufp = storenote(&notes[0], bufp);

        /* set up the process info */
        notes[1].name   = CORE_STR;
        notes[1].type   = NT_PRPSINFO;
        notes[1].datasz = sizeof(struct elf_prpsinfo);
        notes[1].data   = &prpsinfo;

        memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
        prpsinfo.pr_state       = 0;
        prpsinfo.pr_sname       = 'R';
        prpsinfo.pr_zomb        = 0;

        strcpy(prpsinfo.pr_fname, "vmlinux");
        strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

        nhdr->p_filesz  += notesize(&notes[1]);
        bufp = storenote(&notes[1], bufp);

        /* set up the task structure */
        notes[2].name   = CORE_STR;
        notes[2].type   = NT_TASKSTRUCT;
        notes[2].datasz = sizeof(struct task_struct);
        notes[2].data   = current;

        nhdr->p_filesz  += notesize(&notes[2]);
        bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
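
/*
 * The resulting /proc/kcore image, as assembled above:
 *
 *      ELF header
 *      PT_NOTE program header
 *      one PT_LOAD program header per registered area
 *      NT_PRSTATUS, NT_PRPSINFO and NT_TASKSTRUCT notes
 *      ...padding to the page-aligned size from get_kcore_size()...
 *      memory image, addressed via the PT_LOAD p_offset fields
 */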

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
        int nphdr;
        unsigned long start;

        read_lock(&kclist_lock);
        proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
        if (buflen == 0 || *fpos >= size) {
                read_unlock(&kclist_lock);
                return 0;
        }

        /* trim buflen so we do not read beyond EOF */
        if (buflen > size - *fpos)
                buflen = size - *fpos;

        /* construct an ELF core header if we'll need some of it */
        if (*fpos < elf_buflen) {
                char * elf_buf;

                tsz = elf_buflen - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                /* GFP_ATOMIC: we still hold kclist_lock */
                elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
                if (!elf_buf) {
                        read_unlock(&kclist_lock);
                        return -ENOMEM;
                }
                elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
                read_unlock(&kclist_lock);
                if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
                        kfree(elf_buf);
                        return -EFAULT;
                }
                kfree(elf_buf);
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if the buffer is already full */
                if (buflen == 0)
                        return acc;
        } else
                read_unlock(&kclist_lock);

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - elf_buflen);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        while (buflen) {
                struct kcore_list *m;

                read_lock(&kclist_lock);
                list_for_each_entry(m, &kclist_head, list) {
                        if (start >= m->addr && start < (m->addr+m->size))
                                break;
                }
                read_unlock(&kclist_lock);

                /* no match: the loop ran off the end, m points back at the head */
                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
                        char * elf_buf;

                        elf_buf = kzalloc(tsz, GFP_KERNEL);
                        if (!elf_buf)
                                return -ENOMEM;
                        vread(elf_buf, (char *)start, tsz);
                        /* we have to zero-fill the user buffer even if nothing was read */
                        if (copy_to_user(buffer, elf_buf, tsz)) {
                                kfree(elf_buf);
                                return -EFAULT;
                        }
                        kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;

                                n = copy_to_user(buffer, (char *)start, tsz);
                                /*
                                 * We cannot distinguish between a fault on the
                                 * source and a fault on the destination. When
                                 * this happens we clear too and hope it will
                                 * trigger the EFAULT again.
                                 */
                                if (n) {
                                        if (clear_user(buffer + tsz - n,
                                                                n))
                                                return -EFAULT;
                                }
                        } else {
                                if (clear_user(buffer, tsz))
                                        return -EFAULT;
                        }
                }
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

        return acc;
}
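
/*
 * Note on locking in read_kcore(): kclist_lock is held only while the
 * list is searched for the chunk's start address; the copy_to_user()
 * calls run unlocked.  The list only changes via kcore_update_ram()
 * from a concurrent open(), which is why the lookup is redone for every
 * page-sized chunk.
 */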

static int open_kcore(struct inode *inode, struct file *filp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (kcore_need_update)
                kcore_update_ram();
        return 0;
}
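
/*
 * The RAM list is refreshed here, at open time, rather than on every
 * read: memory hotplug merely marks the list stale via kcore_need_update,
 * and the next open() rebuilds it.
 */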

static const struct file_operations proc_kcore_operations = {
        .read           = read_kcore,
        .open           = open_kcore,
};

#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                write_lock(&kclist_lock);
                kcore_need_update = 1;
                write_unlock(&kclist_lock);
        }
        return NOTIFY_OK;
}
#endif

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, the kernel text is mapped through a special segment rather
 * than through the direct-map area, so we need to create a dedicated
 * TEXT entry for it.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with the vmalloc area.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                        MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        if (!proc_root_kcore) {
                printk(KERN_ERR "couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        hotplug_memory_notifier(kcore_callback, 0);

        return 0;
}
module_init(proc_kcore_init);
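
/*
 * Example usage from userspace (the open() above requires CAP_SYS_RAWIO):
 *
 *      readelf -l /proc/kcore      # list the PT_LOAD segments built here
 *      gdb vmlinux /proc/kcore     # browse live kernel memory with symbols
 */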