4 * Copyright (C) 1993 Linus Torvalds
5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
8 * Numa awareness, Christoph Lameter, SGI, June 2005
12 #include <linux/module.h>
13 #include <linux/highmem.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugobjects.h>
19 #include <linux/vmalloc.h>
20 #include <linux/kallsyms.h>
22 #include <asm/uaccess.h>
23 #include <asm/tlbflush.h>
26 DEFINE_RWLOCK(vmlist_lock);
27 struct vm_struct *vmlist;
29 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
30 int node, void *caller);
32 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
36 pte = pte_offset_kernel(pmd, addr);
38 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
39 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
40 } while (pte++, addr += PAGE_SIZE, addr != end);
43 static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
49 pmd = pmd_offset(pud, addr);
51 next = pmd_addr_end(addr, end);
52 if (pmd_none_or_clear_bad(pmd))
54 vunmap_pte_range(pmd, addr, next);
55 } while (pmd++, addr = next, addr != end);
58 static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
64 pud = pud_offset(pgd, addr);
66 next = pud_addr_end(addr, end);
67 if (pud_none_or_clear_bad(pud))
69 vunmap_pmd_range(pud, addr, next);
70 } while (pud++, addr = next, addr != end);
73 void unmap_kernel_range(unsigned long addr, unsigned long size)
77 unsigned long start = addr;
78 unsigned long end = addr + size;
81 pgd = pgd_offset_k(addr);
82 flush_cache_vunmap(addr, end);
84 next = pgd_addr_end(addr, end);
85 if (pgd_none_or_clear_bad(pgd))
87 vunmap_pud_range(pgd, addr, next);
88 } while (pgd++, addr = next, addr != end);
89 flush_tlb_kernel_range(start, end);
92 static void unmap_vm_area(struct vm_struct *area)
94 unmap_kernel_range((unsigned long)area->addr, area->size);
97 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
98 unsigned long end, pgprot_t prot, struct page ***pages)
102 pte = pte_alloc_kernel(pmd, addr);
106 struct page *page = **pages;
107 WARN_ON(!pte_none(*pte));
110 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
112 } while (pte++, addr += PAGE_SIZE, addr != end);
116 static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
117 unsigned long end, pgprot_t prot, struct page ***pages)
122 pmd = pmd_alloc(&init_mm, pud, addr);
126 next = pmd_addr_end(addr, end);
127 if (vmap_pte_range(pmd, addr, next, prot, pages))
129 } while (pmd++, addr = next, addr != end);
133 static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
134 unsigned long end, pgprot_t prot, struct page ***pages)
139 pud = pud_alloc(&init_mm, pgd, addr);
143 next = pud_addr_end(addr, end);
144 if (vmap_pmd_range(pud, addr, next, prot, pages))
146 } while (pud++, addr = next, addr != end);
150 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
154 unsigned long addr = (unsigned long) area->addr;
155 unsigned long end = addr + area->size - PAGE_SIZE;
159 pgd = pgd_offset_k(addr);
161 next = pgd_addr_end(addr, end);
162 err = vmap_pud_range(pgd, addr, next, prot, pages);
165 } while (pgd++, addr = next, addr != end);
166 flush_cache_vmap((unsigned long) area->addr, end);
169 EXPORT_SYMBOL_GPL(map_vm_area);
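/*
 * Illustrative sketch (not part of this file): map_vm_area() is normally
 * paired with get_vm_area().  A caller that already owns an array of pages
 * could wire them into kernel virtual space roughly like this; the names
 * "pages" and "nr" are hypothetical.
 *
 *	struct vm_struct *area;
 *	struct page **tmp = pages;	/* map_vm_area() advances the cursor */
 *
 *	area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, &tmp)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return area->addr;
 *
 * This is essentially what vmap() below does on behalf of its callers.
 */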
171 static inline int is_vmalloc_or_module_addr(const void *x)
174 * x86-64 and sparc64 put modules in a special place,
175 * and fall back on vmalloc() if that fails. Others
176 * just put them in the vmalloc space.
178 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
179 unsigned long addr = (unsigned long)x;
180 if (addr >= MODULES_VADDR && addr < MODULES_END)
183 return is_vmalloc_addr(x);
187 * Map a vmalloc()-space virtual address to the physical page.
189 struct page *vmalloc_to_page(const void *vmalloc_addr)
191 unsigned long addr = (unsigned long) vmalloc_addr;
192 struct page *page = NULL;
193 pgd_t *pgd = pgd_offset_k(addr);
199 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
200 * architectures that do not vmalloc module space
202 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
204 if (!pgd_none(*pgd)) {
205 pud = pud_offset(pgd, addr);
206 if (!pud_none(*pud)) {
207 pmd = pmd_offset(pud, addr);
208 if (!pmd_none(*pmd)) {
209 ptep = pte_offset_map(pmd, addr);
211 if (pte_present(pte))
212 page = pte_page(pte);
219 EXPORT_SYMBOL(vmalloc_to_page);
222 * Map a vmalloc()-space virtual address to the physical page frame number.
224 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
226 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
228 EXPORT_SYMBOL(vmalloc_to_pfn);
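/*
 * Illustrative sketch (not part of this file): a vmalloc'ed buffer is only
 * virtually contiguous, so code that needs the backing pages (for example to
 * build a scatterlist) walks it page by page with vmalloc_to_page().  The
 * variables "buf" (a char *) and "size" below are hypothetical.
 *
 *	unsigned long off;
 *
 *	for (off = 0; off < size; off += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(buf + off);
 *		unsigned long pfn = vmalloc_to_pfn(buf + off);
 *		...
 *	}
 */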
230 static struct vm_struct *
231 __get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
232 unsigned long end, int node, gfp_t gfp_mask, void *caller)
234 struct vm_struct **p, *tmp, *area;
235 unsigned long align = 1;
238 BUG_ON(in_interrupt());
239 if (flags & VM_IOREMAP) {
242 if (bit > IOREMAP_MAX_ORDER)
243 bit = IOREMAP_MAX_ORDER;
244 else if (bit < PAGE_SHIFT)
249 addr = ALIGN(start, align);
250 size = PAGE_ALIGN(size);
254 area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
260 * We always allocate a guard page.
264 write_lock(&vmlist_lock);
265 for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
266 if ((unsigned long)tmp->addr < addr) {
267 if((unsigned long)tmp->addr + tmp->size >= addr)
268 addr = ALIGN(tmp->size +
269 (unsigned long)tmp->addr, align);
272 if ((size + addr) < addr)
274 if (size + addr <= (unsigned long)tmp->addr)
276 addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
277 if (addr > end - size)
280 if ((size + addr) < addr)
282 if (addr > end - size)
290 area->addr = (void *)addr;
295 area->caller = caller;
296 write_unlock(&vmlist_lock);
301 write_unlock(&vmlist_lock);
303 if (printk_ratelimit())
304 printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
308 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
309 unsigned long start, unsigned long end)
311 return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
312 __builtin_return_address(0));
314 EXPORT_SYMBOL_GPL(__get_vm_area);
317 * get_vm_area - reserve a contiguous kernel virtual area
318 * @size: size of the area
319 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
321 * Search an area of @size in the kernel virtual mapping area,
322 * and reserve it for our purposes. Returns the area descriptor
323 * on success or %NULL on failure.
325 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
327 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
328 -1, GFP_KERNEL, __builtin_return_address(0));
331 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
334 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
335 -1, GFP_KERNEL, caller);
338 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
339 int node, gfp_t gfp_mask)
341 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
342 gfp_mask, __builtin_return_address(0));
345 /* Caller must hold vmlist_lock */
346 static struct vm_struct *__find_vm_area(const void *addr)
348 struct vm_struct *tmp;
350 for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
351 if (tmp->addr == addr)
358 /* Caller must hold vmlist_lock */
359 static struct vm_struct *__remove_vm_area(const void *addr)
361 struct vm_struct **p, *tmp;
363 for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
364 if (tmp->addr == addr)
374 * Remove the guard page.
376 tmp->size -= PAGE_SIZE;
381 * remove_vm_area - find and remove a contiguous kernel virtual area
382 * @addr: base address
384 * Search for the kernel VM area starting at @addr, and remove it.
385 * This function returns the found VM area, but using it is NOT safe
386 * on SMP machines, except for its size or flags.
388 struct vm_struct *remove_vm_area(const void *addr)
391 write_lock(&vmlist_lock);
392 v = __remove_vm_area(addr);
393 write_unlock(&vmlist_lock);
397 static void __vunmap(const void *addr, int deallocate_pages)
399 struct vm_struct *area;
404 if ((PAGE_SIZE-1) & (unsigned long)addr) {
405 WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
409 area = remove_vm_area(addr);
410 if (unlikely(!area)) {
411 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
416 debug_check_no_locks_freed(addr, area->size);
417 debug_check_no_obj_freed(addr, area->size);
419 if (deallocate_pages) {
422 for (i = 0; i < area->nr_pages; i++) {
423 struct page *page = area->pages[i];
429 if (area->flags & VM_VPAGES)
440 * vfree - release memory allocated by vmalloc()
441 * @addr: memory base address
443 * Free the virtually contiguous memory area starting at @addr, as
444 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
445 * NULL, no operation is performed.
447 * Must not be called in interrupt context.
449 void vfree(const void *addr)
451 BUG_ON(in_interrupt());
454 EXPORT_SYMBOL(vfree);
457 * vunmap - release virtual mapping obtained by vmap()
458 * @addr: memory base address
460 * Free the virtually contiguous memory area starting at @addr,
461 * which was created from the page array passed to vmap().
463 * Must not be called in interrupt context.
465 void vunmap(const void *addr)
467 BUG_ON(in_interrupt());
470 EXPORT_SYMBOL(vunmap);
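/*
 * Illustrative sketch (not part of this file): vunmap() undoes a mapping set
 * up with vmap() (defined below).  Assuming a hypothetical array "pages" of
 * "nr" struct page pointers:
 *
 *	void *addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	vunmap(addr);
 *
 * Note that vunmap() only tears down the mapping; the pages themselves
 * remain the caller's responsibility.
 */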
473 * vmap - map an array of pages into virtually contiguous space
474 * @pages: array of page pointers
475 * @count: number of pages to map
476 * @flags: vm_area->flags
477 * @prot: page protection for the mapping
479 * Maps @count pages from @pages into contiguous kernel virtual space.
482 void *vmap(struct page **pages, unsigned int count,
483 unsigned long flags, pgprot_t prot)
485 struct vm_struct *area;
487 if (count > num_physpages)
490 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
491 __builtin_return_address(0));
495 if (map_vm_area(area, prot, &pages)) {
504 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
505 pgprot_t prot, int node, void *caller)
508 unsigned int nr_pages, array_size, i;
510 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
511 array_size = (nr_pages * sizeof(struct page *));
513 area->nr_pages = nr_pages;
514 /* Please note that the recursion is strictly bounded. */
515 if (array_size > PAGE_SIZE) {
516 pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
517 PAGE_KERNEL, node, caller);
518 area->flags |= VM_VPAGES;
520 pages = kmalloc_node(array_size,
521 (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
525 area->caller = caller;
527 remove_vm_area(area->addr);
532 for (i = 0; i < area->nr_pages; i++) {
536 page = alloc_page(gfp_mask);
538 page = alloc_pages_node(node, gfp_mask, 0);
540 if (unlikely(!page)) {
541 /* Successfully allocated i pages, free them in __vunmap() */
545 area->pages[i] = page;
548 if (map_vm_area(area, prot, &pages))
557 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
559 return __vmalloc_area_node(area, gfp_mask, prot, -1,
560 __builtin_return_address(0));
564 * __vmalloc_node - allocate virtually contiguous memory
565 * @size: allocation size
566 * @gfp_mask: flags for the page level allocator
567 * @prot: protection mask for the allocated pages
568 * @node: node to use for allocation or -1
569 * @caller: caller's return address
571 * Allocate enough pages to cover @size from the page level
572 * allocator with @gfp_mask flags. Map them into contiguous
573 * kernel virtual space, using a pagetable protection of @prot.
575 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
576 int node, void *caller)
578 struct vm_struct *area;
580 size = PAGE_ALIGN(size);
581 if (!size || (size >> PAGE_SHIFT) > num_physpages)
584 area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
585 node, gfp_mask, caller);
590 return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
593 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
595 return __vmalloc_node(size, gfp_mask, prot, -1,
596 __builtin_return_address(0));
598 EXPORT_SYMBOL(__vmalloc);
601 * vmalloc - allocate virtually contiguous memory
602 * @size: allocation size
603 * Allocate enough pages to cover @size from the page level
604 * allocator and map them into contiguous kernel virtual space.
606 * For tight control over page level allocator and protection flags
607 * use __vmalloc() instead.
609 void *vmalloc(unsigned long size)
611 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
612 -1, __builtin_return_address(0));
614 EXPORT_SYMBOL(vmalloc);
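/*
 * Illustrative sketch (not part of this file): a typical caller uses
 * vmalloc() for a large, only virtually contiguous buffer and releases it
 * with vfree() from process context.  The element type "struct entry" and
 * the count "nr_entries" are hypothetical; vmalloc() does not zero the
 * memory, hence the memset().
 *
 *	struct entry *tbl = vmalloc(nr_entries * sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	memset(tbl, 0, nr_entries * sizeof(*tbl));
 *	...
 *	vfree(tbl);
 */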
617 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
618 * @size: allocation size
620 * The resulting memory area is zeroed so it can be mapped to userspace
621 * without leaking data.
623 void *vmalloc_user(unsigned long size)
625 struct vm_struct *area;
628 ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
630 write_lock(&vmlist_lock);
631 area = __find_vm_area(ret);
632 area->flags |= VM_USERMAP;
633 write_unlock(&vmlist_lock);
637 EXPORT_SYMBOL(vmalloc_user);
640 * vmalloc_node - allocate memory on a specific node
641 * @size: allocation size
644 * Allocate enough pages to cover @size from the page level
645 * allocator and map them into contiguous kernel virtual space.
647 * For tight control over page level allocator and protection flags
648 * use __vmalloc() instead.
650 void *vmalloc_node(unsigned long size, int node)
652 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
653 node, __builtin_return_address(0));
655 EXPORT_SYMBOL(vmalloc_node);
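/*
 * Illustrative sketch (not part of this file): per-node data is usually
 * placed with vmalloc_node() so the backing pages come from the node that
 * will touch them most often.  The size "sz" is hypothetical.
 *
 *	int node;
 *
 *	for_each_online_node(node) {
 *		void *p = vmalloc_node(sz, node);
 *		if (!p)
 *			goto fail;
 *		...
 *	}
 */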
657 #ifndef PAGE_KERNEL_EXEC
658 # define PAGE_KERNEL_EXEC PAGE_KERNEL
662 * vmalloc_exec - allocate virtually contiguous, executable memory
663 * @size: allocation size
665 * Kernel-internal function to allocate enough pages to cover @size
666 * from the page level allocator and map them into contiguous and
667 * executable kernel virtual space.
669 * For tight control over page level allocator and protection flags
670 * use __vmalloc() instead.
673 void *vmalloc_exec(unsigned long size)
675 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
678 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
679 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
680 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
681 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
683 #define GFP_VMALLOC32 GFP_KERNEL
687 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
688 * @size: allocation size
690 * Allocate enough 32bit PA addressable pages to cover @size from the
691 * page level allocator and map them into contiguous kernel virtual space.
693 void *vmalloc_32(unsigned long size)
695 return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
697 EXPORT_SYMBOL(vmalloc_32);
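/*
 * Illustrative sketch (not part of this file): drivers for hardware that can
 * only address 32-bit physical memory allocate their buffers this way.  The
 * mapping is still only virtually contiguous, so each backing page has to be
 * handed to the device individually (e.g. via vmalloc_to_page()).  The size
 * "ring_bytes" is hypothetical.
 *
 *	void *ring = vmalloc_32(ring_bytes);
 *	if (!ring)
 *		return -ENOMEM;
 */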
700 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
701 * @size: allocation size
703 * The resulting memory area is 32bit addressable and zeroed so it can be
704 * mapped to userspace without leaking data.
706 void *vmalloc_32_user(unsigned long size)
708 struct vm_struct *area;
711 ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
713 write_lock(&vmlist_lock);
714 area = __find_vm_area(ret);
715 area->flags |= VM_USERMAP;
716 write_unlock(&vmlist_lock);
720 EXPORT_SYMBOL(vmalloc_32_user);
722 long vread(char *buf, char *addr, unsigned long count)
724 struct vm_struct *tmp;
725 char *vaddr, *buf_start = buf;
728 /* Don't allow overflow */
729 if ((unsigned long) addr + count < count)
730 count = -(unsigned long) addr;
732 read_lock(&vmlist_lock);
733 for (tmp = vmlist; tmp; tmp = tmp->next) {
734 vaddr = (char *) tmp->addr;
735 if (addr >= vaddr + tmp->size - PAGE_SIZE)
737 while (addr < vaddr) {
745 n = vaddr + tmp->size - PAGE_SIZE - addr;
756 read_unlock(&vmlist_lock);
757 return buf - buf_start;
760 long vwrite(char *buf, char *addr, unsigned long count)
762 struct vm_struct *tmp;
763 char *vaddr, *buf_start = buf;
766 /* Don't allow overflow */
767 if ((unsigned long) addr + count < count)
768 count = -(unsigned long) addr;
770 read_lock(&vmlist_lock);
771 for (tmp = vmlist; tmp; tmp = tmp->next) {
772 vaddr = (char *) tmp->addr;
773 if (addr >= vaddr + tmp->size - PAGE_SIZE)
775 while (addr < vaddr) {
782 n = vaddr + tmp->size - PAGE_SIZE - addr;
793 read_unlock(&vmlist_lock);
794 return buf - buf_start;
798 * remap_vmalloc_range - map vmalloc pages to userspace
799 * @vma: vma to cover (map full range of vma)
800 * @addr: vmalloc memory
801 * @pgoff: number of pages into addr before first page to map
803 * Returns: 0 for success, -Exxx on failure
805 * This function checks that addr is a valid vmalloc'ed area, and
806 * that it is big enough to cover the vma. Will return failure if
807 * that criterion isn't met.
809 * Similar to remap_pfn_range() (see mm/memory.c)
811 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
814 struct vm_struct *area;
815 unsigned long uaddr = vma->vm_start;
816 unsigned long usize = vma->vm_end - vma->vm_start;
819 if ((PAGE_SIZE-1) & (unsigned long)addr)
822 read_lock(&vmlist_lock);
823 area = __find_vm_area(addr);
825 goto out_einval_locked;
827 if (!(area->flags & VM_USERMAP))
828 goto out_einval_locked;
830 if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
831 goto out_einval_locked;
832 read_unlock(&vmlist_lock);
834 addr += pgoff << PAGE_SHIFT;
836 struct page *page = vmalloc_to_page(addr);
837 ret = vm_insert_page(vma, uaddr, page);
846 /* Prevent "things" like memory migration? VM_flags need a cleanup... */
847 vma->vm_flags |= VM_RESERVED;
852 read_unlock(&vmlist_lock);
855 EXPORT_SYMBOL(remap_vmalloc_range);
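/*
 * Illustrative sketch (not part of this file): the usual consumer of
 * remap_vmalloc_range() is a driver ->mmap() handler exporting a buffer that
 * was allocated with vmalloc_user(), which sets VM_USERMAP as required by
 * the check above.  The device structure and its "vbuf" member are
 * hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->vbuf, vma->vm_pgoff);
 *	}
 */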
858 * Implement a stub for vmalloc_sync_all() if the architecture chose not to have one.
861 void __attribute__((weak)) vmalloc_sync_all(void)
866 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
868 /* apply_to_page_range() does all the hard work. */
873 * alloc_vm_area - allocate a range of kernel address space
874 * @size: size of the area
876 * Returns: NULL on failure, vm_struct on success
878 * This function reserves a range of kernel address space, and
879 * allocates pagetables to map that range. No actual mappings
880 * are created. If the kernel address space is not shared
881 * between processes, it syncs the pagetable across all processes.
884 struct vm_struct *alloc_vm_area(size_t size)
886 struct vm_struct *area;
888 area = get_vm_area_caller(size, VM_IOREMAP,
889 __builtin_return_address(0));
894 * This ensures that page tables are constructed for this region
895 * of kernel virtual address space and mapped into init_mm.
897 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
898 area->size, f, NULL)) {
903 /* Make sure the pagetables are constructed in process kernel mappings. */
909 EXPORT_SYMBOL_GPL(alloc_vm_area);
911 void free_vm_area(struct vm_struct *area)
913 struct vm_struct *ret;
914 ret = remove_vm_area(area->addr);
918 EXPORT_SYMBOL_GPL(free_vm_area);
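/*
 * Illustrative sketch (not part of this file): alloc_vm_area()/free_vm_area()
 * suit callers (such as paravirtualized guests) that only need a kernel
 * virtual range with page tables already populated and install the actual
 * translations themselves, e.g. via a hypercall.  "map_hyper()" is a
 * hypothetical stand-in for that step.
 *
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
 *	if (!area)
 *		return -ENOMEM;
 *	if (map_hyper(area->addr) < 0) {
 *		free_vm_area(area);
 *		return -EFAULT;
 *	}
 */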
921 #ifdef CONFIG_PROC_FS
922 static void *s_start(struct seq_file *m, loff_t *pos)
927 read_lock(&vmlist_lock);
940 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
942 struct vm_struct *v = p;
948 static void s_stop(struct seq_file *m, void *p)
950 read_unlock(&vmlist_lock);
953 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
956 unsigned int nr, *counters = m->private;
961 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
963 for (nr = 0; nr < v->nr_pages; nr++)
964 counters[page_to_nid(v->pages[nr])]++;
966 for_each_node_state(nr, N_HIGH_MEMORY)
968 seq_printf(m, " N%u=%u", nr, counters[nr]);
972 static int s_show(struct seq_file *m, void *p)
974 struct vm_struct *v = p;
976 seq_printf(m, "0x%p-0x%p %7ld",
977 v->addr, v->addr + v->size, v->size);
980 char buff[2 * KSYM_NAME_LEN];
983 sprint_symbol(buff, (unsigned long)v->caller);
988 seq_printf(m, " pages=%d", v->nr_pages);
991 seq_printf(m, " phys=%lx", v->phys_addr);
993 if (v->flags & VM_IOREMAP)
994 seq_printf(m, " ioremap");
996 if (v->flags & VM_ALLOC)
997 seq_printf(m, " vmalloc");
999 if (v->flags & VM_MAP)
1000 seq_printf(m, " vmap");
1002 if (v->flags & VM_USERMAP)
1003 seq_printf(m, " user");
1005 if (v->flags & VM_VPAGES)
1006 seq_printf(m, " vpages");
1008 show_numa_info(m, v);
1013 const struct seq_operations vmalloc_op = {