diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13c3d82..af77e17 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5,6 +5,7 @@
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
+ * Numa awareness, Christoph Lameter, SGI, June 2005
  */
 
 #include <linux/mm.h>
@@ -23,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -64,12 +68,12 @@ static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
 	} while (pud++, addr = next, addr != end);
 }
 
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long addr = (unsigned long) area->addr;
-	unsigned long end = addr + area->size;
+	unsigned long start = addr;
+	unsigned long end = addr + size;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
@@ -80,7 +84,12 @@ void unmap_vm_area(struct vm_struct *area)
 			continue;
 		vunmap_pud_range(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range((unsigned long) area->addr, end);
+	flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+	unmap_kernel_range((unsigned long)area->addr, area->size);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
@@ -88,7 +97,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 {
 	pte_t *pte;
 
-	pte = pte_alloc_kernel(&init_mm, pmd, addr);
+	pte = pte_alloc_kernel(pmd, addr);
 	if (!pte)
 		return -ENOMEM;
 	do {
@@ -146,25 +155,26 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		err = vmap_pud_range(pgd, addr, next, prot, pages);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	spin_unlock(&init_mm.page_table_lock);
 	flush_cache_vmap((unsigned long) area->addr, end);
 	return err;
 }
+EXPORT_SYMBOL_GPL(map_vm_area);
 
-struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
-				unsigned long start, unsigned long end)
+static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+					    unsigned long start, unsigned long end,
+					    int node, gfp_t gfp_mask)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
 	unsigned long addr;
 
+	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
 		int bit = fls(size);
@@ -177,15 +187,13 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 	}
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
-
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
-	if (unlikely(!area))
+	if (unlikely(!size))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
+	if (unlikely(!area))
 		return NULL;
-	}
 
 	/*
	 * We always allocate a guard page.
@@ -231,9 +239,15 @@ out:
 	return NULL;
 }
 
+struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(__get_vm_area);
+
 /**
- * get_vm_area - reserve a contingous kernel virtual area
- *
+ * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
  * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  *
@@ -246,8 +260,28 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
+				   int node, gfp_t gfp_mask)
+{
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
+				  gfp_mask);
+}
+
 /* Caller must hold vmlist_lock */
-struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__find_vm_area(void *addr)
+{
+	struct vm_struct *tmp;
+
+	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+		if (tmp->addr == addr)
+			break;
+	}
+
+	return tmp;
+}
+
+/* Caller must hold vmlist_lock */
+static struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
@@ -269,8 +303,7 @@ found:
 }
 
 /**
- * remove_vm_area - find and remove a contingous kernel virtual area
- *
+ * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
@@ -286,7 +319,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	return v;
 }
 
-void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
@@ -307,16 +340,17 @@ void __vunmap(void *addr, int deallocate_pages)
 		return;
 	}
 
+	debug_check_no_locks_freed(addr, area->size);
+
 	if (deallocate_pages) {
 		int i;
 
 		for (i = 0; i < area->nr_pages; i++) {
-			if (unlikely(!area->pages[i]))
-				BUG();
+			BUG_ON(!area->pages[i]);
 			__free_page(area->pages[i]);
 		}
 
-		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+		if (area->flags & VM_VPAGES)
 			vfree(area->pages);
 		else
 			kfree(area->pages);
@@ -328,10 +362,9 @@ void __vunmap(void *addr, int deallocate_pages)
 
 /**
 * vfree - release memory allocated by vmalloc()
- *
 * @addr: memory base address
 *
- * Free the virtually contiguous memory area starting at @addr, as
+ * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
@@ -342,12 +375,10 @@ void vfree(void *addr)
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 1);
 }
-
 EXPORT_SYMBOL(vfree);
 
 /**
 * vunmap - release virtual mapping obtained by vmap()
- *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
@@ -360,12 +391,10 @@ void vunmap(void *addr)
 {
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 0);
 }
-
 EXPORT_SYMBOL(vunmap);
 
 /**
 * vmap - map an array of pages into virtually contiguous space
- *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
@@ -392,10 +421,10 @@ void *vmap(struct page **pages, unsigned int count,
 
 	return area->addr;
 }
-
 EXPORT_SYMBOL(vmap);
 
-void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+				pgprot_t prot, int node)
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
@@ -405,20 +434,27 @@ void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgp
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
-		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
-	else
-		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+	if (array_size > PAGE_SIZE) {
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
+		area->flags |= VM_VPAGES;
+	} else {
+		pages = kmalloc_node(array_size,
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
+				node);
+	}
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
-		area->pages[i] = alloc_page(gfp_mask);
+		if (node < 0)
+			area->pages[i] = alloc_page(gfp_mask);
+		else
+			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
 		if (unlikely(!area->pages[i])) {
 			/* Successfully allocated i pages, free them in __vunmap() */
 			area->nr_pages = i;
@@ -435,18 +471,24 @@ fail:
 	return NULL;
 }
 
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_area_node(area, gfp_mask, prot, -1);
+}
+
 /**
- * __vmalloc - allocate virtually contiguous memory
- *
+ * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
+ * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node)
 {
 	struct vm_struct *area;
 
@@ -454,47 +496,87 @@ void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t pro
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
 
-	area = get_vm_area(size, VM_ALLOC);
+	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
 	if (!area)
 		return NULL;
 
-	return __vmalloc_area(area, gfp_mask, prot);
+	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
 
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_node(size, gfp_mask, prot, -1);
+}
 EXPORT_SYMBOL(__vmalloc);
 
 /**
 * vmalloc - allocate virtually contiguous memory
- *
 * @size: allocation size
- *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
- * For tight cotrol over page level allocator and protection flags
+ * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
 void *vmalloc(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
+ * @size: allocation size
+ *
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
+ */
+void *vmalloc_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+/**
+ * vmalloc_node - allocate memory on a specific node
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vmalloc_node(unsigned long size, int node)
+{
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+}
+EXPORT_SYMBOL(vmalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
 
 /**
 * vmalloc_exec - allocate virtually contiguous, executable memory
- *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
- * For tight cotrol over page level allocator and protection flags
+ * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
 
@@ -503,9 +585,16 @@ void *vmalloc_exec(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 }
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#else
+#define GFP_VMALLOC32 GFP_KERNEL
+#endif
+
 /**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
- *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -513,11 +602,33 @@ void *vmalloc_exec(unsigned long size)
 */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * @size: allocation size
+ *
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -592,3 +703,123 @@ finished:
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ * @returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr)
+		return -EINVAL;
+
+	read_lock(&vmlist_lock);
+	area = __find_vm_area(addr);
+	if (!area)
+		goto out_einval_locked;
+
+	if (!(area->flags & VM_USERMAP))
+		goto out_einval_locked;
+
+	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+		goto out_einval_locked;
+	read_unlock(&vmlist_lock);
+
+	addr += pgoff << PAGE_SHIFT;
+	do {
+		struct page *page = vmalloc_to_page(addr);
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
+	vma->vm_flags |= VM_RESERVED;
+
+	return ret;
+
+out_einval_locked:
+	read_unlock(&vmlist_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
+/*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+	/* apply_to_page_range() does all the hard work. */
+	return 0;
+}
+
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, it syncs the pagetable across all
+ * processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (area == NULL)
+		return NULL;
+
+	/*
+	 * This ensures that page tables are constructed for this region
+	 * of kernel virtual address space and mapped into init_mm.
+	 */
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+				area->size, f, NULL)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	/* Make sure the pagetables are constructed in process kernel
+	   mappings */
+	vmalloc_sync_all();
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	struct vm_struct *ret;
+	ret = remove_vm_area(area->addr);
+	BUG_ON(ret != area);
+	kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
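
The interfaces this patch introduces can be exercised as follows. The sketches below are illustrative only, not part of the patch, and every demo_* name is invented. First, NUMA placement with the new vmalloc_node(), paired with the usual vfree():

#include <linux/vmalloc.h>
#include <linux/topology.h>	/* numa_node_id() */

/* Hypothetical helper: a node-local scratch buffer. Passing the current
 * node steers page allocation there; passing -1 would fall back to plain
 * vmalloc() placement, exactly as __vmalloc_node() does internally. */
static void *demo_scratch_alloc(unsigned long size)
{
	return vmalloc_node(size, numa_node_id());
}

static void demo_scratch_free(void *buf)
{
	vfree(buf);	/* must not be called from interrupt context */
}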
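
Next, the vmalloc_user()/remap_vmalloc_range() pair. Only areas flagged VM_USERMAP (set by vmalloc_user() and vmalloc_32_user()) may be handed to userspace, and the vma must fit inside the area minus its guard page; remap_vmalloc_range() enforces both checks itself. A sketch of a character-device mmap handler built on the pair, under the same assumptions:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define DEMO_BUF_SIZE (16 * PAGE_SIZE)

static void *demo_buf;	/* allocated once at init time */

static int demo_init_buf(void)
{
	demo_buf = vmalloc_user(DEMO_BUF_SIZE);	/* zeroed, VM_USERMAP set */
	return demo_buf ? 0 : -ENOMEM;
}

/* file_operations.mmap hook: hand the whole buffer to userspace.
 * remap_vmalloc_range() re-checks VM_USERMAP and the size for us. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
}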
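
Finally, alloc_vm_area()/free_vm_area() reserve a kernel virtual range and construct its page tables (via the f() callback and apply_to_page_range()) without allocating any backing pages; the caller, for example a hypervisor frontend, installs real mappings into the range later. A sketch:

#include <linux/vmalloc.h>

static struct vm_struct *demo_area;

static int demo_reserve(size_t size)
{
	demo_area = alloc_vm_area(size);	/* VA + pagetables, no pages */
	if (!demo_area)
		return -ENOMEM;
	/* install mappings at demo_area->addr by other means here */
	return 0;
}

static void demo_unreserve(void)
{
	free_vm_area(demo_area);	/* BUGs if the area was never registered */
}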