X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fvmalloc.c;h=f8189a4b3e135e4c4158bb80082a49434fcb54af;hb=288c857d66a400ca4846dd700eb1c4820d738bb9;hp=366ae9ea6af21b2af9917a9caf77f1c12e3262f2;hpb=c0c0a29379b5848aec2e8f1c58d853d3cb7118b8;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 366ae9e..f8189a4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,8 +23,8 @@
 #include
 #include
 #include
-#include <linux/bootmem.h>
 #include
+#include <linux/kmemleak.h>
 
 #include
 #include
@@ -333,6 +333,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -344,6 +345,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -375,6 +379,8 @@ retry:
 		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+			if (addr + size - 1 < addr)
+				goto overflow;
 
 			n = rb_next(&first->rb_node);
 			if (n)
@@ -385,6 +391,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
@@ -395,6 +402,7 @@ found:
 			printk(KERN_WARNING
 				"vmap allocation for size %lu failed: "
 				"use vmalloc=<size> to increase size.\n", size);
+		kfree(va);
 		return ERR_PTR(-EBUSY);
 	}
 
@@ -508,6 +516,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -547,7 +556,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
@@ -663,10 +672,7 @@ struct vmap_block {
 	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
 	union {
-		struct {
-			struct list_head free_list;
-			struct list_head dirty_list;
-		};
+		struct list_head free_list;
 		struct rcu_head rcu_head;
 	};
 };
@@ -733,7 +739,6 @@
 	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
 	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 	INIT_LIST_HEAD(&vb->free_list);
-	INIT_LIST_HEAD(&vb->dirty_list);
 
 	vb_idx = addr_to_vb_idx(va->va_start);
 	spin_lock(&vmap_block_tree_lock);
@@ -764,12 +769,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	struct vmap_block *tmp;
 	unsigned long vb_idx;
 
-	spin_lock(&vb->vbq->lock);
-	if (!list_empty(&vb->free_list))
-		list_del(&vb->free_list);
-	if (!list_empty(&vb->dirty_list))
-		list_del(&vb->dirty_list);
-	spin_unlock(&vb->vbq->lock);
+	BUG_ON(!list_empty(&vb->free_list));
 
 	vb_idx = addr_to_vb_idx(vb->va->va_start);
 	spin_lock(&vmap_block_tree_lock);
@@ -854,11 +854,7 @@ static void vb_free(const void *addr, unsigned long size)
 
 	spin_lock(&vb->lock);
 	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
-	if (!vb->dirty) {
-		spin_lock(&vb->vbq->lock);
-		list_add(&vb->dirty_list, &vb->vbq->dirty);
-		spin_unlock(&vb->vbq->lock);
-	}
+
 	vb->dirty += 1UL << order;
 	if (vb->dirty == VMAP_BBMAP_BITS) {
 		BUG_ON(vb->free || !list_empty(&vb->free_list));
@@ -1036,7 +1032,7 @@ void __init vmalloc_init(void)
 
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = alloc_bootmem(sizeof(struct vmap_area));
+		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
 		va->flags = tmp->flags | VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;
@@ -1196,6 +1192,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+				       unsigned long start, unsigned long end,
+				       void *caller)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+				  caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
@@ -1323,6 +1327,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
 void vfree(const void *addr)
 {
 	BUG_ON(in_interrupt());
+
+	kmemleak_free(addr);
+
 	__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
@@ -1339,6 +1346,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
+	might_sleep();
 	__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1358,6 +1366,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
 	struct vm_struct *area;
 
+	might_sleep();
+
 	if (count > num_physpages)
 		return NULL;
 
@@ -1432,8 +1442,17 @@ fail:
 
 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_area_node(area, gfp_mask, prot, -1,
-				__builtin_return_address(0));
+	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
+				__builtin_return_address(0));
+
+	/*
+	 * A ref_count = 3 is needed because the vm_struct and vmap_area
+	 * structures allocated in the __get_vm_area_node() function contain
+	 * references to the virtual address of the vmalloc'ed block.
+	 */
+	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
+
+	return addr;
 }
 
 /**
@@ -1452,6 +1471,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 						int node, void *caller)
 {
 	struct vm_struct *area;
+	void *addr;
+	unsigned long real_size = size;
 
 	size = PAGE_ALIGN(size);
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
@@ -1463,7 +1484,16 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!area)
 		return NULL;
 
-	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+
+	/*
+	 * A ref_count = 3 is needed because the vm_struct and vmap_area
+	 * structures allocated in the __get_vm_area_node() function contain
+	 * references to the virtual address of the vmalloc'ed block.
+	 */
+	kmemleak_alloc(addr, real_size, 3, gfp_mask);
+
+	return addr;
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
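
For readers skimming the hunks above, the overflow guard added to alloc_vmap_area() can be read in isolation as the sketch below. It is illustrative only and not part of the patch: the patch open-codes the comparison before each "goto overflow" site, and the helper name vmap_range_wraps() is made up here.

/*
 * Illustrative sketch only -- not taken from the patch.  With unsigned
 * arithmetic, addr + size - 1 compares below addr exactly when the candidate
 * range [addr, addr + size) would wrap past the top of the address space
 * (size == 0 is already ruled out by the new BUG_ON(!size)).  On that path
 * alloc_vmap_area() now jumps to the "overflow" label and, thanks to the new
 * kfree(va), also releases the vmap_area it had just allocated before
 * returning ERR_PTR(-EBUSY).
 */
static inline int vmap_range_wraps(unsigned long addr, unsigned long size)
{
	return addr + size - 1 < addr;
}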