mm/vmalloc.c: combined diff — vmap overflow checks, safe purge-list iteration, dirty_list removal, __get_vm_area_caller(), might_sleep() annotations
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 366ae9e..fab1987 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -333,6 +333,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        unsigned long addr;
        int purged = 0;
 
+       BUG_ON(!size);
        BUG_ON(size & ~PAGE_MASK);
 
        va = kmalloc_node(sizeof(struct vmap_area),
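
Note on the hunk above: BUG_ON(size & ~PAGE_MASK) only verifies page
alignment, and zero is page-aligned by that test, so a zero-length request
used to sail through into the rbtree scan. A minimal userspace sketch of
the two checks (PAGE_SIZE assumed 4 KiB for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = 0;

            /* Alignment check alone: 0 & ~PAGE_MASK == 0, so it passes. */
            printf("alignment check trips: %d\n", (size & ~PAGE_MASK) != 0);
            /* The new guard is what actually rejects a zero request. */
            printf("zero-size check trips: %d\n", size == 0);
            return 0;
    }
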
@@ -344,6 +345,9 @@ retry:
        addr = ALIGN(vstart, align);
 
        spin_lock(&vmap_area_lock);
+       if (addr + size - 1 < addr)
+               goto overflow;
+
        /* XXX: could have a last_hole cache */
        n = vmap_area_root.rb_node;
        if (n) {
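
The check added above is the usual unsigned wrap-around test: if the
candidate range runs past the top of the address space, addr + size - 1
wraps past zero and compares below addr. The same test recurs inside the
scan loop below, where ALIGN(first->va_end + PAGE_SIZE, align) can itself
push addr past the top. A userspace sketch with made-up addresses
(uint32_t standing in for unsigned long on a 32-bit kernel):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t addr = 0xfffff000u;    /* near the top of a 32-bit space */
            uint32_t size = 0x2000u;        /* two 4 KiB pages */

            /* addr + size - 1 wraps to 0x00000fff, which is < addr. */
            printf("end = 0x%08x, overflow = %d\n",
                   addr + size - 1, (uint32_t)(addr + size - 1) < addr);
            return 0;
    }
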
@@ -375,6 +379,8 @@ retry:
 
                while (addr + size > first->va_start && addr + size <= vend) {
                        addr = ALIGN(first->va_end + PAGE_SIZE, align);
+                       if (addr + size - 1 < addr)
+                               goto overflow;
 
                        n = rb_next(&first->rb_node);
                        if (n)
@@ -385,6 +391,7 @@ retry:
        }
 found:
        if (addr + size > vend) {
+overflow:
                spin_unlock(&vmap_area_lock);
                if (!purged) {
                        purge_vmap_area_lazy();
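
Jumping to the overflow: label deliberately reuses the existing
out-of-space path. The shape of that path, simplified (a sketch of the
surrounding alloc_vmap_area() logic, not a drop-in excerpt):

    retry:
            /* ... scan for a free range ... */
            if (out_of_space_or_overflow) {
                    spin_unlock(&vmap_area_lock);
                    if (!purged) {
                            purge_vmap_area_lazy(); /* reclaim lazily freed areas */
                            purged = 1;
                            goto retry;             /* one more full scan */
                    }
                    return ERR_PTR(-EBUSY);         /* genuinely no room */
            }
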
@@ -508,6 +515,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
        static DEFINE_SPINLOCK(purge_lock);
        LIST_HEAD(valist);
        struct vmap_area *va;
+       struct vmap_area *n_va;
        int nr = 0;
 
        /*
@@ -547,7 +555,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
        if (nr) {
                spin_lock(&vmap_area_lock);
-               list_for_each_entry(va, &valist, purge_list)
+               list_for_each_entry_safe(va, n_va, &valist, purge_list)
                        __free_vmap_area(va);
                spin_unlock(&vmap_area_lock);
        }
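
The fix above closes a use-after-free: __free_vmap_area(va) frees the
entry the iterator is standing on, so plain list_for_each_entry would
read va->purge_list.next from freed memory on the next step.
list_for_each_entry_safe caches the successor in n_va before the body
runs. The same hazard in a self-contained userspace toy:

    #include <stdlib.h>

    struct node { struct node *next; };

    int main(void)
    {
            struct node *head = NULL, *n, *next;

            for (int i = 0; i < 3; i++) {   /* build a 3-node list */
                    n = malloc(sizeof(*n));
                    n->next = head;
                    head = n;
            }

            /* Broken shape: for (n = head; n; n = n->next) free(n);
             * reads n->next from freed memory. The safe shape below
             * fetches the successor first, exactly like n_va: */
            for (n = head; n; n = next) {
                    next = n->next;
                    free(n);
            }
            return 0;
    }
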
@@ -663,10 +671,7 @@ struct vmap_block {
        DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
        DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
        union {
-               struct {
-                       struct list_head free_list;
-                       struct list_head dirty_list;
-               };
+               struct list_head free_list;
                struct rcu_head rcu_head;
        };
 };
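
With dirty_list gone, the union collapses to the common idiom of
overlaying the list linkage with the RCU callback head: the two are never
live at the same time, since the block is unlinked from every list before
it is handed to the RCU free path. The idiom in isolation (a sketch, with
field names matching the struct above):

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct example {
            union {
                    struct list_head free_list; /* while on the free list */
                    struct rcu_head rcu_head;   /* after unlink, for RCU free */
            };
    };
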
@@ -733,7 +738,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
        bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
        bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
        INIT_LIST_HEAD(&vb->free_list);
-       INIT_LIST_HEAD(&vb->dirty_list);
 
        vb_idx = addr_to_vb_idx(va->va_start);
        spin_lock(&vmap_block_tree_lock);
@@ -764,12 +768,7 @@ static void free_vmap_block(struct vmap_block *vb)
        struct vmap_block *tmp;
        unsigned long vb_idx;
 
-       spin_lock(&vb->vbq->lock);
-       if (!list_empty(&vb->free_list))
-               list_del(&vb->free_list);
-       if (!list_empty(&vb->dirty_list))
-               list_del(&vb->dirty_list);
-       spin_unlock(&vb->vbq->lock);
+       BUG_ON(!list_empty(&vb->free_list));
 
        vb_idx = addr_to_vb_idx(vb->va->va_start);
        spin_lock(&vmap_block_tree_lock);
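
Here the unlink duty moves to the callers: by the time free_vmap_block()
runs, the block must already be off the free list (the BUG_ON in vb_free()
below checks the same invariant before calling in), so the function now
asserts that precondition instead of retaking vbq->lock to unlink
defensively.
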
@@ -854,11 +853,7 @@ static void vb_free(const void *addr, unsigned long size)
 
        spin_lock(&vb->lock);
        bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
-       if (!vb->dirty) {
-               spin_lock(&vb->vbq->lock);
-               list_add(&vb->dirty_list, &vb->vbq->dirty);
-               spin_unlock(&vb->vbq->lock);
-       }
+
        vb->dirty += 1UL << order;
        if (vb->dirty == VMAP_BBMAP_BITS) {
                BUG_ON(vb->free || !list_empty(&vb->free_list));
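
The dropped dirty_list bookkeeping was write-only: blocks were added to
vbq->dirty, but nothing ever walked that list. The dirty state that
matters is fully carried by vb->dirty_map (which pages) and vb->dirty
(how many); once the count reaches VMAP_BBMAP_BITS the whole block is
retired via free_vmap_block(), as the surviving code above shows. Losing
the list also saves a vbq->lock round trip on the vb_free() fast path.
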
@@ -1196,6 +1191,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+                                      unsigned long start, unsigned long end,
+                                      void *caller)
+{
+       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+                                 caller);
+}
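
__get_vm_area_caller() lets a wrapper pass its real call site through, so
caller tracking (as shown in /proc/vmallocinfo) attributes the area to the
wrapper's caller rather than to the wrapper itself. A hypothetical caller,
using the stock constants VM_IOREMAP, VMALLOC_START and VMALLOC_END:

    static void *my_map_helper(void)    /* hypothetical wrapper */
    {
            struct vm_struct *area;

            area = __get_vm_area_caller(PAGE_SIZE, VM_IOREMAP,
                                        VMALLOC_START, VMALLOC_END,
                                        __builtin_return_address(0));
            if (!area)
                    return NULL;
            return area->addr;
    }
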
+
 /**
  *     get_vm_area  -  reserve a contiguous kernel virtual area
  *     @size:          size of the area
@@ -1339,6 +1342,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
        BUG_ON(in_interrupt());
+       might_sleep();
        __vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
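
might_sleep() makes misuse loud even on paths that happen not to sleep:
with sleep-in-atomic debugging enabled, it warns deterministically the
moment vunmap() is entered from atomic context, instead of only when
__vunmap actually blocks. What it catches, sketched (some_lock is a
stand-in for any lock that forbids sleeping):

    spin_lock(&some_lock);
    vunmap(addr);           /* warns immediately via might_sleep() */
    spin_unlock(&some_lock);
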
@@ -1358,6 +1362,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
        struct vm_struct *area;
 
+       might_sleep();
+
        if (count > num_physpages)
                return NULL;
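
The same annotation lands in vmap(), which can block while setting up the
vm_struct, alongside the existing sanity cap: a request for more pages
than exist physically (count > num_physpages) is refused outright. A
hypothetical call for context; pages[] holds count page pointers the
caller owns, and VM_MAP/PAGE_KERNEL are the stock flags for a regular
kernel mapping:

    void *buf;

    buf = vmap(pages, count, VM_MAP, PAGE_KERNEL);
    if (!buf)
            return NULL;    /* too many pages, or no virtual space left */
    /* ... use buf, then vunmap(buf), both from sleepable context ... */
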