mm: purge fragmented blocks in vmap_block
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 37e6929..ae00746 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -509,6 +509,9 @@ static unsigned long lazy_max_pages(void)
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/* for per-CPU blocks */
+static void purge_fragmented_blocks_allcpus(void);
+
 /*
  * Purges all lazily-freed vmap areas.
  *
@@ -539,6 +542,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
        } else
                spin_lock(&purge_lock);
 
+       if (sync)
+               purge_fragmented_blocks_allcpus();
+
        rcu_read_lock();
        list_for_each_entry_rcu(va, &vmap_area_list, list) {
                if (va->flags & VM_LAZY_FREE) {
@@ -555,10 +561,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
        }
        rcu_read_unlock();
 
-       if (nr) {
-               BUG_ON(nr > atomic_read(&vmap_lazy_nr));
+       if (nr)
                atomic_sub(nr, &vmap_lazy_nr);
-       }
 
        if (nr || force_flush)
                flush_tlb_kernel_range(*start, *end);
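
On a synchronous purge, the fragmented per-CPU blocks are drained first, so the vmap areas they lazily release are picked up by the list walk below and covered by the same flush_tlb_kernel_range() call. The walk itself only accumulates a count and adjusts the shared counter once at the end. A minimal userspace sketch of that batched-decrement pattern, using C11 atomics and illustrative names (lazy_nr stands in for vmap_lazy_nr, not the kernel symbol):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long lazy_nr = 64;            /* stand-in for vmap_lazy_nr */

    int main(void)
    {
            long nr = 0;

            for (int i = 0; i < 4; i++)         /* stand-in for the list walk */
                    nr += 16;                   /* each area contributes its size */
            if (nr)
                    atomic_fetch_sub(&lazy_nr, nr); /* one RMW, not one per node */
            printf("remaining: %ld\n", atomic_load(&lazy_nr));
            return 0;
    }
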
@@ -669,8 +673,6 @@ static bool vmap_initialized __read_mostly = false;
 struct vmap_block_queue {
        spinlock_t lock;
        struct list_head free;
-       struct list_head dirty;
-       unsigned int nr_dirty;
 };
 
 struct vmap_block {
@@ -680,10 +682,9 @@ struct vmap_block {
        unsigned long free, dirty;
        DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
        DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
-       union {
-               struct list_head free_list;
-               struct rcu_head rcu_head;
-       };
+       struct list_head free_list;
+       struct rcu_head rcu_head;
+       struct list_head purge;
 };
 
 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
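
The union could overlay free_list and rcu_head only while a block was guaranteed to be on at most one of those paths at a time. Once purging unlinks a block with list_del_rcu() and immediately hands it to call_rcu(), concurrent RCU readers may still be following the block's free_list pointers while the rcu_head is live, so the two fields need separate storage; the new purge member lets the purge path collect victims on a private list. A small standalone C sketch of the underlying hazard (simplified struct definitions, not the kernel's):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

    struct old_block {
            union {                     /* the old layout: shared bytes */
                    struct list_head free_list;
                    struct rcu_head rcu_head;
            };
    };

    int main(void)
    {
            struct old_block b;

            b.free_list.next = b.free_list.prev = &b.free_list;
            /* Arming the RCU callback clobbers free_list; a reader still
             * traversing the list would chase overwritten pointers. */
            b.rcu_head.func = NULL;
            printf("free_list.prev is now %p\n", (void *)b.free_list.prev);
            return 0;
    }
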
@@ -759,7 +760,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
        vbq = &get_cpu_var(vmap_block_queue);
        vb->vbq = vbq;
        spin_lock(&vbq->lock);
-       list_add(&vb->free_list, &vbq->free);
+       list_add_rcu(&vb->free_list, &vbq->free);
        spin_unlock(&vbq->lock);
        put_cpu_var(vmap_block_queue);
 
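
list_add_rcu() is needed here because the free list is now traversed locklessly (list_for_each_entry_rcu() in vb_alloc() and purge_fragmented_blocks()): it orders the initialization of the new entry before the store that publishes it. The same publish idiom in portable C11, as a rough userspace sketch with illustrative names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct node { int payload; struct node *next; };

    static _Atomic(struct node *) head;

    void publish(int value)
    {
            struct node *n = malloc(sizeof(*n));

            n->payload = value;         /* fully initialize first... */
            n->next = atomic_load_explicit(&head, memory_order_relaxed);
            /* ...then make the node reachable: the release store keeps its
             * initialization ordered before the pointer update, which is
             * what readers using acquire/dependency ordering rely on. */
            atomic_store_explicit(&head, n, memory_order_release);
    }
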
@@ -778,8 +779,6 @@ static void free_vmap_block(struct vmap_block *vb)
        struct vmap_block *tmp;
        unsigned long vb_idx;
 
-       BUG_ON(!list_empty(&vb->free_list));
-
        vb_idx = addr_to_vb_idx(vb->va->va_start);
        spin_lock(&vmap_block_tree_lock);
        tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
@@ -790,12 +789,61 @@ static void free_vmap_block(struct vmap_block *vb)
        call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
+static void purge_fragmented_blocks(int cpu)
+{
+       LIST_HEAD(purge);
+       struct vmap_block *vb;
+       struct vmap_block *n_vb;
+       struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+
+               if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
+                       continue;
+
+               spin_lock(&vb->lock);
+               if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
+                       vb->free = 0; /* prevent further allocs after releasing lock */
+                       vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
+                       bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
+                       bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
+                       spin_lock(&vbq->lock);
+                       list_del_rcu(&vb->free_list);
+                       spin_unlock(&vbq->lock);
+                       spin_unlock(&vb->lock);
+                       list_add_tail(&vb->purge, &purge);
+               } else
+                       spin_unlock(&vb->lock);
+       }
+       rcu_read_unlock();
+
+       list_for_each_entry_safe(vb, n_vb, &purge, purge) {
+               list_del(&vb->purge);
+               free_vmap_block(vb);
+       }
+}
+
+static void purge_fragmented_blocks_thiscpu(void)
+{
+       purge_fragmented_blocks(smp_processor_id());
+}
+
+static void purge_fragmented_blocks_allcpus(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               purge_fragmented_blocks(cpu);
+}
+
 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
        struct vmap_block_queue *vbq;
        struct vmap_block *vb;
        unsigned long addr = 0;
        unsigned int order;
+       int purge = 0;
 
        BUG_ON(size & ~PAGE_MASK);
        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
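
purge_fragmented_blocks() above uses a check / lock / re-check pattern: the unlocked test merely filters candidates during the RCU walk, and is repeated under vb->lock before the block is committed to purging (free = 0 and dirty = VMAP_BBMAP_BITS make it look fully consumed, so neither the allocator nor a second purge will touch it). Victims are batched on a local list and only freed after rcu_read_unlock(). A condensed userspace analogue of the re-check step, assuming a pthread mutex in place of the spinlock and a caller-initialized struct:

    #include <pthread.h>
    #include <stdbool.h>

    struct block {
            pthread_mutex_t lock;
            unsigned long free, dirty, total;
    };

    /* "every bit is either free or dirty, but not everything is dirty yet" */
    static bool fully_reclaimable(const struct block *b)
    {
            return b->free + b->dirty == b->total && b->dirty != b->total;
    }

    bool try_claim_for_purge(struct block *b)
    {
            if (!fully_reclaimable(b))          /* racy pre-check, no lock */
                    return false;
            pthread_mutex_lock(&b->lock);
            if (!fully_reclaimable(b)) {        /* authoritative re-check */
                    pthread_mutex_unlock(&b->lock);
                    return false;
            }
            b->free = 0;                        /* bar further allocations */
            b->dirty = b->total;                /* bar a second purge */
            pthread_mutex_unlock(&b->lock);
            return true;                        /* caller batches the free */
    }
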
@@ -808,24 +856,38 @@ again:
                int i;
 
                spin_lock(&vb->lock);
+               if (vb->free < 1UL << order)
+                       goto next;
+
                i = bitmap_find_free_region(vb->alloc_map,
                                                VMAP_BBMAP_BITS, order);
 
-               if (i >= 0) {
-                       addr = vb->va->va_start + (i << PAGE_SHIFT);
-                       BUG_ON(addr_to_vb_idx(addr) !=
-                                       addr_to_vb_idx(vb->va->va_start));
-                       vb->free -= 1UL << order;
-                       if (vb->free == 0) {
-                               spin_lock(&vbq->lock);
-                               list_del_init(&vb->free_list);
-                               spin_unlock(&vbq->lock);
+               if (i < 0) {
+                       if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
+                               /* fragmented and no outstanding allocations */
+                               BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
+                               purge = 1;
                        }
-                       spin_unlock(&vb->lock);
-                       break;
+                       goto next;
+               }
+               addr = vb->va->va_start + (i << PAGE_SHIFT);
+               BUG_ON(addr_to_vb_idx(addr) !=
+                               addr_to_vb_idx(vb->va->va_start));
+               vb->free -= 1UL << order;
+               if (vb->free == 0) {
+                       spin_lock(&vbq->lock);
+                       list_del_rcu(&vb->free_list);
+                       spin_unlock(&vbq->lock);
                }
                spin_unlock(&vb->lock);
+               break;
+next:
+               spin_unlock(&vb->lock);
        }
+
+       if (purge)
+               purge_fragmented_blocks_thiscpu();
+
        put_cpu_var(vmap_block_queue);
        rcu_read_unlock();
 
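
The rewritten vb_alloc() loop skips blocks that cannot hold 1 << order pages before searching the bitmap, and treats a failed search on a block whose bits are all accounted for as fragmentation, deferring a purge of this CPU's queue until the walk is done. For reference, bitmap_find_free_region() looks for a naturally aligned run of 1 << order free bits and claims it; a simplified single-purpose sketch of that behavior (not the kernel implementation):

    #include <stdbool.h>

    static bool test_bit(const unsigned long *map, int i)
    {
            return map[i / (8 * sizeof(long))] >> (i % (8 * sizeof(long))) & 1;
    }

    static void set_bit(unsigned long *map, int i)
    {
            map[i / (8 * sizeof(long))] |= 1UL << (i % (8 * sizeof(long)));
    }

    /* find a free, naturally aligned run of 1 << order bits and claim it;
     * returns the first bit index, or -1 when the map is too fragmented */
    int find_free_region(unsigned long *map, int bits, int order)
    {
            int run = 1 << order;

            for (int base = 0; base + run <= bits; base += run) {
                    int i;

                    for (i = 0; i < run; i++)
                            if (test_bit(map, base + i))
                                    break;
                    if (i == run) {
                            for (i = 0; i < run; i++)
                                    set_bit(map, base + i);
                            return base;
                    }
            }
            return -1;
    }
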
@@ -862,11 +924,11 @@ static void vb_free(const void *addr, unsigned long size)
        BUG_ON(!vb);
 
        spin_lock(&vb->lock);
-       bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
+       BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
        vb->dirty += 1UL << order;
        if (vb->dirty == VMAP_BBMAP_BITS) {
-               BUG_ON(vb->free || !list_empty(&vb->free_list));
+               BUG_ON(vb->free);
                spin_unlock(&vb->lock);
                free_vmap_block(vb);
        } else
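
The return value of bitmap_allocate_region() is now checked: it fails when any bit in the target region is already set, so the BUG_ON turns a double vb_free() of the same sub-block into an immediate, loud failure instead of silent dirty-count corruption. (The list_empty() assertions are gone because list_del_rcu() deliberately leaves the entry's next pointer intact for concurrent readers, so an unlinked block never looks "empty".) A toy single-word illustration of the double-free check, with a hypothetical helper rather than the kernel routine:

    #include <assert.h>

    /* returns 0 on success, -1 if any bit in [pos, pos + (1 << order)) was
     * already set; single-word map and order < 6 assumed for this sketch */
    int mark_region(unsigned long *map, int pos, int order)
    {
            unsigned long mask = ((1UL << (1 << order)) - 1) << pos;

            if (*map & mask)
                    return -1;  /* region already dirty: double free */
            *map |= mask;
            return 0;
    }

    int main(void)
    {
            unsigned long map = 0;

            assert(mark_region(&map, 4, 1) == 0);   /* first free is fine */
            assert(mark_region(&map, 4, 1) == -1);  /* double free caught */
            return 0;
    }
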
@@ -1035,8 +1097,6 @@ void __init vmalloc_init(void)
                vbq = &per_cpu(vmap_block_queue, i);
                spin_lock_init(&vbq->lock);
                INIT_LIST_HEAD(&vbq->free);
-               INIT_LIST_HEAD(&vbq->dirty);
-               vbq->nr_dirty = 0;
        }
 
        /* Import existing vmlist entries. */