mm/vmalloc.c: overflow checks, eager debug unmapping, and allocation caller tracking

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2644afb..520a759 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,6 +23,7 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/bootmem.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
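
The new include exists for the vmlist import in vmalloc_init() further down: that path runs before the slab allocator is initialised, so its vmap_area nodes must come from the boot-time allocator. A minimal kernel-style sketch of the idiom (illustrative only, not compilable outside the kernel):

/* Early-boot allocation: slab is not up yet during vmalloc_init(), so
 * alloc_bootmem() supplies the memory. It never returns NULL; it panics
 * on exhaustion, which is acceptable this early in boot. */
#include <linux/bootmem.h>

static struct vmap_area *early_vmap_area(void)
{
	return alloc_bootmem(sizeof(struct vmap_area));
}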
@@ -322,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        unsigned long addr;
        int purged = 0;
 
+       BUG_ON(!size);
        BUG_ON(size & ~PAGE_MASK);
 
        va = kmalloc_node(sizeof(struct vmap_area),
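
The new BUG_ON closes a gap the alignment check alone leaves open: zero is page-aligned by definition, so a zero-size request would sail through and corrupt the allocator's interval arithmetic. A standalone check of that claim:

/* size == 0 passes the page-alignment test, hence the separate BUG_ON. */
#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = 0;

	assert((size & ~PAGE_MASK) == 0);	/* passes: 0 looks "aligned" */
	return 0;
}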
@@ -333,6 +335,9 @@ retry:
        addr = ALIGN(vstart, align);
 
        spin_lock(&vmap_area_lock);
+       if (addr + size - 1 < addr)
+               goto overflow;
+
        /* XXX: could have a last_hole cache */
        n = vmap_area_root.rb_node;
        if (n) {
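
The `addr + size - 1 < addr` test is the standard unsigned wrap-around check: if the last byte of the range has a lower address than the first, the arithmetic overflowed. A runnable demonstration:

/* Standalone demo of the wrap-around check added above. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long addr = ULONG_MAX - 4095;	/* last page of the address space */
	unsigned long size = 8192;		/* two 4 KiB pages */

	/* addr + size wraps past zero, so the end precedes the start. */
	if (addr + size - 1 < addr)
		printf("overflow: %#lx + %#lx wraps\n", addr, size);
	return 0;
}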
@@ -364,6 +369,8 @@ retry:
 
                while (addr + size > first->va_start && addr + size <= vend) {
                        addr = ALIGN(first->va_end + PAGE_SIZE, align);
+                       if (addr + size - 1 < addr)
+                               goto overflow;
 
                        n = rb_next(&first->rb_node);
                        if (n)
@@ -374,6 +381,7 @@ retry:
        }
 found:
        if (addr + size > vend) {
+overflow:
                spin_unlock(&vmap_area_lock);
                if (!purged) {
                        purge_vmap_area_lazy();
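
Both overflow checks funnel into the existing out-of-space path via the new `overflow:` label: drop the lock, purge the lazily freed areas once, and retry before giving up. A compilable sketch of that control flow (the helpers are stand-ins for the rbtree walk and purge_vmap_area_lazy(), not kernel APIs):

/* Sketch of alloc_vmap_area()'s allocate/purge/retry flow. */
#include <stdio.h>

static unsigned long find_hole(unsigned long size) { (void)size; return -4096UL; }
static void purge_lazy_areas(void) { }

static unsigned long alloc_va(unsigned long size, unsigned long vend)
{
	unsigned long addr;
	int purged = 0;

retry:
	addr = find_hole(size);
	/* Arithmetic wrap-around is treated exactly like running past vend. */
	if (addr + size - 1 < addr || addr + size > vend) {
		if (!purged) {
			purge_lazy_areas();	/* reclaim lazily freed space */
			purged = 1;
			goto retry;		/* a single retry after purging */
		}
		return 0;			/* genuinely out of space */
	}
	return addr;
}

int main(void)
{
	printf("%#lx\n", alloc_va(8192, ~0UL));	/* prints 0: the hole wraps */
	return 0;
}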
@@ -433,6 +441,27 @@ static void unmap_vmap_area(struct vmap_area *va)
        vunmap_page_range(va->va_start, va->va_end);
 }
 
+static void vmap_debug_free_range(unsigned long start, unsigned long end)
+{
+       /*
+        * Unmap the page tables and force a TLB flush immediately if
+        * CONFIG_DEBUG_PAGEALLOC is set. This catches use-after-free
+        * bugs in vmalloc space in much the same way DEBUG_PAGEALLOC
+        * catches them in the linear kernel mapping after a page has
+        * been freed.
+        *
+        * All the lazy-freeing logic is still retained, in order to
+        * minimise the intrusiveness of this debugging feature.
+        *
+        * This is going to be *slow* (debugging of the linear mapping
+        * doesn't need a broadcast TLB flush, so it is a lot faster).
+        */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       vunmap_page_range(start, end);
+       flush_tlb_kernel_range(start, end);
+#endif
+}
+
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
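
vmap_debug_free_range() follows the usual compiled-out-debug-hook pattern: with CONFIG_DEBUG_PAGEALLOC unset the body is empty and the call sites cost nothing. A userspace analogue of the pattern (names are illustrative):

/* Debug hook that disappears entirely unless the option is defined. */
#include <stdio.h>

/* #define DEBUG_EAGER_FREE 1 */

static void debug_on_free(void *p, unsigned long len)
{
#ifdef DEBUG_EAGER_FREE
	/* Expensive checking runs only in debug builds. */
	printf("eagerly invalidating %p (+%lu bytes)\n", p, len);
#else
	(void)p;
	(void)len;
#endif
}

int main(void)
{
	char buf[64];

	debug_on_free(buf, sizeof(buf));	/* no-op as configured here */
	return 0;
}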
@@ -476,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
        static DEFINE_SPINLOCK(purge_lock);
        LIST_HEAD(valist);
        struct vmap_area *va;
+       struct vmap_area *n_va;
        int nr = 0;
 
        /*
@@ -515,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
        if (nr) {
                spin_lock(&vmap_area_lock);
-               list_for_each_entry(va, &valist, purge_list)
+               list_for_each_entry_safe(va, n_va, &valist, purge_list)
                        __free_vmap_area(va);
                spin_unlock(&vmap_area_lock);
        }
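
The switch to list_for_each_entry_safe() fixes a use-after-free in the iterator itself: __free_vmap_area() frees each entry, so the plain variant would read the next pointer out of freed memory on the following step. The safe variant caches it first. A minimal userspace analogue:

/* Freeing inside a list walk requires fetching ->next before the free. */
#include <stdlib.h>

struct node { struct node *next; };

static void free_all(struct node *head)
{
	struct node *n, *next;

	for (n = head; n; n = next) {
		next = n->next;	/* saved first, as the _safe iterator does */
		free(n);	/* n->next is invalid after this point */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}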
@@ -913,6 +943,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
        BUG_ON(addr & (PAGE_SIZE-1));
 
        debug_check_no_locks_freed(mem, size);
+       vmap_debug_free_range(addr, addr+size);
 
        if (likely(count <= VMAP_MAX_ALLOC))
                vb_free(mem, size);
@@ -961,6 +992,8 @@ EXPORT_SYMBOL(vm_map_ram);
 
 void __init vmalloc_init(void)
 {
+       struct vmap_area *va;
+       struct vm_struct *tmp;
        int i;
 
        for_each_possible_cpu(i) {
@@ -973,12 +1006,22 @@ void __init vmalloc_init(void)
                vbq->nr_dirty = 0;
        }
 
+       /* Import existing vmlist entries. */
+       for (tmp = vmlist; tmp; tmp = tmp->next) {
+               va = alloc_bootmem(sizeof(struct vmap_area));
+               va->flags = tmp->flags | VM_VM_AREA;
+               va->va_start = (unsigned long)tmp->addr;
+               va->va_end = va->va_start + tmp->size;
+               __insert_vmap_area(va);
+       }
        vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
        unsigned long end = addr + size;
+
+       flush_cache_vunmap(addr, end);
        vunmap_page_range(addr, end);
        flush_tlb_kernel_range(addr, end);
 }
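
Two things happen in this hunk: existing vmlist entries are mirrored into the new vmap_area tree (via alloc_bootmem, per the include note above), and unmap_kernel_range() gains a cache flush with a strict ordering requirement. On virtually indexed caches the lines can only be flushed while the mapping still exists, so the cache flush must precede the page-table teardown, and the TLB flush must follow it. Restated as an annotated sketch (function names as in the diff; the wrapper itself is illustrative):

/* Required teardown order for a kernel virtual mapping:
 *   1. flush_cache_vunmap()     - writeback/invalidate while still mapped
 *   2. vunmap_page_range()      - clear the page tables
 *   3. flush_tlb_kernel_range() - discard stale translations on all CPUs
 * Doing step 1 after step 2 breaks virtually-indexed-cache machines. */
static void teardown_kernel_mapping(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}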
@@ -1073,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+                                      unsigned long start, unsigned long end,
+                                      void *caller)
+{
+       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+                                 caller);
+}
+
 /**
  *     get_vm_area  -  reserve a contiguous kernel virtual area
  *     @size:          size of the area
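
__get_vm_area_caller() threads a `caller` cookie through to the allocation record so tools such as /proc/vmallocinfo can attribute mappings to their origin. The cookie is normally the return address of the exported entry point, captured with the GCC/Clang builtin shown below (runnable userspace demo; noinline keeps the capture point honest under optimisation):

/* Demo of __builtin_return_address(0), the mechanism behind 'caller'. */
#include <stdio.h>

static __attribute__((noinline)) void *record_caller(void)
{
	return __builtin_return_address(0);	/* an address inside our caller */
}

int main(void)
{
	printf("allocation requested from %p\n", record_caller());
	return 0;
}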
@@ -1129,6 +1180,8 @@ struct vm_struct *remove_vm_area(const void *addr)
        if (va && va->flags & VM_VM_AREA) {
                struct vm_struct *vm = va->private;
                struct vm_struct *tmp, **p;
+
+               vmap_debug_free_range(va->va_start, va->va_end);
                free_unmap_vmap_area(va);
                vm->size -= PAGE_SIZE;
 
@@ -1376,7 +1429,8 @@ void *vmalloc_user(unsigned long size)
        struct vm_struct *area;
        void *ret;
 
-       ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+       ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                            PAGE_KERNEL, -1, __builtin_return_address(0));
        if (ret) {
                area = find_vm_area(ret);
                area->flags |= VM_USERMAP;
@@ -1421,7 +1475,8 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
+       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+                             -1, __builtin_return_address(0));
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
@@ -1441,7 +1496,8 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-       return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
+       return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+                             -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -1457,7 +1513,8 @@ void *vmalloc_32_user(unsigned long size)
        struct vm_struct *area;
        void *ret;
 
-       ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+       ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+                            -1, __builtin_return_address(0));
        if (ret) {
                area = find_vm_area(ret);
                area->flags |= VM_USERMAP;
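
These last four hunks all make the same change: each wrapper now calls __vmalloc_node() directly, passing node -1 (no NUMA preference) and its own return address. Going through __vmalloc() would record the wrapper, not its user, as the caller. A small demo of why the capture must happen in the outermost exported function (illustrative names):

/* If wrapper() let backend() capture the return address itself, the
 * recorded caller would be wrapper, not wrapper's user. */
#include <stdio.h>

static void backend(unsigned long size, void *caller)
{
	printf("%lu bytes requested by %p\n", size, caller);
}

static __attribute__((noinline)) void wrapper(unsigned long size)
{
	/* Capture the *wrapper's* caller and forward it explicitly. */
	backend(size, __builtin_return_address(0));
}

int main(void)
{
	wrapper(4096);
	return 0;
}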