CGroup API files: make CGROUP_DEBUG default to off
index c7ed061..677d184 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/backing-dev.h>
 #include <linux/mm.h>
 #include <linux/shm.h>
 #include <linux/mman.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)      (0)
+#endif
+
+#ifndef arch_rebalance_pgtables
+#define arch_rebalance_pgtables(addr, len)             (addr)
+#endif
 
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -88,7 +98,7 @@ atomic_t vm_committed_space = ATOMIC_INIT(0);
  * Note this is a helper function intended to be used by LSMs which
  * wish to use this logic.
  */
-int __vm_enough_memory(long pages, int cap_sys_admin)
+int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
        unsigned long free, allowed;
 
@@ -112,7 +122,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += atomic_read(&slab_reclaim_pages);
+               free += global_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave the last 3% for root
@@ -161,7 +171,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 
        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
-       allowed -= current->mm->total_vm / 32;
+       allowed -= mm->total_vm / 32;
 
        /*
         * cast `allowed' as a signed long because vm_committed_space
@@ -175,8 +185,6 @@ error:
        return -ENOMEM;
 }
 
-EXPORT_SYMBOL(__vm_enough_memory);
-
 /*
  * Requires inode->i_mapping->i_mmap_lock
  */
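
The hunks above change __vm_enough_memory() to take the mm being charged as an explicit argument instead of assuming current->mm, and drop its EXPORT_SYMBOL. A minimal sketch of a caller passing its own mm (the wrapper name is hypothetical; only the __vm_enough_memory() prototype comes from this patch):

/* Hypothetical wrapper, illustrating the new prototype:
 *   int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 */
static int example_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	int cap_sys_admin = 0;

	if (capable(CAP_SYS_ADMIN))
		cap_sys_admin = 1;

	/* Charge the mm that is actually growing, not current->mm. */
	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
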
@@ -184,7 +192,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
        if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file->f_dentry->d_inode->i_writecount);
+               atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;
 
@@ -224,7 +232,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
-       mpol_free(vma_policy(vma));
+       mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
 }
@@ -237,7 +245,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
        down_write(&mm->mmap_sem);
 
-       if (brk < mm->end_code)
+       if (brk < mm->start_brk)
                goto out;
 
        /*
@@ -247,7 +255,8 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
         * not page aligned -Ram Gupta
         */
        rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+       if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+                       (mm->end_data - mm->start_data) > rlim)
                goto out;
 
        newbrk = PAGE_ALIGN(brk);
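
The reworked RLIMIT_DATA test charges both the brk heap (brk - start_brk) and the static data segment (end_data - start_data) against the limit, where the old test only measured brk - start_data. A standalone sketch of the new check (the helper name is illustrative; the field names are those of struct mm_struct used above):

/* Illustrative only: the RLIMIT_DATA test introduced in sys_brk() above. */
static int example_brk_exceeds_data_limit(const struct mm_struct *mm,
					  unsigned long brk, unsigned long rlim)
{
	unsigned long heap = brk - mm->start_brk;            /* dynamic brk area */
	unsigned long data = mm->end_data - mm->start_data;  /* static data segment */

	return rlim < RLIM_INFINITY && heap + data > rlim;
}
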
@@ -295,6 +304,8 @@ static int browse_rb(struct rb_root *root)
                        printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
                i++;
                pn = nd;
+               prev = vma->vm_start;
+               pend = vma->vm_end;
        }
        j = 0;
        for (nd = pn; nd; nd = rb_prev(nd)) {
@@ -395,7 +406,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
                struct address_space *mapping = file->f_mapping;
 
                if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file->f_dentry->d_inode->i_writecount);
+                       atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;
 
@@ -615,7 +626,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                if (file)
                        fput(file);
                mm->map_count--;
-               mpol_free(vma_policy(next));
+               mpol_put(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
@@ -887,26 +898,12 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        unsigned long flags, unsigned long pgoff)
 {
        struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
        struct inode *inode;
        unsigned int vm_flags;
-       int correct_wcount = 0;
        int error;
-       struct rb_node ** rb_link, * rb_parent;
        int accountable = 1;
-       unsigned long charged = 0, reqprot = prot;
-
-       if (file) {
-               if (is_file_hugepages(file))
-                       accountable = 0;
+       unsigned long reqprot = prot;
 
-               if (!file->f_op || !file->f_op->mmap)
-                       return -ENODEV;
-
-               if ((prot & PROT_EXEC) &&
-                   (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-                       return -EPERM;
-       }
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
@@ -914,12 +911,19 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         *  mounted, in which case we dont add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-               if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+               if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;
 
        if (!len)
                return -EINVAL;
 
+       if (!(flags & MAP_FIXED))
+               addr = round_hint_to_min(addr);
+
+       error = arch_mmap_check(addr, len, flags);
+       if (error)
+               return error;
+
        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len || len > TASK_SIZE)
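
round_hint_to_min() is not defined in this file; its job is to push a non-MAP_FIXED address hint that falls below the minimum allowed mapping address up to that minimum. A rough sketch of the intended behaviour, assuming an mmap_min_addr-style lower bound (the authoritative definition lives outside this patch and may differ):

/* Sketch only: round a low mmap hint up past the protected low range. */
static unsigned long example_round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (hint != 0 && hint < mmap_min_addr)
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}
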
@@ -963,7 +967,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        return -EAGAIN;
        }
 
-       inode = file ? file->f_dentry->d_inode : NULL;
+       inode = file ? file->f_path.dentry->d_inode : NULL;
 
        if (file) {
                switch (flags & MAP_TYPE) {
@@ -992,6 +996,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
+                       if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
+                               if (vm_flags & VM_EXEC)
+                                       return -EPERM;
+                               vm_flags &= ~VM_MAYEXEC;
+                       }
+                       if (is_file_hugepages(file))
+                               accountable = 0;
+
+                       if (!file->f_op || !file->f_op->mmap)
+                               return -ENODEV;
                        break;
 
                default:
@@ -1013,10 +1027,60 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags);
+       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
        if (error)
                return error;
-               
+
+       return mmap_region(file, addr, len, flags, vm_flags, pgoff,
+                          accountable);
+}
+EXPORT_SYMBOL(do_mmap_pgoff);
+
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+       unsigned int vm_flags = vma->vm_flags;
+
+       /* If it was private or non-writable, the write bit is already clear */
+       if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+               return 0;
+
+       /* The backer wishes to know when pages are first written to? */
+       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+               return 1;
+
+       /* The open routine did something to the protections already? */
+       if (pgprot_val(vma->vm_page_prot) !=
+           pgprot_val(vm_get_page_prot(vm_flags)))
+               return 0;
+
+       /* Specialty mapping? */
+       if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+               return 0;
+
+       /* Can the mapping track the dirty pages? */
+       return vma->vm_file && vma->vm_file->f_mapping &&
+               mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
+unsigned long mmap_region(struct file *file, unsigned long addr,
+                         unsigned long len, unsigned long flags,
+                         unsigned int vm_flags, unsigned long pgoff,
+                         int accountable)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *prev;
+       int correct_wcount = 0;
+       int error;
+       struct rb_node **rb_link, *rb_parent;
+       unsigned long charged = 0;
+       struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
+
        /* Clear old maps */
        error = -ENOMEM;
 munmap_back:
@@ -1072,8 +1136,7 @@ munmap_back:
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
 
        if (file) {
@@ -1097,12 +1160,6 @@ munmap_back:
                        goto free_vma;
        }
 
-       /* Don't make the VMA automatically writable if it's shared, but the
-        * backer wishes to know when pages are first written to */
-       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-               vma->vm_page_prot =
-                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
        /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
         * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
         * that memory reservation must be checked; but that reservation
@@ -1120,34 +1177,31 @@ munmap_back:
        pgoff = vma->vm_pgoff;
        vm_flags = vma->vm_flags;
 
-       if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
+       if (vma_wants_writenotify(vma))
+               vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
+
+       if (file && vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               file = vma->vm_file;
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               if (correct_wcount)
-                       atomic_inc(&inode->i_writecount);
-       } else {
-               if (file) {
-                       if (correct_wcount)
-                               atomic_inc(&inode->i_writecount);
-                       fput(file);
-               }
-               mpol_free(vma_policy(vma));
+               mpol_put(vma_policy(vma));
                kmem_cache_free(vm_area_cachep, vma);
+               fput(file);
+       } else {
+               vma_link(mm, vma, prev, rb_link, rb_parent);
+               file = vma->vm_file;
        }
-out:   
+
+       /* Once vma denies write, undo our temporary denial count */
+       if (correct_wcount)
+               atomic_inc(&inode->i_writecount);
+out:
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
-       if (flags & MAP_POPULATE) {
-               up_write(&mm->mmap_sem);
-               sys_remap_file_pages(addr, len, 0,
-                                       pgoff, flags & MAP_NONBLOCK);
-               down_write(&mm->mmap_sem);
-       }
+       if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+               make_pages_present(addr, addr + len);
        return addr;
 
 unmap_and_free_vma:
@@ -1167,8 +1221,6 @@ unacct_error:
        return error;
 }
 
-EXPORT_SYMBOL(do_mmap_pgoff);
-
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -1192,6 +1244,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       if (flags & MAP_FIXED)
+               return addr;
+
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
@@ -1265,6 +1320,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       if (flags & MAP_FIXED)
+               return addr;
+
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
@@ -1352,40 +1410,22 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
-       unsigned long ret;
-
-       if (!(flags & MAP_FIXED)) {
-               unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-
-               get_area = current->mm->get_unmapped_area;
-               if (file && file->f_op && file->f_op->get_unmapped_area)
-                       get_area = file->f_op->get_unmapped_area;
-               addr = get_area(file, addr, len, pgoff, flags);
-               if (IS_ERR_VALUE(addr))
-                       return addr;
-       }
+       unsigned long (*get_area)(struct file *, unsigned long,
+                                 unsigned long, unsigned long, unsigned long);
+
+       get_area = current->mm->get_unmapped_area;
+       if (file && file->f_op && file->f_op->get_unmapped_area)
+               get_area = file->f_op->get_unmapped_area;
+       addr = get_area(file, addr, len, pgoff, flags);
+       if (IS_ERR_VALUE(addr))
+               return addr;
 
        if (addr > TASK_SIZE - len)
                return -ENOMEM;
        if (addr & ~PAGE_MASK)
                return -EINVAL;
-       if (file && is_file_hugepages(file))  {
-               /*
-                * Check if the given range is hugepage aligned, and
-                * can be made suitable for hugepages.
-                */
-               ret = prepare_hugepage_range(addr, len);
-       } else {
-               /*
-                * Ensure that a normal request is not falling in a
-                * reserved hugepage range.  For some archs like IA-64,
-                * there is a separate region for hugepages.
-                */
-               ret = is_hugepage_only_range(current->mm, addr, len);
-       }
-       if (ret)
-               return -EINVAL;
-       return addr;
+
+       return arch_rebalance_pgtables(addr, len);
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1472,6 +1512,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
+       unsigned long new_start;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
@@ -1491,6 +1532,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                        return -ENOMEM;
        }
 
+       /* Check to ensure the stack will not grow into a hugetlb-only region */
+       new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
+                       vma->vm_end - size;
+       if (is_hugepage_only_range(vma->vm_mm, new_start, size))
+               return -EFAULT;
+
        /*
         * Overcommit..  This must be the final test, as it will
         * update security statistics.
@@ -1533,9 +1580,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
+        * Also guard against wrapping around to address 0.
         */
-       address += 4 + PAGE_SIZE - 1;
-       address &= PAGE_MASK;
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else {
+               anon_vma_unlock(vma);
+               return -ENOMEM;
+       }
        error = 0;
 
        /* Somebody else might have raced and expanded it already */
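
Worked example of the new wrap guard: with 4 KB pages, any address in the topmost page of the address space makes PAGE_ALIGN(address + 4) wrap around to 0; 0 is not greater than the original address, so the else branch releases the anon_vma lock and returns -ENOMEM rather than letting the target address silently wrap to 0 as the old unconditional alignment did.
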
@@ -1554,33 +1606,11 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
 
-#ifdef CONFIG_STACK_GROWSUP
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-       return expand_upwards(vma, address);
-}
-
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-       struct vm_area_struct *vma, *prev;
-
-       addr &= PAGE_MASK;
-       vma = find_vma_prev(mm, addr, &prev);
-       if (vma && (vma->vm_start <= addr))
-               return vma;
-       if (!prev || expand_stack(prev, addr))
-               return NULL;
-       if (prev->vm_flags & VM_LOCKED) {
-               make_pages_present(addr, prev->vm_end);
-       }
-       return prev;
-}
-#else
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+static inline int expand_downwards(struct vm_area_struct *vma,
+                                  unsigned long address)
 {
        int error;
 
@@ -1590,6 +1620,12 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
+
+       address &= PAGE_MASK;
+       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       if (error)
+               return error;
+
        anon_vma_lock(vma);
 
        /*
@@ -1597,8 +1633,6 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
-       address &= PAGE_MASK;
-       error = 0;
 
        /* Somebody else might have raced and expanded it already */
        if (address < vma->vm_start) {
@@ -1617,6 +1651,38 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
        return error;
 }
 
+int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_downwards(vma, address);
+}
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_upwards(vma, address);
+}
+
+struct vm_area_struct *
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+       struct vm_area_struct *vma, *prev;
+
+       addr &= PAGE_MASK;
+       vma = find_vma_prev(mm, addr, &prev);
+       if (vma && (vma->vm_start <= addr))
+               return vma;
+       if (!prev || expand_stack(prev, addr))
+               return NULL;
+       if (prev->vm_flags & VM_LOCKED)
+               make_pages_present(addr, prev->vm_end);
+       return prev;
+}
+#else
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_downwards(vma, address);
+}
+
 struct vm_area_struct *
 find_extend_vma(struct mm_struct * mm, unsigned long addr)
 {
@@ -1634,9 +1700,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
+       if (vma->vm_flags & VM_LOCKED)
                make_pages_present(addr, start);
-       }
        return vma;
 }
 #endif
@@ -1717,7 +1782,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 
 /*
  * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the the tail.
+ * either for the first part or the tail.
  */
 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
              unsigned long addr, int new_below)
@@ -1731,7 +1796,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -1745,7 +1810,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
        }
 
-       pol = mpol_copy(vma_policy(vma));
+       pol = mpol_dup(vma_policy(vma));
        if (IS_ERR(pol)) {
                kmem_cache_free(vm_area_cachep, new);
                return PTR_ERR(pol);
@@ -1866,6 +1931,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        unsigned long flags;
        struct rb_node ** rb_link, * rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
+       int error;
 
        len = PAGE_ALIGN(len);
        if (!len)
@@ -1874,6 +1940,19 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
 
+       if (is_hugepage_only_range(mm, addr, len))
+               return -EINVAL;
+
+       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
+       if (error)
+               return error;
+
+       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+       error = arch_mmap_check(addr, len, flags);
+       if (error)
+               return error;
+
        /*
         * mlock MCL_FUTURE?
         */
@@ -1914,8 +1993,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (security_vm_enough_memory(len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
        /* Can we just expand an old private anonymous mapping? */
        if (vma_merge(mm, prev, addr, addr + len, flags,
                                        NULL, NULL, pgoff, NULL))
@@ -1935,8 +2012,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       vma->vm_page_prot = vm_get_page_prot(flags);
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
        mm->total_vm += len >> PAGE_SHIFT;
@@ -1957,6 +2033,9 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long nr_accounted = 0;
        unsigned long end;
 
+       /* mm's last user has gone, and it's about to be pulled down */
+       arch_exit_mmap(mm);
+
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
@@ -2006,7 +2085,7 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        if (__vma && __vma->vm_start < vma->vm_end)
                return -ENOMEM;
        if ((vma->vm_flags & VM_ACCOUNT) &&
-            security_vm_enough_memory(vma_pages(vma)))
+            security_vm_enough_memory_mm(mm, vma_pages(vma)))
                return -ENOMEM;
        vma_link(mm, vma, prev, rb_link, rb_parent);
        return 0;
@@ -2044,10 +2123,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                    vma_start < new_vma->vm_end)
                        *vmap = new_vma;
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
-                       pol = mpol_copy(vma_policy(vma));
+                       pol = mpol_dup(vma_policy(vma));
                        if (IS_ERR(pol)) {
                                kmem_cache_free(vm_area_cachep, new_vma);
                                return NULL;
@@ -2081,3 +2160,82 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
                return 0;
        return 1;
 }
+
+
+static int special_mapping_fault(struct vm_area_struct *vma,
+                               struct vm_fault *vmf)
+{
+       pgoff_t pgoff;
+       struct page **pages;
+
+       /*
+        * special mappings have no vm_file, and in that case, the mm
+        * uses vm_pgoff internally. So we have to subtract it from here.
+        * We are allowed to do this because we are the mm; do not copy
+        * this code into drivers!
+        */
+       pgoff = vmf->pgoff - vma->vm_pgoff;
+
+       for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
+               pgoff--;
+
+       if (*pages) {
+               struct page *page = *pages;
+               get_page(page);
+               vmf->page = page;
+               return 0;
+       }
+
+       return VM_FAULT_SIGBUS;
+}
+
+/*
+ * Having a close hook prevents vma merging regardless of flags.
+ */
+static void special_mapping_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct special_mapping_vmops = {
+       .close = special_mapping_close,
+       .fault = special_mapping_fault,
+};
+
+/*
+ * Called with mm->mmap_sem held for writing.
+ * Insert a new vma covering the given region, with the given flags.
+ * Its pages are supplied by the given array of struct page *.
+ * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+                           unsigned long addr, unsigned long len,
+                           unsigned long vm_flags, struct page **pages)
+{
+       struct vm_area_struct *vma;
+
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       if (unlikely(vma == NULL))
+               return -ENOMEM;
+
+       vma->vm_mm = mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
+
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+       vma->vm_ops = &special_mapping_vmops;
+       vma->vm_private_data = pages;
+
+       if (unlikely(insert_vm_struct(mm, vma))) {
+               kmem_cache_free(vm_area_cachep, vma);
+               return -ENOMEM;
+       }
+
+       mm->total_vm += len >> PAGE_SHIFT;
+
+       return 0;
+}
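
install_special_mapping() gives the kernel a generic way to place a NULL-terminated array of kernel-owned pages (a vDSO is the typical user) into a process's address space. A hedged sketch of a caller, with the address, flags and page array chosen purely for illustration:

/* Illustrative caller only: map one kernel-owned page read+exec.
 * The page array must stay alive for as long as the mapping can exist,
 * and it is NULL-terminated as described in the comment above.
 */
static struct page *example_pages[2];	/* [0] = the page, [1] = NULL */

static int example_map_special_page(struct mm_struct *mm, unsigned long addr)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
				      example_pages);
	up_write(&mm->mmap_sem);
	return ret;
}
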