diff --git a/mm/mmap.c b/mm/mmap.c
index 15678aa..74f4d15 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 
+#include "internal.h"
+
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)      (0)
 #endif
 
+#ifndef arch_rebalance_pgtables
+#define arch_rebalance_pgtables(addr, len)             (addr)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
@@ -68,15 +75,16 @@ pgprot_t protection_map[16] = {
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-       return protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       return __pgprot(pgprot_val(protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+                       pgprot_val(arch_vm_get_page_prot(vm_flags)));
 }
 EXPORT_SYMBOL(vm_get_page_prot);
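The rewritten vm_get_page_prot() above no longer returns the protection_map[] entry directly: it ORs in whatever bits arch_vm_get_page_prot() contributes for the architecture. A minimal userspace sketch of that lookup-then-OR pattern follows; the flag values, table contents and arch hook are illustrative assumptions, not the kernel's definitions.

/*
 * Sketch only: index a 16-entry table by the R/W/X/SHARED bits and OR
 * in architecture-specific bits, mirroring vm_get_page_prot() above.
 */
#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static unsigned long protection_map[16];	/* base bits per combination */

/* stand-in for arch_vm_get_page_prot(): no extra bits by default */
static unsigned long arch_prot_bits(unsigned long vm_flags)
{
	(void)vm_flags;
	return 0;
}

static unsigned long get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)] |
	       arch_prot_bits(vm_flags);
}

int main(void)
{
	protection_map[VM_READ | VM_WRITE] = 0x60;	/* arbitrary demo value */
	printf("prot=%#lx\n", get_page_prot(VM_READ | VM_WRITE));
	return 0;
}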
 
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -173,7 +181,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
         * cast `allowed' as a signed long because vm_committed_space
         * sometimes has a negative value
         */
-       if (atomic_read(&vm_committed_space) < (long)allowed)
+       if (atomic_long_read(&vm_committed_space) < (long)allowed)
                return 0;
 error:
        vm_unacct_memory(pages);
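For context on the atomic_long_t switch: the committed-space counter is compared as a signed long because vm_unacct_memory() can drive it transiently negative, and a machine-word counter cannot overflow on 64-bit machines with very large page counts the way a 32-bit atomic_t could. A compressed userspace sketch of the account-then-check shape, with hypothetical helper names, is below.

/*
 * Sketch, not the kernel code: account the pages first, compare as a
 * signed long against the allowed limit, and back the charge out on
 * failure (the kernel returns -ENOMEM there).
 */
#include <stdio.h>

static long vm_committed_space;		/* atomic_long_t in the kernel */

static int enough_memory(long pages, unsigned long allowed)
{
	vm_committed_space += pages;		/* vm_acct_memory() */
	if (vm_committed_space < (long)allowed)
		return 0;			/* success */
	vm_committed_space -= pages;		/* vm_unacct_memory() */
	return -1;
}

int main(void)
{
	printf("%d\n", enough_memory(16, 1024));	/* 0: within limit */
	printf("%d\n", enough_memory(4096, 1024));	/* -1: over limit */
	return 0;
}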
@@ -226,9 +234,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
-       if (vma->vm_file)
+       if (vma->vm_file) {
                fput(vma->vm_file);
-       mpol_free(vma_policy(vma));
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(vma->vm_mm);
+       }
+       mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
 }
@@ -238,10 +249,16 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
+       unsigned long min_brk;
 
        down_write(&mm->mmap_sem);
 
-       if (brk < mm->end_code)
+#ifdef CONFIG_COMPAT_BRK
+       min_brk = mm->end_code;
+#else
+       min_brk = mm->start_brk;
+#endif
+       if (brk < min_brk)
                goto out;
 
        /*
@@ -251,7 +268,8 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
         * not page aligned -Ram Gupta
         */
        rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+       if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+                       (mm->end_data - mm->start_data) > rlim)
                goto out;
 
        newbrk = PAGE_ALIGN(brk);
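The new RLIMIT_DATA test charges the requested heap growth (brk - start_brk) plus the static data segment (end_data - start_data), instead of measuring brk against start_data alone. A small sketch of that arithmetic, using an illustrative struct and made-up addresses, is shown below.

/*
 * Sketch of the rlimit check above: heap growth plus data segment must
 * not exceed the limit.  The struct is a stand-in for the handful of
 * mm_struct fields involved; the addresses are arbitrary.
 */
#include <stdio.h>

struct mm_layout {
	unsigned long start_brk, start_data, end_data;
};

static int brk_within_rlimit(const struct mm_layout *mm,
			     unsigned long brk, unsigned long rlim)
{
	return (brk - mm->start_brk) +
	       (mm->end_data - mm->start_data) <= rlim;
}

int main(void)
{
	struct mm_layout mm = {
		.start_brk  = 0x1000000,
		.start_data = 0x600000,
		.end_data   = 0x700000,	/* 1 MiB of data */
	};

	/* 2 MiB of heap + 1 MiB of data against a 4 MiB limit: fits */
	printf("%d\n", brk_within_rlimit(&mm, 0x1200000, 4UL << 20));
	return 0;
}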
@@ -352,7 +370,7 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                if (vma_tmp->vm_end > addr) {
                        vma = vma_tmp;
                        if (vma_tmp->vm_start <= addr)
-                               return vma;
+                               break;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
@@ -392,7 +410,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 }
 
-static inline void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma)
 {
        struct file * file;
 
@@ -618,10 +636,13 @@ again:                    remove_next = 1 + (end > next->vm_end);
                spin_unlock(&mapping->i_mmap_lock);
 
        if (remove_next) {
-               if (file)
+               if (file) {
                        fput(file);
+                       if (next->vm_flags & VM_EXECUTABLE)
+                               removed_exe_file_vma(mm);
+               }
                mm->map_count--;
-               mpol_free(vma_policy(next));
+               mpol_put(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
@@ -641,8 +662,6 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
-
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
 {
@@ -951,6 +970,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        return -EPERM;
                vm_flags |= VM_LOCKED;
        }
+
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
@@ -1009,6 +1029,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
+                       /*
+                        * Ignore pgoff.
+                        */
+                       pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
@@ -1063,7 +1087,6 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
-
 unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
                          unsigned int vm_flags, unsigned long pgoff,
@@ -1091,6 +1114,9 @@ munmap_back:
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
+       if (flags & MAP_NORESERVE)
+               vm_flags |= VM_NORESERVE;
+
        if (accountable && (!(flags & MAP_NORESERVE) ||
                            sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
                if (vm_flags & VM_SHARED) {
@@ -1112,10 +1138,12 @@ munmap_back:
         * The VM_SHARED test is necessary because shmem_zero_setup
         * will create the file object for a shared anonymous map below.
         */
-       if (!file && !(vm_flags & VM_SHARED) &&
-           vma_merge(mm, prev, addr, addr + len, vm_flags,
-                                       NULL, NULL, pgoff, NULL))
-               goto out;
+       if (!file && !(vm_flags & VM_SHARED)) {
+               vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+                                       NULL, NULL, pgoff, NULL);
+               if (vma)
+                       goto out;
+       }
 
        /*
         * Determine the object being mapped and call the appropriate
@@ -1150,6 +1178,8 @@ munmap_back:
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
+               if (vm_flags & VM_EXECUTABLE)
+                       added_exe_file_vma(mm);
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
@@ -1176,29 +1206,33 @@ munmap_back:
        if (vma_wants_writenotify(vma))
                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-       if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
+       if (file && vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               file = vma->vm_file;
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               if (correct_wcount)
-                       atomic_inc(&inode->i_writecount);
-       } else {
-               if (file) {
-                       if (correct_wcount)
-                               atomic_inc(&inode->i_writecount);
-                       fput(file);
-               }
-               mpol_free(vma_policy(vma));
+               mpol_put(vma_policy(vma));
                kmem_cache_free(vm_area_cachep, vma);
+               fput(file);
+               if (vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(mm);
+       } else {
+               vma_link(mm, vma, prev, rb_link, rb_parent);
+               file = vma->vm_file;
        }
-out:   
+
+       /* Once vma denies write, undo our temporary denial count */
+       if (correct_wcount)
+               atomic_inc(&inode->i_writecount);
+out:
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+               /*
+                * makes pages present; downgrades, drops, reacquires mmap_sem
+                */
+               long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
+               if (nr_pages < 0)
+                       return nr_pages;        /* vma gone! */
+               mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+       } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);
        return addr;
 
@@ -1423,7 +1457,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return addr;
+       return arch_rebalance_pgtables(addr, len);
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1557,7 +1591,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
 #ifndef CONFIG_IA64
-static inline
+static
 #endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
@@ -1607,7 +1641,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-static inline int expand_downwards(struct vm_area_struct *vma,
+static int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        int error;
@@ -1620,7 +1654,7 @@ static inline int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(0, 0, 0, 0, address, 1);
+       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
        if (error)
                return error;
 
@@ -1669,10 +1703,12 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       if (expand_stack(prev, addr))
                return NULL;
-       if (prev->vm_flags & VM_LOCKED)
-               make_pages_present(addr, prev->vm_end);
+       if (prev->vm_flags & VM_LOCKED) {
+               if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
+                       return NULL;    /* vma gone! */
+       }
        return prev;
 }
 #else
@@ -1698,8 +1734,10 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED)
-               make_pages_present(addr, start);
+       if (vma->vm_flags & VM_LOCKED) {
+               if (mlock_vma_pages_range(vma, addr, start) < 0)
+                       return NULL;    /* vma gone! */
+       }
        return vma;
 }
 #endif
@@ -1718,8 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
                long nrpages = vma_pages(vma);
 
                mm->total_vm -= nrpages;
-               if (vma->vm_flags & VM_LOCKED)
-                       mm->locked_vm -= nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
@@ -1744,7 +1780,7 @@ static void unmap_region(struct mm_struct *mm,
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
+       free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                                 next? next->vm_start: 0);
        tlb_finish_mmu(tlb, start, end);
 }
@@ -1788,7 +1824,8 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        struct mempolicy *pol;
        struct vm_area_struct *new;
 
-       if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
+       if (is_vm_hugetlb_page(vma) && (addr &
+                                       ~(huge_page_mask(hstate_vma(vma)))))
                return -EINVAL;
 
        if (mm->map_count >= sysctl_max_map_count)
@@ -1808,15 +1845,18 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
        }
 
-       pol = mpol_copy(vma_policy(vma));
+       pol = mpol_dup(vma_policy(vma));
        if (IS_ERR(pol)) {
                kmem_cache_free(vm_area_cachep, new);
                return PTR_ERR(pol);
        }
        vma_set_policy(new, pol);
 
-       if (new->vm_file)
+       if (new->vm_file) {
                get_file(new->vm_file);
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       added_exe_file_vma(mm);
+       }
 
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
@@ -1881,6 +1921,20 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        vma = prev? prev->vm_next: mm->mmap;
 
        /*
+        * unlock any mlock()ed ranges before detaching vmas
+        */
+       if (mm->locked_vm) {
+               struct vm_area_struct *tmp = vma;
+               while (tmp && tmp->vm_start < end) {
+                       if (tmp->vm_flags & VM_LOCKED) {
+                               mm->locked_vm -= vma_pages(tmp);
+                               munlock_vma_pages_all(tmp);
+                       }
+                       tmp = tmp->vm_next;
+               }
+       }
+
+       /*
         * Remove the vma's, and unmap the actual pages
         */
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
@@ -1941,7 +1995,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (is_hugepage_only_range(mm, addr, len))
                return -EINVAL;
 
-       error = security_file_mmap(0, 0, 0, 0, addr, 1);
+       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
        if (error)
                return error;
 
@@ -1992,8 +2046,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                return -ENOMEM;
 
        /* Can we just expand an old private anonymous mapping? */
-       if (vma_merge(mm, prev, addr, addr + len, flags,
-                                       NULL, NULL, pgoff, NULL))
+       vma = vma_merge(mm, prev, addr, addr + len, flags,
+                                       NULL, NULL, pgoff, NULL);
+       if (vma)
                goto out;
 
        /*
@@ -2015,8 +2070,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
+               if (!mlock_vma_pages_range(vma, addr, addr + len))
+                       mm->locked_vm += (len >> PAGE_SHIFT);
        }
        return addr;
 }
@@ -2027,13 +2082,23 @@ EXPORT_SYMBOL(do_brk);
 void exit_mmap(struct mm_struct *mm)
 {
        struct mmu_gather *tlb;
-       struct vm_area_struct *vma = mm->mmap;
+       struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
        unsigned long end;
 
        /* mm's last user has gone, and it's about to be pulled down */
        arch_exit_mmap(mm);
-
+       mmu_notifier_release(mm);
+
+       if (mm->locked_vm) {
+               vma = mm->mmap;
+               while (vma) {
+                       if (vma->vm_flags & VM_LOCKED)
+                               munlock_vma_pages_all(vma);
+                       vma = vma->vm_next;
+               }
+       }
+       vma = mm->mmap;
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
@@ -2041,7 +2106,7 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+       free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
 
        /*
@@ -2124,7 +2189,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
-                       pol = mpol_copy(vma_policy(vma));
+                       pol = mpol_dup(vma_policy(vma));
                        if (IS_ERR(pol)) {
                                kmem_cache_free(vm_area_cachep, new_vma);
                                return NULL;
@@ -2133,8 +2198,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        new_vma->vm_start = addr;
                        new_vma->vm_end = addr + len;
                        new_vma->vm_pgoff = pgoff;
-                       if (new_vma->vm_file)
+                       if (new_vma->vm_file) {
                                get_file(new_vma->vm_file);
+                               if (vma->vm_flags & VM_EXECUTABLE)
+                                       added_exe_file_vma(mm);
+                       }
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        vma_link(mm, new_vma, prev, rb_link, rb_parent);
@@ -2160,24 +2228,31 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 }
 
 
-static struct page *special_mapping_nopage(struct vm_area_struct *vma,
-                                          unsigned long address, int *type)
+static int special_mapping_fault(struct vm_area_struct *vma,
+                               struct vm_fault *vmf)
 {
+       pgoff_t pgoff;
        struct page **pages;
 
-       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+       /*
+        * special mappings have no vm_file, and in that case, the mm
+        * uses vm_pgoff internally. So we have to subtract it from here.
+        * We are allowed to do this because we are the mm; do not copy
+        * this code into drivers!
+        */
+       pgoff = vmf->pgoff - vma->vm_pgoff;
 
-       address -= vma->vm_start;
-       for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
-               address -= PAGE_SIZE;
+       for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
+               pgoff--;
 
        if (*pages) {
                struct page *page = *pages;
                get_page(page);
-               return page;
+               vmf->page = page;
+               return 0;
        }
 
-       return NOPAGE_SIGBUS;
+       return VM_FAULT_SIGBUS;
 }
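The conversion from ->nopage to ->fault above keeps the same lookup: the page index is vmf->pgoff minus the vma's own vm_pgoff, the NULL-terminated pages array is walked until that index is consumed, and running off the end becomes VM_FAULT_SIGBUS instead of NOPAGE_SIGBUS. A userspace sketch of just that walk follows (names and types are illustrative, not kernel API).

/*
 * Sketch of the array walk in special_mapping_fault(): return the
 * entry at 'pgoff' in a NULL-terminated pointer array, or NULL when
 * the offset runs past the end (the SIGBUS case in the kernel).
 */
#include <stdio.h>

static void *lookup_special_page(void **pages, unsigned long pgoff)
{
	for (; pgoff && *pages; ++pages)
		pgoff--;
	return *pages;
}

int main(void)
{
	int a, b;
	void *pages[] = { &a, &b, NULL };

	printf("%p %p %p\n",
	       lookup_special_page(pages, 0),	/* &a */
	       lookup_special_page(pages, 1),	/* &b */
	       lookup_special_page(pages, 5));	/* NULL: out of range */
	return 0;
}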
 
 /*
@@ -2189,7 +2264,7 @@ static void special_mapping_close(struct vm_area_struct *vma)
 
 static struct vm_operations_struct special_mapping_vmops = {
        .close = special_mapping_close,
-       .nopage = special_mapping_nopage,
+       .fault = special_mapping_fault,
 };
 
 /*
@@ -2215,7 +2290,7 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
-       vma->vm_flags = vm_flags | mm->def_flags;
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        vma->vm_ops = &special_mapping_vmops;
@@ -2230,3 +2305,167 @@ int install_special_mapping(struct mm_struct *mm,
 
        return 0;
 }
+
+static DEFINE_MUTEX(mm_all_locks_mutex);
+
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
+{
+       if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+               /*
+                * The LSB of head.next can't change from under us
+                * because we hold the mm_all_locks_mutex.
+                */
+               spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
+               /*
+                * We can safely modify head.next after taking the
+                * anon_vma->lock. If some other vma in this mm shares
+                * the same anon_vma we won't take it again.
+                *
+                * No need of atomic instructions here, head.next
+                * can't change from under us thanks to the
+                * anon_vma->lock.
+                */
+               if (__test_and_set_bit(0, (unsigned long *)
+                                      &anon_vma->head.next))
+                       BUG();
+       }
+}
+
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
+{
+       if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+               /*
+                * AS_MM_ALL_LOCKS can't change from under us because
+                * we hold the mm_all_locks_mutex.
+                *
+                * Operations on ->flags have to be atomic because
+                * even if AS_MM_ALL_LOCKS is stable thanks to the
+                * mm_all_locks_mutex, there may be other cpus
+                * changing other bitflags in parallel to us.
+                */
+               if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
+                       BUG();
+               spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+       }
+}
+
+/*
+ * This operation locks against the VM for all pte/vma/mm related
+ * operations that could ever happen on a certain mm. This includes
+ * vmtruncate, try_to_unmap, and all page faults.
+ *
+ * The caller must take the mmap_sem in write mode before calling
+ * mm_take_all_locks(). The caller isn't allowed to release the
+ * mmap_sem until mm_drop_all_locks() returns.
+ *
+ * mmap_sem in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without the need to
+ * alter the vma layout (for example populate_range() with nonlinear
+ * vmas). It's also needed in write mode to prevent new anon_vmas from
+ * being associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+ * or it would deadlock.
+ *
+ * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
+ * mapping->flags prevent taking the same lock twice when more than one
+ * vma in this mm is backed by the same anon_vma or address_space.
+ *
+ * We can take all the locks in random order because the VM code
+ * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * takes more than one of them in a row. Secondly we're protected
+ * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
+ *
+ * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
+ * that may have to take thousands of locks.
+ *
+ * mm_take_all_locks() can fail if it's interrupted by signals.
+ */
+int mm_take_all_locks(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+       int ret = -EINTR;
+
+       BUG_ON(down_read_trylock(&mm->mmap_sem));
+
+       mutex_lock(&mm_all_locks_mutex);
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               if (vma->vm_file && vma->vm_file->f_mapping)
+                       vm_lock_mapping(mm, vma->vm_file->f_mapping);
+       }
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               if (vma->anon_vma)
+                       vm_lock_anon_vma(mm, vma->anon_vma);
+       }
+
+       ret = 0;
+
+out_unlock:
+       if (ret)
+               mm_drop_all_locks(mm);
+
+       return ret;
+}
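vm_lock_anon_vma() and vm_unlock_anon_vma() above reuse bit 0 of anon_vma->head.next as an "already locked by mm_take_all_locks()" marker; the bit is free because list_head pointers are at least word-aligned. The userspace sketch below demonstrates only that pointer-tagging idea, under the assumption of word-aligned pointers; it does not model the kernel's locking or atomicity rules.

/*
 * Sketch of tagging the LSB of an aligned pointer, mirroring the
 * __test_and_set_bit()/__test_and_clear_bit() use on head.next above.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int test_and_set_lsb(struct list_head **p)
{
	unsigned long v = (unsigned long)*p;

	*p = (struct list_head *)(v | 1UL);
	return v & 1UL;
}

static int test_and_clear_lsb(struct list_head **p)
{
	unsigned long v = (unsigned long)*p;

	*p = (struct list_head *)(v & ~1UL);
	return v & 1UL;
}

int main(void)
{
	static struct list_head node;
	struct list_head head = { .next = &node, .prev = &node };

	printf("first mark:  %d\n", test_and_set_lsb(&head.next));	/* 0: was clear */
	printf("second mark: %d\n", test_and_set_lsb(&head.next));	/* 1: already marked */
	printf("clear mark:  %d\n", test_and_clear_lsb(&head.next));	/* 1: was marked */
	printf("restored:    %d\n", head.next == &node);		/* 1: pointer untagged */
	return 0;
}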
+
+static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+       if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+               /*
+                * The LSB of head.next can't change to 0 from under
+                * us because we hold the mm_all_locks_mutex.
+                *
+                * We must however clear the bitflag before unlocking
+                * the vma so the users using the anon_vma->head will
+                * never see our bitflag.
+                *
+                * No need of atomic instructions here, head.next
+                * can't change from under us until we release the
+                * anon_vma->lock.
+                */
+               if (!__test_and_clear_bit(0, (unsigned long *)
+                                         &anon_vma->head.next))
+                       BUG();
+               spin_unlock(&anon_vma->lock);
+       }
+}
+
+static void vm_unlock_mapping(struct address_space *mapping)
+{
+       if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+               /*
+                * AS_MM_ALL_LOCKS can't change to 0 from under us
+                * because we hold the mm_all_locks_mutex.
+                */
+               spin_unlock(&mapping->i_mmap_lock);
+               if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
+                                       &mapping->flags))
+                       BUG();
+       }
+}
+
+/*
+ * The mmap_sem cannot be released by the caller until
+ * mm_drop_all_locks() returns.
+ */
+void mm_drop_all_locks(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+
+       BUG_ON(down_read_trylock(&mm->mmap_sem));
+       BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (vma->anon_vma)
+                       vm_unlock_anon_vma(vma->anon_vma);
+               if (vma->vm_file && vma->vm_file->f_mapping)
+                       vm_unlock_mapping(vma->vm_file->f_mapping);
+       }
+
+       mutex_unlock(&mm_all_locks_mutex);
+}