mm: fix various kernel-doc comments
diff --git a/mm/mmap.c b/mm/mmap.c
index cc3a208..a32d28c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/backing-dev.h>
 #include <linux/mm.h>
 #include <linux/shm.h>
 #include <linux/mman.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
+#include <asm/mmu_context.h>
 
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)      (0)
 #endif
 
+#ifndef arch_rebalance_pgtables
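+/* default: no arch-specific page-table rebalancing; return the address as-is */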
+#define arch_rebalance_pgtables(addr, len)             (addr)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
@@ -92,7 +98,7 @@ atomic_t vm_committed_space = ATOMIC_INIT(0);
  * Note this is a helper function intended to be used by LSMs which
  * wish to use this logic.
  */
-int __vm_enough_memory(long pages, int cap_sys_admin)
+int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
        unsigned long free, allowed;
 
@@ -165,7 +171,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 
        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
-       allowed -= current->mm->total_vm / 32;
+       allowed -= mm->total_vm / 32;
 
        /*
         * cast `allowed' as a signed long because vm_committed_space
@@ -179,8 +185,6 @@ error:
        return -ENOMEM;
 }
 
-EXPORT_SYMBOL(__vm_enough_memory);
-
 /*
  * Requires inode->i_mapping->i_mmap_lock
  */
@@ -241,7 +245,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
        down_write(&mm->mmap_sem);
 
-       if (brk < mm->end_code)
+       if (brk < mm->start_brk)
                goto out;
 
        /*
@@ -251,7 +255,8 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
         * not page aligned -Ram Gupta
         */
        rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
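+       /* charge the heap plus the static data segment against RLIMIT_DATA */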
+       if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+                       (mm->end_data - mm->start_data) > rlim)
                goto out;
 
        newbrk = PAGE_ALIGN(brk);
@@ -299,6 +304,8 @@ static int browse_rb(struct rb_root *root)
                        printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
                i++;
                pn = nd;
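+               /* record this vma's bounds for the checks on the next node */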
+               prev = vma->vm_start;
+               pend = vma->vm_end;
        }
        j = 0;
        for (nd = pn; nd; nd = rb_prev(nd)) {
@@ -891,14 +898,11 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                        unsigned long flags, unsigned long pgoff)
 {
        struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
        struct inode *inode;
        unsigned int vm_flags;
-       int correct_wcount = 0;
        int error;
-       struct rb_node ** rb_link, * rb_parent;
        int accountable = 1;
-       unsigned long charged = 0, reqprot = prot;
+       unsigned long reqprot = prot;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -913,6 +917,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        if (!len)
                return -EINVAL;
 
+       /* a hint below the minimum mapping address is rounded up, not rejected */
+       if (!(flags & MAP_FIXED))
+               addr = round_hint_to_min(addr);
+
        error = arch_mmap_check(addr, len, flags);
        if (error)
                return error;
@@ -1020,10 +1027,61 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags);
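+       /* pass the target address so security modules can police low mappings */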
+       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
        if (error)
                return error;
-               
+
+       return mmap_region(file, addr, len, flags, vm_flags, pgoff,
+                          accountable);
+}
+EXPORT_SYMBOL(do_mmap_pgoff);
+
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using vm_get_page_prot() without the
+ * VM_SHARED bit).
+ */
+int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+       unsigned int vm_flags = vma->vm_flags;
+
+       /* If it was private or non-writable, the write bit is already clear */
+       if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+               return 0;
+
+       /* The backer wishes to know when pages are first written to? */
+       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+               return 1;
+
+       /* The open routine did something to the protections already? */
+       if (pgprot_val(vma->vm_page_prot) !=
+           pgprot_val(vm_get_page_prot(vm_flags)))
+               return 0;
+
+       /* Specialty mapping? */
+       if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+               return 0;
+
+       /* Can the mapping track the dirty pages? */
+       return vma->vm_file && vma->vm_file->f_mapping &&
+               mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
+unsigned long mmap_region(struct file *file, unsigned long addr,
+                         unsigned long len, unsigned long flags,
+                         unsigned int vm_flags, unsigned long pgoff,
+                         int accountable)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *prev;
+       int correct_wcount = 0;
+       int error;
+       struct rb_node **rb_link, *rb_parent;
+       unsigned long charged = 0;
+       struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+
        /* Clear old maps */
        error = -ENOMEM;
 munmap_back:
@@ -1079,8 +1137,7 @@ munmap_back:
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
 
        if (file) {
@@ -1122,8 +1179,7 @@ munmap_back:
        vm_flags = vma->vm_flags;
 
        if (vma_wants_writenotify(vma))
-               vma->vm_page_prot =
-                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+               vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
        if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
@@ -1147,12 +1203,8 @@ out:
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
-       if (flags & MAP_POPULATE) {
-               up_write(&mm->mmap_sem);
-               sys_remap_file_pages(addr, len, 0,
-                                       pgoff, flags & MAP_NONBLOCK);
-               down_write(&mm->mmap_sem);
-       }
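+       /* populate in place rather than bouncing through sys_remap_file_pages() */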
+       if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+               make_pages_present(addr, addr + len);
        return addr;
 
 unmap_and_free_vma:
@@ -1172,8 +1224,6 @@ unacct_error:
        return error;
 }
 
-EXPORT_SYMBOL(do_mmap_pgoff);
-
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -1197,6 +1247,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       /* MAP_FIXED: the address is not negotiable; get_unmapped_area() validates it */
+       if (flags & MAP_FIXED)
+               return addr;
+
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
@@ -1270,6 +1323,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       if (flags & MAP_FIXED)
+               return addr;
+
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
@@ -1357,40 +1413,22 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
-       unsigned long ret;
-
-       if (!(flags & MAP_FIXED)) {
-               unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-
-               get_area = current->mm->get_unmapped_area;
-               if (file && file->f_op && file->f_op->get_unmapped_area)
-                       get_area = file->f_op->get_unmapped_area;
-               addr = get_area(file, addr, len, pgoff, flags);
-               if (IS_ERR_VALUE(addr))
-                       return addr;
-       }
+       unsigned long (*get_area)(struct file *, unsigned long,
+                                 unsigned long, unsigned long, unsigned long);
+
+       get_area = current->mm->get_unmapped_area;
+       if (file && file->f_op && file->f_op->get_unmapped_area)
+               get_area = file->f_op->get_unmapped_area;
+       addr = get_area(file, addr, len, pgoff, flags);
+       if (IS_ERR_VALUE(addr))
+               return addr;
 
        if (addr > TASK_SIZE - len)
                return -ENOMEM;
        if (addr & ~PAGE_MASK)
                return -EINVAL;
-       if (file && is_file_hugepages(file))  {
-               /*
-                * Check if the given range is hugepage aligned, and
-                * can be made suitable for hugepages.
-                */
-               ret = prepare_hugepage_range(addr, len, pgoff);
-       } else {
-               /*
-                * Ensure that a normal request is not falling in a
-                * reserved hugepage range.  For some archs like IA-64,
-                * there is a separate region for hugepages.
-                */
-               ret = is_hugepage_only_range(current->mm, addr, len);
-       }
-       if (ret)
-               return -EINVAL;
-       return addr;
+
+       return arch_rebalance_pgtables(addr, len);
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1545,9 +1583,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
+        * Also guard against wrapping around to address 0.
         */
-       address += 4 + PAGE_SIZE - 1;
-       address &= PAGE_MASK;
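+       /* PAGE_ALIGN(address + 4) matches the old rounding but catches wrap to 0 */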
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else {
+               anon_vma_unlock(vma);
+               return -ENOMEM;
+       }
        error = 0;
 
        /* Somebody else might have raced and expanded it already */
@@ -1566,33 +1609,11 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
 
-#ifdef CONFIG_STACK_GROWSUP
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-       return expand_upwards(vma, address);
-}
-
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-       struct vm_area_struct *vma, *prev;
-
-       addr &= PAGE_MASK;
-       vma = find_vma_prev(mm, addr, &prev);
-       if (vma && (vma->vm_start <= addr))
-               return vma;
-       if (!prev || expand_stack(prev, addr))
-               return NULL;
-       if (prev->vm_flags & VM_LOCKED) {
-               make_pages_present(addr, prev->vm_end);
-       }
-       return prev;
-}
-#else
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+static inline int expand_downwards(struct vm_area_struct *vma,
+                                  unsigned long address)
 {
        int error;
 
@@ -1602,6 +1623,12 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
+
+       address &= PAGE_MASK;
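+       /* the addr_only security check may veto growth into low memory */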
+       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       if (error)
+               return error;
+
        anon_vma_lock(vma);
 
        /*
@@ -1609,8 +1636,6 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
-       address &= PAGE_MASK;
-       error = 0;
 
        /* Somebody else might have raced and expanded it already */
        if (address < vma->vm_start) {
@@ -1629,6 +1654,38 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
        return error;
 }
 
+int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_downwards(vma, address);
+}
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_upwards(vma, address);
+}
+
+struct vm_area_struct *
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+       struct vm_area_struct *vma, *prev;
+
+       addr &= PAGE_MASK;
+       vma = find_vma_prev(mm, addr, &prev);
+       if (vma && (vma->vm_start <= addr))
+               return vma;
+       if (!prev || expand_stack(prev, addr))
+               return NULL;
+       if (prev->vm_flags & VM_LOCKED)
+               make_pages_present(addr, prev->vm_end);
+       return prev;
+}
+#else
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return expand_downwards(vma, address);
+}
+
 struct vm_area_struct *
 find_extend_vma(struct mm_struct * mm, unsigned long addr)
 {
@@ -1646,9 +1703,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
+       if (vma->vm_flags & VM_LOCKED)
                make_pages_present(addr, start);
-       }
        return vma;
 }
 #endif
@@ -1729,7 +1785,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 
 /*
  * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the the tail.
+ * either for the first part or the tail.
  */
 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
              unsigned long addr, int new_below)
@@ -1890,6 +1946,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (is_hugepage_only_range(mm, addr, len))
                return -EINVAL;
 
+       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
+       if (error)
+               return error;
+
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = arch_mmap_check(addr, len, flags);
@@ -1955,8 +2015,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       vma->vm_page_prot = vm_get_page_prot(flags);
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
        mm->total_vm += len >> PAGE_SHIFT;
@@ -1977,6 +2036,9 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long nr_accounted = 0;
        unsigned long end;
 
+       /* mm's last user has gone, and it's about to be pulled down */
+       arch_exit_mmap(mm);
+
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
@@ -2026,7 +2088,7 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        if (__vma && __vma->vm_start < vma->vm_end)
                return -ENOMEM;
        if ((vma->vm_flags & VM_ACCOUNT) &&
-            security_vm_enough_memory(vma_pages(vma)))
+            security_vm_enough_memory_mm(mm, vma_pages(vma)))
                return -ENOMEM;
        vma_link(mm, vma, prev, rb_link, rb_parent);
        return 0;
@@ -2101,3 +2163,82 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
                return 0;
        return 1;
 }
+
+
+static int special_mapping_fault(struct vm_area_struct *vma,
+                               struct vm_fault *vmf)
+{
+       pgoff_t pgoff;
+       struct page **pages;
+
+       /*
+        * special mappings have no vm_file, and in that case, the mm
+        * uses vm_pgoff internally. So we have to subtract it from here.
+        * We are allowed to do this because we are the mm; do not copy
+        * this code into drivers!
+        */
+       pgoff = vmf->pgoff - vma->vm_pgoff;
+
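+       /* walk the NULL-terminated page array to the faulting index */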
+       for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
+               pgoff--;
+
+       if (*pages) {
+               struct page *page = *pages;
+               get_page(page);
+               vmf->page = page;
+               return 0;
+       }
+
+       return VM_FAULT_SIGBUS;
+}
+
+/*
+ * Having a close hook prevents vma merging regardless of flags.
+ */
+static void special_mapping_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct special_mapping_vmops = {
+       .close = special_mapping_close,
+       .fault = special_mapping_fault,
+};
+
+/*
+ * Called with mm->mmap_sem held for writing.
+ * Insert a new vma covering the given region, with the given flags.
+ * Its pages are supplied by the given array of struct page *.
+ * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+                           unsigned long addr, unsigned long len,
+                           unsigned long vm_flags, struct page **pages)
+{
+       struct vm_area_struct *vma;
+
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       if (unlikely(vma == NULL))
+               return -ENOMEM;
+
+       vma->vm_mm = mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
+
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+       vma->vm_ops = &special_mapping_vmops;
+       vma->vm_private_data = pages;
+
+       if (unlikely(insert_vm_struct(mm, vma))) {
+               kmem_cache_free(vm_area_cachep, vma);
+               return -ENOMEM;
+       }
+
+       mm->total_vm += len >> PAGE_SHIFT;
+
+       return 0;
+}
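
Usage sketch (not part of the patch): install_special_mapping() wants
mmap_sem held for writing and a NULL-terminated array of pages, as the
comment above it describes. A minimal caller installing a single page
might look like the following; map_one_page(), my_pages and the page
argument are illustrative names, not kernel API.

	#include <linux/mm.h>
	#include <linux/sched.h>

	static struct page *my_pages[2];	/* one page + NULL terminator */

	/* Map @page read/execute at @addr in @mm; returns 0 or -errno. */
	static int map_one_page(struct mm_struct *mm, struct page *page,
				unsigned long addr)
	{
		int ret;

		my_pages[0] = page;
		my_pages[1] = NULL;	/* faults past this page raise SIGBUS */

		down_write(&mm->mmap_sem);
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ | VM_EXEC |
					      VM_MAYREAD | VM_MAYEXEC,
					      my_pages);
		up_write(&mm->mmap_sem);
		return ret;
	}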