sh: convert /proc/cpu/alignment, /proc/cpu/kernel_alignment to seq_file
diff --git a/mm/mremap.c b/mm/mremap.c
index 1a77439..97bff25 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -3,7 +3,7 @@
  *
  *     (C) Copyright 1996 Linus Torvalds
  *
- *     Address space accounting code   <alan@redhat.com>
+ *     Address space accounting code   <alan@lxorguk.ukuu.org.uk>
  *     (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  */
 
@@ -11,6 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/shm.h>
+#include <linux/ksm.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
 #include <linux/capability.h>
@@ -24,6 +25,8 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
@@ -83,8 +86,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
-                * moving file-based ptes, we must lock vmtruncate out,
-                * since it might clean the dst vma before the src vma,
+                * moving file-based ptes, we must lock truncate_pagecache
+                * out, since it might clean the dst vma before the src vma,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
@@ -172,6 +175,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
+       int err;
 
        /*
         * We'd prefer to avoid failure later on in do_munmap:
@@ -180,6 +184,18 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;
 
+       /*
+        * Advise KSM to break any KSM pages in the area to be moved:
+        * it would be confusing if they were to turn up at the new
+        * location, where they happen to coincide with different KSM
+        * pages recently unmapped.  But leave vma->vm_flags as it was,
+        * so KSM can come around to merge on vma and new_vma afterwards.
+        */
+       err = ksm_madvise(vma, old_addr, old_addr + old_len,
+                                               MADV_UNMERGEABLE, &vm_flags);
+       if (err)
+               return err;
+
        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
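
For readers unfamiliar with the KSM interaction above, here is a minimal userspace sketch of the scenario this hunk handles. It is illustrative only, not part of the patch: the file name is made up, MADV_MERGEABLE requires a kernel built with CONFIG_KSM, and a 4 KiB page size is assumed.

/* ksm_mremap_demo.c — hypothetical demo: a region opted in to KSM is
 * moved with mremap().  With this hunk applied, move_vma() first applies
 * MADV_UNMERGEABLE semantics to the old range, so previously merged
 * pages are broken apart before their ptes are moved, while vm_flags is
 * left intact so KSM may re-merge at the new location afterwards. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (16 * 4096)

int main(void)
{
        char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        memset(p, 0x5a, LEN);                   /* identical pages: KSM merge candidates */
        if (madvise(p, LEN, MADV_MERGEABLE))    /* sets VM_MERGEABLE on the vma */
                return 1;

        /* The move: the kernel unmerges the old range first, so stale
         * shared pages cannot turn up at the new address. */
        char *q = mremap(p, LEN, LEN, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;

        printf("moved %p -> %p\n", (void *)p, (void *)q);
        return 0;
}
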
@@ -238,8 +254,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                if (new_len > old_len)
-                       make_pages_present(new_addr + old_len,
-                                          new_addr + new_len);
+                       mlock_vma_pages_range(new_vma, new_addr + old_len,
+                                                      new_addr + new_len);
        }
 
        return new_addr;
@@ -379,7 +395,7 @@ unsigned long do_mremap(unsigned long addr,
                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
-                               make_pages_present(addr + old_len,
+                               mlock_vma_pages_range(vma, addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
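
Both mlock_vma_pages_range() conversions (in move_vma() above and in this do_mremap() grow path) serve the same guarantee: when a VM_LOCKED mapping gets bigger, the added tail must be faulted in and marked mlocked, not merely made present as make_pages_present() did. A minimal userspace sketch of the grow case follows; it is illustrative only and assumes a 4 KiB page size and an RLIMIT_MEMLOCK that permits 8 pages.

#define _GNU_SOURCE
#include <sys/mman.h>

#define OLD_LEN (4 * 4096)
#define NEW_LEN (8 * 4096)

int main(void)
{
        void *p = mmap(NULL, OLD_LEN, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        if (mlock(p, OLD_LEN))          /* the vma is now VM_LOCKED */
                return 1;

        /* Growing the locked mapping: the kernel must populate and mlock
         * the extension (old_len..new_len) so the mlock guarantee holds. */
        void *q = mremap(p, OLD_LEN, NEW_LEN, MREMAP_MAYMOVE);
        return q == MAP_FAILED;
}
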
@@ -418,9 +434,9 @@ out_nc:
        return ret;
 }
 
-asmlinkage unsigned long sys_mremap(unsigned long addr,
-       unsigned long old_len, unsigned long new_len,
-       unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+               unsigned long, new_len, unsigned long, flags,
+               unsigned long, new_addr)
 {
        unsigned long ret;
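
The SYSCALL_DEFINE5 conversion is part of the tree-wide syscall wrapper work: on 64-bit architectures the generated wrapper takes each argument as a full register-width long and casts it back, forcing well-defined extension of 32-bit userspace values. What follows is a rough, simplified sketch of the expansion from memory, not the literal preprocessor output; the real macro lives in include/linux/syscalls.h and its shape depends on CONFIG_SYSCALL_WRAPPERS and the architecture.

/* Simplified sketch of what SYSCALL_DEFINE5(mremap, ...) generates: */
static inline long SYSC_mremap(unsigned long addr, unsigned long old_len,
                               unsigned long new_len, unsigned long flags,
                               unsigned long new_addr);

asmlinkage long SyS_mremap(long addr, long old_len, long new_len,
                           long flags, long new_addr)
{
        /* Arguments arrive as register-width longs; the casts re-extend
         * 32-bit values so unsanitized upper bits cannot leak through. */
        return SYSC_mremap((unsigned long)addr, (unsigned long)old_len,
                           (unsigned long)new_len, (unsigned long)flags,
                           (unsigned long)new_addr);
}

static inline long SYSC_mremap(unsigned long addr, unsigned long old_len,
                               unsigned long new_len, unsigned long flags,
                               unsigned long new_addr)
{
        /* ... original sys_mremap body, as in this file ... */
}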