mm: don't call mark_page_accessed() in do_swap_page()
diff --git a/mm/memory.c b/mm/memory.c
index c2d4c47..0017111 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
        int ret;
+       pgprot_t pgprot = vma->vm_page_prot;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
                return -EINVAL;
 
-       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       ret = insert_pfn(vma, addr, pfn, pgprot);
 
        if (ret)
                untrack_pfn_vma(vma, pfn, PAGE_SIZE);
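
The hunk above goes with a signature change: track_pfn_vma_new() now takes a pgprot_t pointer, because on x86 the PAT code may hand back a different memory type than the one requested, and the caller must build the pte from the possibly-updated protection. That is why vm_insert_pfn() now works on a local copy of vma->vm_page_prot instead of passing the value directly. A minimal standalone sketch of the pass-by-pointer pattern; prot_t, track_pfn and PROT_UNCACHED are illustrative stand-ins, not the kernel API:

#include <stdio.h>

typedef unsigned long prot_t;

#define PROT_DEFAULT  0x1UL
#define PROT_UNCACHED 0x2UL

/* The tracker may modify the caller's protection bits in place,
 * which is why it takes a pointer rather than a value. */
static int track_pfn(unsigned long pfn, prot_t *prot)
{
        if (pfn >= 0x100000UL)          /* pretend high pfns need UC */
                *prot |= PROT_UNCACHED;
        return 0;                       /* 0 = tracked successfully */
}

static int insert_pfn(unsigned long pfn, prot_t prot)
{
        printf("mapping pfn %#lx with prot %#lx\n", pfn, prot);
        return 0;
}

int main(void)
{
        prot_t prot = PROT_DEFAULT;     /* local copy, as in the hunk */

        if (track_pfn(0x123456UL, &prot))
                return 1;
        return insert_pfn(0x123456UL, prot);
}
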
@@ -1664,16 +1665,24 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (addr == vma->vm_start && end == vma->vm_end)
+       if (addr == vma->vm_start && end == vma->vm_end) {
                vma->vm_pgoff = pfn;
-       else if (is_cow_mapping(vma->vm_flags))
+               vma->vm_flags |= VM_PFN_AT_MMAP;
+       } else if (is_cow_mapping(vma->vm_flags))
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
-       if (err)
+       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+       if (err) {
+               /*
+                * Clear the flags so the higher-level routine that
+                * calls unmap_vmas() knows track_pfn cleanup is not
+                * needed.
+                */
+               vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+               vma->vm_flags &= ~VM_PFN_AT_MMAP;
                return -EINVAL;
+       }
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
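
Two things happen in the remap_pfn_range() hunk above: a vma whose pfn mapping covers its entire range at mmap time is now tagged with the new VM_PFN_AT_MMAP flag, and the error path rolls back every flag it set so the generic unmap path will not attempt track_pfn cleanup for a mapping that was never tracked. A standalone sketch of that flag bookkeeping; only the flag names mirror the hunk, the values and helpers are made up:

#include <stdio.h>

/* Illustrative flag values; only the names mirror the hunk. */
#define VM_IO          0x00004000UL
#define VM_RESERVED    0x00080000UL
#define VM_PFNMAP      0x00000400UL
#define VM_PFN_AT_MMAP 0x40000000UL

struct vma { unsigned long vm_flags; };

static int track_pfn_range(void)
{
        return -1;                      /* pretend tracking failed */
}

static int remap(struct vma *vma, int covers_whole_vma)
{
        if (covers_whole_vma)
                vma->vm_flags |= VM_PFN_AT_MMAP;   /* full-range pfnmap */

        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

        if (track_pfn_range()) {
                /* Roll the flags back so the generic unmap path
                 * never tries to untrack an untracked mapping. */
                vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
                vma->vm_flags &= ~VM_PFN_AT_MMAP;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct vma v = { 0 };

        printf("remap: %d, flags after: %#lx\n", remap(&v, 1), v.vm_flags);
        return 0;
}

Undoing the flags at the failure site is simpler than teaching every teardown path to distinguish tracked from untracked pfn mappings, which is the point of the in-line comment.
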
@@ -1992,7 +2001,7 @@ gotten:
         * Don't let another task, with possibly unlocked vma,
         * keep the mlocked page.
         */
-       if (vma->vm_flags & VM_LOCKED) {
+       if ((vma->vm_flags & VM_LOCKED) && old_page) {
                lock_page(old_page);    /* for LRU manipulation */
                clear_page_mlock(old_page);
                unlock_page(old_page);
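
The added old_page check matters because do_wp_page() also reaches this point for write faults on raw pfn mappings, where vm_normal_page() returned NULL and there is no struct page behind the address; without the guard, lock_page(NULL) would dereference a NULL pointer. A toy illustration of the guard, with struct page and clear_page_mlock() reduced to stand-ins:

#include <stdio.h>
#include <stddef.h>

/* Toy stand-ins for struct page and clear_page_mlock(). */
struct page { int mlocked; };

static void clear_page_mlock(struct page *p)
{
        p->mlocked = 0;
}

/* old_page is NULL when the write fault hit a raw pfn mapping,
 * so there is no struct page to lock or to strip mlock from. */
static void mlock_handoff(struct page *old_page, int vma_locked)
{
        if (vma_locked && old_page != NULL)
                clear_page_mlock(old_page);
}

int main(void)
{
        struct page p = { 1 };

        mlock_handoff(&p, 1);     /* normal COW: old page un-mlocked */
        mlock_handoff(NULL, 1);   /* pfn-map fault: safely skipped */
        printf("p.mlocked = %d\n", p.mlocked);
        return 0;
}
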
@@ -2426,8 +2435,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
-       mark_page_accessed(page);
-
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
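
Finally, the change the subject line names: do_swap_page() stops calling mark_page_accessed(). The page is mapped into a pte a few lines further down, so the access is already recorded in the pte's young bit and reclaim will see the reference through that channel; the explicit call credited the same fault a second time and skewed the page's LRU standing. A toy model of the two reference channels (the struct and helpers below are simplifications, not kernel code):

#include <stdio.h>
#include <stdbool.h>

/* Toy page: one software "referenced" flag plus a pte young bit. */
struct page { bool referenced; bool pte_young; };

/* Software channel: what mark_page_accessed() feeds. */
static void sw_mark_accessed(struct page *p)
{
        p->referenced = true;
}

/* Hardware channel: the MMU sets young once the pte is used. */
static void map_into_pte(struct page *p)
{
        p->pte_young = true;
}

/* Reclaim-style tally: each channel reports one reference. */
static int references_seen(const struct page *p)
{
        return (p->referenced ? 1 : 0) + (p->pte_young ? 1 : 0);
}

int main(void)
{
        struct page p = { false, false };

        sw_mark_accessed(&p);   /* the call the hunk removes */
        map_into_pte(&p);       /* the mapping that follows anyway */

        printf("references recorded for one fault: %d\n",
               references_seen(&p));
        return 0;
}

Run it and the tally prints 2: one fault, two recorded references, which is exactly the double count the patch removes.
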