vmscan: zone_reclaim() don't use insane swap_cluster_max
index c3d6dc4..ebee816 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -336,21 +336,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
-static int page_referenced_one(struct page *page,
-                              struct vm_area_struct *vma,
-                              unsigned int *mapcount,
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+                              unsigned long address, unsigned int *mapcount,
                               unsigned long *vm_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int referenced = 0;
 
-       address = vma_address(page, vma);
-       if (address == -EFAULT)
-               goto out;
-
        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;
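
The hunks that follow move the vma_address() lookup, and its -EFAULT check, out of the per-pte helpers and into their callers, which now resolve the address once per vma and pass it down. For reference, vma_address() (defined earlier in this file) maps the page's file offset linearly into the vma and returns -EFAULT when the page falls outside it; a paraphrased sketch for context, not part of this patch:

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	/* linear mapping of the page's file offset into this vma */
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;	/* page is not mapped within this vma */
	return address;
}
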
@@ -409,6 +403,9 @@ static int page_referenced_anon(struct page *page,
 
        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
@@ -416,7 +413,7 @@ static int page_referenced_anon(struct page *page,
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
-               referenced += page_referenced_one(page, vma,
+               referenced += page_referenced_one(page, vma, address,
                                                  &mapcount, vm_flags);
                if (!mapcount)
                        break;
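
One aside about the new caller-side check (not part of the patch): vma_address() returns an unsigned long, so the -EFAULT sentinel relies on ordinary C integer conversion rather than a signed return type. A minimal illustration:

	/*
	 * -EFAULT (-14) converted to unsigned long becomes ULONG_MAX - 13
	 * (0xfffffffffffffff2 on 64-bit), which can never be a page-aligned
	 * user address, so "address == -EFAULT" is an unambiguous
	 * "page not mapped in this vma" marker in the loops here.
	 */
	unsigned long address = -EFAULT;	/* implicit conversion, well defined */
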
@@ -474,6 +471,9 @@ static int page_referenced_file(struct page *page,
        mapcount = page_mapcount(page);
 
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
@@ -481,7 +481,7 @@ static int page_referenced_file(struct page *page,
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
-               referenced += page_referenced_one(page, vma,
+               referenced += page_referenced_one(page, vma, address,
                                                  &mapcount, vm_flags);
                if (!mapcount)
                        break;
@@ -535,18 +535,14 @@ int page_referenced(struct page *page,
        return referenced;
 }
 
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+                           unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;
 
-       address = vma_address(page, vma);
-       if (address == -EFAULT)
-               goto out;
-
        pte = page_check_address(page, mm, address, &ptl, 1);
        if (!pte)
                goto out;
@@ -578,8 +574,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-               if (vma->vm_flags & VM_SHARED)
-                       ret += page_mkclean_one(page, vma);
+               if (vma->vm_flags & VM_SHARED) {
+                       unsigned long address = vma_address(page, vma);
+                       if (address == -EFAULT)
+                               continue;
+                       ret += page_mkclean_one(page, vma, address);
+               }
        }
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
@@ -761,19 +761,14 @@ void page_remove_rmap(struct page *page)
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-                               enum ttu_flags flags)
+                           unsigned long address, enum ttu_flags flags)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;
 
-       address = vma_address(page, vma);
-       if (address == -EFAULT)
-               goto out;
-
        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;
@@ -788,7 +783,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        ret = SWAP_MLOCK;
                        goto out_unmap;
                }
-               if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+               if (TTU_ACTION(flags) == TTU_MUNLOCK)
                        goto out_unmap;
        }
        if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -861,7 +856,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 out_unmap:
        pte_unmap_unlock(pte, ptl);
 
-       if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+       if (ret == SWAP_MLOCK) {
                ret = SWAP_AGAIN;
                if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
                        if (vma->vm_flags & VM_LOCKED) {
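
The other theme in this patch is the removal of the MLOCK_PAGES guards: per the comment deleted below ("MLOCK_PAGES => feature is configured"), the macro evaluated to 0 when the mlocked-page tracking feature was not configured, compiling the mlock handling out; with it gone, the VM_LOCKED handling above always runs. A hypothetical helper (not in the patch) sketching the pattern that is now unconditional:

static int try_mlock_page(struct page *page, struct vm_area_struct *vma)
{
	int mlocked = 0;

	/* take mmap_sem only if it is uncontended; reclaim must not block here */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);	/* mark PG_mlocked so reclaim treats it as unevictable */
			mlocked = 1;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return mlocked;
}
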
@@ -938,11 +933,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                return ret;
 
        /*
-        * MLOCK_PAGES => feature is configured.
-        * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+        * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
         * keep the sem while scanning the cluster for mlocking pages.
         */
-       if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+       if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
                locked_vma = (vma->vm_flags & VM_LOCKED);
                if (!locked_vma)
                        up_read(&vma->vm_mm->mmap_sem); /* don't need it */
@@ -1019,7 +1013,10 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
                return ret;
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-               ret = try_to_unmap_one(page, vma, flags);
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
+               ret = try_to_unmap_one(page, vma, address, flags);
                if (ret != SWAP_AGAIN || !page_mapped(page))
                        break;
        }
@@ -1057,7 +1054,10 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-               ret = try_to_unmap_one(page, vma, flags);
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
+               ret = try_to_unmap_one(page, vma, address, flags);
                if (ret != SWAP_AGAIN || !page_mapped(page))
                        goto out;
        }
@@ -1075,9 +1075,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-               if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-                       (vma->vm_flags & VM_LOCKED))
-                       continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
@@ -1110,9 +1107,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-                       if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-                           (vma->vm_flags & VM_LOCKED))
-                               continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while ( cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {