Merge branch 'master' into for-2.6.35
diff --git a/mm/memory.c b/mm/memory.c
index a459761..833952d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -56,6 +56,7 @@
 #include <linux/kallsyms.h>
 #include <linux/swapops.h>
 #include <linux/elf.h>
+#include <linux/gfp.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -124,7 +125,7 @@ core_initcall(init_zero_pfn);
 
 #if defined(SPLIT_RSS_COUNTING)
 
-void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
+static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
 {
        int i;
 
@@ -190,9 +191,6 @@ static void check_sync_rss_stat(struct task_struct *task)
 {
 }
 
-void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
-{
-}
 #endif
 
 /*
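The two hunks above tidy the SPLIT_RSS_COUNTING plumbing: the per-task sync helper becomes static, and the empty stub of sync_mm_rss() in the !SPLIT_RSS_COUNTING branch is dropped. The idea behind the feature is to batch per-thread counter deltas and fold them into the shared mm-wide counters only occasionally. A minimal userspace sketch of that batching, assuming invented names throughout (none of this is kernel API):

/*
 * Userspace sketch of split RSS counting. Each thread accumulates
 * counter deltas privately (the "task_struct" side) and folds them
 * into shared atomics (the "mm_struct" side) only every FLUSH_THRESH
 * events, trading exactness of intermediate reads for far fewer
 * contended atomic operations. All names here are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_COUNTERS  2           /* e.g. file-backed vs anonymous pages */
#define FLUSH_THRESH 64          /* batch size before syncing */

static atomic_long mm_counter[NR_COUNTERS];          /* shared side */

static _Thread_local long task_delta[NR_COUNTERS];   /* private side */
static _Thread_local int  task_events;

/* Fold this thread's pending deltas into the shared counters. */
static void sync_task_stat(void)
{
	for (int i = 0; i < NR_COUNTERS; i++) {
		if (task_delta[i]) {
			atomic_fetch_add(&mm_counter[i], task_delta[i]);
			task_delta[i] = 0;
		}
	}
	task_events = 0;
}

/* Cheap fast path: bump the private delta, sync only occasionally. */
static void add_rss_fast(int member, long value)
{
	task_delta[member] += value;
	if (++task_events >= FLUSH_THRESH)
		sync_task_stat();
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		add_rss_fast(0, 1);   /* 1000 page "mappings" */
	sync_task_stat();             /* like syncing at task exit */
	printf("counter 0 = %ld\n", atomic_load(&mm_counter[0]));
	return 0;
}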
@@ -374,7 +372,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
-               anon_vma_unlink(vma);
+               unlink_anon_vmas(vma);
                unlink_file_vma(vma);
 
                if (is_vm_hugetlb_page(vma)) {
@@ -388,7 +386,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = vma->vm_next;
-                               anon_vma_unlink(vma);
+                               unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
@@ -512,12 +510,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                current->comm,
                (long long)pte_val(pte), (long long)pmd_val(*pmd));
-       if (page) {
-               printk(KERN_ALERT
-               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
-               page, (void *)page->flags, page_count(page),
-               page_mapcount(page), page->mapping, page->index);
-       }
+       if (page)
+               dump_page(page);
        printk(KERN_ALERT
                "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
                (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
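This hunk replaces print_bad_pte()'s open-coded printk of page state with a call to dump_page(), so every bad-PTE report goes through one shared dump helper. A userspace sketch of that centralization, with a hypothetical struct standing in for struct page:

/*
 * Sketch of centralizing page-state dumps (the struct and its fields
 * are stand-ins, not the kernel's struct page): instead of each error
 * path open-coding its own printk of flags/count/mapcount/mapping/
 * index, they all call one helper, so the output format stays
 * consistent and gains fields in exactly one place.
 */
#include <stdio.h>

struct fake_page {
	unsigned long flags;
	int count;
	int mapcount;
	void *mapping;
	unsigned long index;
};

static void dump_page(struct fake_page *page)
{
	fprintf(stderr,
		"page:%p flags:%#lx count:%d mapcount:%d mapping:%p index:%#lx\n",
		(void *)page, page->flags, page->count,
		page->mapcount, page->mapping, page->index);
}

int main(void)
{
	struct fake_page p = { .flags = 0x40, .count = 1, .mapcount = 0,
			       .mapping = NULL, .index = 3 };
	dump_page(&p);   /* every caller now prints identically */
	return 0;
}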
@@ -679,7 +673,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                                 &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
-                       if (is_write_migration_entry(entry) &&
+                       if (likely(!non_swap_entry(entry)))
+                               rss[MM_SWAPENTS]++;
+                       else if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both parent
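The hunk above starts charging genuine swap entries to the new MM_SWAPENTS counter at fork time, while leaving special entries (such as migration entries) to the existing handling; non_swap_entry() is what tells the two apart. A sketch of that kind of tagged encoding, with field widths and constants invented for the example:

/*
 * Sketch of a tagged swap-entry encoding (widths and values invented):
 * a small "type" field distinguishes real swap slots from special
 * entries such as migration entries, so accounting code can test the
 * tag before counting the entry as swap.
 */
#include <assert.h>
#include <stdbool.h>

#define TYPE_BITS      5
#define OFFSET_SHIFT   TYPE_BITS
#define MAX_SWAPFILES  28             /* types at or above this are "special" */
#define TYPE_MIGRATION 30

typedef struct { unsigned long val; } swp_entry_t;

static swp_entry_t make_entry(unsigned type, unsigned long offset)
{
	return (swp_entry_t){ (offset << OFFSET_SHIFT) | type };
}

static unsigned entry_type(swp_entry_t e)
{
	return e.val & ((1UL << TYPE_BITS) - 1);
}

/* Mirrors the shape of non_swap_entry(): true for special entries. */
static bool non_swap_entry(swp_entry_t e)
{
	return entry_type(e) >= MAX_SWAPFILES;
}

int main(void)
{
	long swapents = 0;
	swp_entry_t swap = make_entry(0, 1234);            /* real swap slot */
	swp_entry_t mig  = make_entry(TYPE_MIGRATION, 7);  /* migration entry */

	if (!non_swap_entry(swap))
		swapents++;        /* counted, like rss[MM_SWAPENTS]++ */
	if (!non_swap_entry(mig))
		swapents++;        /* special: not counted */

	assert(swapents == 1);
	return 0;
}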
@@ -974,9 +970,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                if (pte_file(ptent)) {
                        if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
                                print_bad_pte(vma, addr, ptent, NULL);
-               } else if
-                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-                       print_bad_pte(vma, addr, ptent, NULL);
+               } else {
+                       swp_entry_t entry = pte_to_swp_entry(ptent);
+
+                       if (!non_swap_entry(entry))
+                               rss[MM_SWAPENTS]--;
+                       if (unlikely(!free_swap_and_cache(entry)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
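zap_pte_range() above is the matching decrement: each swap PTE counted when it was installed is uncounted when the mapping is torn down, so MM_SWAPENTS tracks the number of live swap PTEs in the mm. A toy harness checking that pairing invariant, with all names invented:

/*
 * Toy check of the pairing invariant the two MM_SWAPENTS hunks
 * maintain: every path that installs a swap PTE increments a per-mm
 * counter and every path that clears one decrements it, so at any
 * quiescent point the counter equals a fresh recount of the table.
 */
#include <assert.h>

#define NPTES 8
static int  swap_pte[NPTES];    /* 1 = slot holds a swap entry */
static long mm_swapents;        /* the MM_SWAPENTS-style counter */

static void set_swap_pte(int i) { swap_pte[i] = 1; mm_swapents++; }

static void zap_pte(int i)
{
	if (swap_pte[i]) {      /* zapping a non-swap PTE is a no-op here */
		swap_pte[i] = 0;
		mm_swapents--;
	}
}

static long recount(void)
{
	long n = 0;
	for (int i = 0; i < NPTES; i++)
		n += swap_pte[i];
	return n;
}

int main(void)
{
	set_swap_pte(1);
	set_swap_pte(4);
	zap_pte(1);
	zap_pte(3);
	assert(mm_swapents == recount());   /* counter never drifts */
	return 0;
}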
@@ -2131,6 +2132,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_release(old_page);
                }
                reuse = reuse_swap_page(old_page);
+               if (reuse)
+                       /*
+                        * The page is all ours.  Move it to our anon_vma so
+                        * the rmap code will not search our parent or siblings.
+                        * Protected against the rmap code by the page lock.
+                        */
+                       page_move_anon_rmap(old_page, vma, address);
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
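When do_wp_page() finds it can reuse the page outright, the added code re-points the page's reverse mapping at the faulting vma's own anon_vma, so later rmap walks need not search the parent or siblings. A loose sketch of that "move the back-pointer once ownership is exclusive" pattern, with types invented for the example:

/*
 * Sketch of narrowing a reverse mapping once ownership is exclusive
 * (types invented): while an object may be shared, its back-pointer
 * names an ancestor group so lookups cover every possible owner; once
 * one owner holds it exclusively, re-pointing the back-pointer at that
 * owner keeps later lookups from scanning relatives that can no longer
 * map it.
 */
#include <stdio.h>

struct owner_group { const char *name; };

struct object {
	struct owner_group *rmap;   /* like page->mapping -> anon_vma */
	int refcount;
};

/* Like page_move_anon_rmap(): only legal once the object is ours alone. */
static void move_rmap(struct object *obj, struct owner_group *mine)
{
	if (obj->refcount == 1)     /* "reuse" decided, no other mapper */
		obj->rmap = mine;
}

int main(void)
{
	struct owner_group parent = { "parent" }, child = { "child" };
	struct object page = { .rmap = &parent, .refcount = 1 };

	move_rmap(&page, &child);   /* later reverse lookups start at child */
	printf("rmap now: %s\n", page.rmap->name);
	return 0;
}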
@@ -2692,6 +2700,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         */
 
        inc_mm_counter_fast(mm, MM_ANONPAGES);
+       dec_mm_counter_fast(mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
        if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);