filemap: fix kernel-doc warnings
diff --git a/mm/rmap.c b/mm/rmap.c
index 892e187..1652166 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -47,8 +47,6 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
-#include <linux/mm_inline.h>
-#include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
@@ -673,10 +671,11 @@ void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-       atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+       SetPageSwapBacked(page);
+       atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
        if (page_evictable(page, vma))
-               lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+               lru_cache_add_lru(page, LRU_ACTIVE_ANON);
        else
                add_page_to_unevictable_list(page);
 }
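
For reference, applying this hunk leaves page_add_new_anon_rmap() looking as reconstructed below (all lines come from the hunk itself): a freshly faulted anonymous page is now marked swap-backed and, when evictable, goes straight onto the active anon LRU instead of being classified through page_is_file_cache().

void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);		/* anon pages are swap-backed, not file-backed */
	atomic_set(&page->_mapcount, 0);	/* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}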
@@ -708,7 +707,6 @@ void page_add_file_rmap(struct page *page)
  */
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
-       BUG_ON(page_mapcount(page) == 0);
        if (PageAnon(page))
                __page_check_anon_rmap(page, vma, address);
        atomic_inc(&page->_mapcount);
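
With the unconditional BUG_ON on the map count gone, page_dup_rmap() reduces to the context lines above; reconstructed here for reference:

void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
		   unsigned long address)
{
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}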
@@ -718,28 +716,12 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
- * @vma: the vm area in which the mapping is removed
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
+void page_remove_rmap(struct page *page)
 {
        if (atomic_add_negative(-1, &page->_mapcount)) {
-               if (unlikely(page_mapcount(page) < 0)) {
-                       printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
-                       printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
-                       printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
-                       printk (KERN_EMERG "  page->count = %x\n", page_count(page));
-                       printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
-                       print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
-                       if (vma->vm_ops) {
-                               print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
-                       }
-                       if (vma->vm_file && vma->vm_file->f_op)
-                               print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
-                       BUG();
-               }
-
                /*
                 * Now that the last pte has gone, s390 must transfer dirty
                 * flag from storage key to struct page.  We can usually skip
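
The vma argument of page_remove_rmap() existed only to feed the diagnostic dump deleted here (vm_ops, fault handler and f_op->mmap symbols), so the parameter and its @vma kernel-doc line can go with it. If a vma-free sanity check were still wanted, a minimal sketch (illustrative only, not part of this patch) could look like:

void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		/*
		 * Illustrative only: catch a mapcount underflow without
		 * needing the vma for a verbose dump.
		 */
		VM_BUG_ON(page_mapcount(page) < 0);
		/*
		 * ... the s390 dirty-flag transfer and the rest of the
		 * last-unmap handling continue as in the context above ...
		 */
	}
}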
@@ -853,7 +835,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                dec_mm_counter(mm, file_rss);
 
 
-       page_remove_rmap(page, vma);
+       page_remove_rmap(page);
        page_cache_release(page);
 
 out_unmap:
@@ -968,7 +950,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                if (pte_dirty(pteval))
                        set_page_dirty(page);
 
-               page_remove_rmap(page, vma);
+               page_remove_rmap(page);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
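
In this cluster-unmap path (and similarly in try_to_unmap_one above), each pte's page is discarded with the short sequence visible in the context lines; purely as an illustration, with a hypothetical helper name, it amounts to:

/* Hypothetical helper, mirroring the context lines above; not in the patch. */
static void example_discard_mapped_page(struct mm_struct *mm, pte_t pteval,
					struct page *page)
{
	if (pte_dirty(pteval))
		set_page_dirty(page);	/* preserve the dirty bit before the pte is gone */
	page_remove_rmap(page);		/* no vma argument needed any more */
	page_cache_release(page);	/* drop the reference held for the mapping */
	dec_mm_counter(mm, file_rss);	/* one fewer resident file page in this mm */
}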
@@ -1090,7 +1072,8 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (MLOCK_PAGES && unlikely(unlock)) {
-                       if (!(vma->vm_flags & VM_LOCKED))
+                       if (!((vma->vm_flags & VM_LOCKED) &&
+                                               page_mapped_in_vma(page, vma)))
                                continue;       /* must visit all vmas */
                        ret = SWAP_MLOCK;
                } else {
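
In the munlock (unlock) path, a vma now yields SWAP_MLOCK only if it is both VM_LOCKED and actually maps this page; every vma is still visited. Written as a standalone predicate (illustrative helper only, not in the patch), the new condition is:

/* Illustrative helper only: the condition tested in the unlock path above. */
static int vma_keeps_page_mlocked(struct page *page, struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_LOCKED) && page_mapped_in_vma(page, vma);
}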