mm: clean up page_remove_rmap()
author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tue, 22 Sep 2009 00:01:28 +0000 (17:01 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Sep 2009 14:17:26 +0000 (07:17 -0700)
page_remove_rmap() has multiple PageAnon() tests and is deeply nested.
Clean this up.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
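
In short, the deeply nested body is replaced by an early return when the page
is still mapped by someone else, and the PageAnon() handling is consolidated
into one branch.  A condensed sketch of the resulting function, assembled from
the added lines of the hunk below (comments are abbreviated here; the diff
itself is authoritative):

	void page_remove_rmap(struct page *page)
	{
		/* page still mapped by someone else? */
		if (!atomic_add_negative(-1, &page->_mapcount))
			return;

		/* s390: transfer dirty flag from storage key if needed */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}

		/* one PageAnon() branch covers memcg uncharge and zone stats */
		if (PageAnon(page)) {
			mem_cgroup_uncharge_page(page);
			__dec_zone_page_state(page, NR_ANON_PAGES);
		} else {
			__dec_zone_page_state(page, NR_FILE_MAPPED);
		}
		mem_cgroup_update_mapped_file_stat(page, -1);

		/* page->mapping is deliberately left set; see comment in patch */
	}
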
mm/rmap.c

index 0895b5c..1406e67 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -739,34 +739,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
  */
 void page_remove_rmap(struct page *page)
 {
-       if (atomic_add_negative(-1, &page->_mapcount)) {
-               /*
-                * Now that the last pte has gone, s390 must transfer dirty
-                * flag from storage key to struct page.  We can usually skip
-                * this if the page is anon, so about to be freed; but perhaps
-                * not if it's in swapcache - there might be another pte slot
-                * containing the swap entry, but page not yet written to swap.
-                */
-               if ((!PageAnon(page) || PageSwapCache(page)) &&
-                   page_test_dirty(page)) {
-                       page_clear_dirty(page);
-                       set_page_dirty(page);
-               }
-               if (PageAnon(page))
-                       mem_cgroup_uncharge_page(page);
-               __dec_zone_page_state(page,
-                       PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
-               mem_cgroup_update_mapped_file_stat(page, -1);
-               /*
-                * It would be tidy to reset the PageAnon mapping here,
-                * but that might overwrite a racing page_add_anon_rmap
-                * which increments mapcount after us but sets mapping
-                * before us: so leave the reset to free_hot_cold_page,
-                * and remember that it's only reliable while mapped.
-                * Leaving it set also helps swapoff to reinstate ptes
-                * faster for those pages still in swapcache.
-                */
+       /* page still mapped by someone else? */
+       if (!atomic_add_negative(-1, &page->_mapcount))
+               return;
+
+       /*
+        * Now that the last pte has gone, s390 must transfer dirty
+        * flag from storage key to struct page.  We can usually skip
+        * this if the page is anon, so about to be freed; but perhaps
+        * not if it's in swapcache - there might be another pte slot
+        * containing the swap entry, but page not yet written to swap.
+        */
+       if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+               page_clear_dirty(page);
+               set_page_dirty(page);
        }
+       if (PageAnon(page)) {
+               mem_cgroup_uncharge_page(page);
+               __dec_zone_page_state(page, NR_ANON_PAGES);
+       } else {
+               __dec_zone_page_state(page, NR_FILE_MAPPED);
+       }
+       mem_cgroup_update_mapped_file_stat(page, -1);
+       /*
+        * It would be tidy to reset the PageAnon mapping here,
+        * but that might overwrite a racing page_add_anon_rmap
+        * which increments mapcount after us but sets mapping
+        * before us: so leave the reset to free_hot_cold_page,
+        * and remember that it's only reliable while mapped.
+        * Leaving it set also helps swapoff to reinstate ptes
+        * faster for those pages still in swapcache.
+        */
 }
 
 /*