mm: introduce free_pages_prepare()
author KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Mon, 24 May 2010 21:32:38 +0000 (14:32 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 25 May 2010 15:07:00 +0000 (08:07 -0700)
free_hot_cold_page() and __free_pages_ok() perform very similar preparation
when freeing a page.  Consolidate the shared work into a new helper,
free_pages_prepare(), called from both paths.

[akpm@linux-foundation.org: fix busted coding style]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
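
The shape of the refactoring in the diff below: two free paths shared the
same preparation, which is hoisted into a helper that returns false when the
page must not be freed.  A minimal, self-contained C sketch of the same
pattern (struct object and the release_* names are toy stand-ins for
illustration, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for struct page; illustrative only. */
	struct object {
		int refcount;
		bool mapped;
	};

	/*
	 * Shared preparation, analogous to free_pages_prepare(): run the
	 * checks and teardown common to both free paths, and return false
	 * if the object must not be released.
	 */
	static bool release_prepare(struct object *obj)
	{
		if (obj->mapped)
			obj->mapped = false;	/* cf. clearing page->mapping */
		if (obj->refcount != 0) {
			fprintf(stderr, "bad refcount %d, leaking object\n",
				obj->refcount);
			return false;
		}
		return true;
	}

	/* Slow path, analogous to __free_pages_ok(). */
	static void release_slow(struct object *obj)
	{
		if (!release_prepare(obj))
			return;
		/* ... return the object to the global pool ... */
	}

	/* Fast path, analogous to free_hot_cold_page(). */
	static void release_fast(struct object *obj)
	{
		if (!release_prepare(obj))
			return;
		/* ... push the object onto a per-CPU free list ... */
	}

	int main(void)
	{
		struct object ok = { .refcount = 0, .mapped = true };
		struct object bad = { .refcount = 1, .mapped = false };

		release_fast(&ok);	/* passes preparation, gets freed */
		release_slow(&bad);	/* fails the check, is leaked instead */
		return 0;
	}

The bool return is the crux: each caller keeps its own locking and
list handling, while the duplicated checks and teardown live in one place.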
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95ad42d..8f4f278 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -620,20 +620,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-       unsigned long flags;
        int i;
        int bad = 0;
-       int wasMlocked = __TestClearPageMlocked(page);
 
        trace_mm_page_free_direct(page, order);
        kmemcheck_free_shadow(page, order);
 
-       for (i = 0 ; i < (1 << order) ; ++i)
-               bad += free_pages_check(page + i);
+       for (i = 0; i < (1 << order); i++) {
+               struct page *pg = page + i;
+
+               if (PageAnon(pg))
+                       pg->mapping = NULL;
+               bad += free_pages_check(pg);
+       }
        if (bad)
-               return;
+               return false;
 
        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -643,6 +646,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
+       return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+       unsigned long flags;
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       if (!free_pages_prepare(page, order))
+               return;
+
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
@@ -1128,21 +1142,9 @@ void free_hot_cold_page(struct page *page, int cold)
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);
 
-       trace_mm_page_free_direct(page, 0);
-       kmemcheck_free_shadow(page, 0);
-
-       if (PageAnon(page))
-               page->mapping = NULL;
-       if (free_pages_check(page))
+       if (!free_pages_prepare(page, 0))
                return;
 
-       if (!PageHighMem(page)) {
-               debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-               debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-       }
-       arch_free_page(page, 0);
-       kernel_map_pages(page, 1, 0);
-
        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);