page-allocator: change migratetype for all pageblocks within a high-order page during...
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e50c225..2075980 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -557,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
-       int wasMlocked = TestClearPageMlocked(page);
+       int wasMlocked = __TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, order);
 
@@ -783,6 +783,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
        return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+                                       int start_order, int migratetype)
+{
+       int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+       while (nr_pageblocks--) {
+               set_pageblock_migratetype(pageblock_page, migratetype);
+               pageblock_page += pageblock_nr_pages;
+       }
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
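A quick way to see what the new helper covers: the userspace sketch below mirrors change_pageblock_range(), assuming pageblock_order = 9 (the x86 value with 2MB huge pages) and reducing the pageblock flags bitmap to a plain array; all names here are illustrative stand-ins, not kernel API.

	#include <stdio.h>

	#define PAGEBLOCK_ORDER     9
	#define PAGEBLOCK_NR_PAGES  (1UL << PAGEBLOCK_ORDER)

	static int block_tag[4];	/* stand-in for the pageblock flags bitmap */

	/* Mirrors change_pageblock_range(): retag every pageblock spanned
	 * by a page of start_order, beginning at page frame number pfn. */
	static void change_range(unsigned long pfn, int start_order, int tag)
	{
		int nr_pageblocks = 1 << (start_order - PAGEBLOCK_ORDER);

		while (nr_pageblocks--) {
			block_tag[pfn / PAGEBLOCK_NR_PAGES] = tag;
			pfn += PAGEBLOCK_NR_PAGES;
		}
	}

	int main(void)
	{
		/* an order-10 page spans 1 << (10 - 9) = 2 pageblocks */
		change_range(0, 10, 1);
		for (int i = 0; i < 4; i++)
			printf("pageblock %d -> tag %d\n", i, block_tag[i]);
		return 0;
	}

Running it shows pageblocks 0 and 1 retagged while 2 and 3 keep their old value, which is exactly the multi-pageblock case the old code left stale.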
@@ -836,8 +847,9 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        list_del(&page->lru);
                        rmv_page_order(page);
 
-                       if (current_order == pageblock_order)
-                               set_pageblock_migratetype(page,
+                       /* Take ownership for orders >= pageblock_order */
+                       if (current_order >= pageblock_order)
+                               change_pageblock_range(page, current_order,
                                                        start_migratetype);
 
                        expand(zone, page, order, current_order, area, migratetype);
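Worked example of why the comparison becomes >= rather than ==: with pageblock_order = 9 and MAX_ORDER - 1 = 10 (plausible x86 values), a successful order-10 fallback used to fail the old equality test, so neither of the two pageblocks it spans changed migratetype. With the new test, change_pageblock_range() retags 1 << (10 - 9) = 2 pageblocks, so the whole stolen range is owned by start_migratetype.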
@@ -1026,7 +1038,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
-       int wasMlocked = TestClearPageMlocked(page);
+       int wasMlocked = __TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, 0);
 
@@ -1771,6 +1783,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
        wake_all_kswapd(order, zonelist, high_zoneidx);
 
+restart:
        /*
         * OK, we're below the kswapd watermark and have kicked background
         * reclaim. Now things get more complex, so set up alloc_flags according
@@ -1778,7 +1791,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-restart:
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
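Moving the restart label above gfp_to_alloc_flags() means the allocation flags are recomputed on every pass through the slowpath rather than only once; state that changed while the task was reclaiming (for instance becoming an OOM victim, which gfp_to_alloc_flags() can reward with ALLOC_NO_WATERMARKS) is then picked up on the retry. A userspace analogue of the control-flow change, with all names and values invented for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	static int attempts;

	/* Stand-in for gfp_to_alloc_flags(): the result depends on task
	 * state that can change between attempts. */
	static unsigned int compute_flags(void)
	{
		return attempts > 0 ? 0x2 : 0x1;	/* 0x2 ~ "ignore watermarks" */
	}

	static bool try_alloc(unsigned int flags)
	{
		return flags & 0x2;	/* only succeeds with the stronger flags */
	}

	int main(void)
	{
		unsigned int flags;

	restart:
		flags = compute_flags();	/* recomputed each pass, as in the patch */
		if (!try_alloc(flags) && ++attempts < 3)
			goto restart;
		printf("allocated after %d restart(s)\n", attempts);
		return 0;
	}

Had the flags been computed once before the label, the retry would keep using the stale value and never pick up the stronger ones.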
@@ -1922,31 +1934,25 @@ EXPORT_SYMBOL(__alloc_pages_nodemask);
  */
 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
-       struct page * page;
+       struct page *page;
+
+       /*
+        * __get_free_pages() returns a 32-bit address, which cannot represent
+        * a highmem page
+        */
+       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
 }
-
 EXPORT_SYMBOL(__get_free_pages);
 
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
-       struct page * page;
-
-       /*
-        * get_zeroed_page() returns a 32-bit address, which cannot represent
-        * a highmem page
-        */
-       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
-       page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
-       if (page)
-               return (unsigned long) page_address(page);
-       return 0;
+       return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
 }
-
 EXPORT_SYMBOL(get_zeroed_page);
 
 void __pagevec_free(struct pagevec *pvec)
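get_zeroed_page() is now a thin wrapper, so the highmem sanity check lives in one place and guards both entry points; both return an address obtained from page_address(), which cannot represent a highmem page lacking a permanent kernel mapping. A typical caller, as a kernel-style fragment (illustrative, not from this patch):

	static int example_user(void)
	{
		unsigned long addr = get_zeroed_page(GFP_KERNEL);

		if (!addr)
			return -ENOMEM;
		/* addr is a directly mapped, already-zeroed page */
		free_page(addr);
		return 0;
	}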
@@ -2134,16 +2140,18 @@ void show_free_areas(void)
                }
        }
 
-       printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
-               " inactive_file:%lu"
+       printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+               " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
                " unevictable:%lu"
                " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
                " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
-               global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
+               global_page_state(NR_ISOLATED_ANON),
+               global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_FILE),
+               global_page_state(NR_ISOLATED_FILE),
                global_page_state(NR_UNEVICTABLE),
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
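The summary printk now keeps the anon and file LRU counters on their own rows and reports the two new isolation counters next to them. With the format string above and entirely made-up numbers, the first lines of the output would render roughly as:

	active_anon:170456 inactive_anon:8192 isolated_anon:0
	 active_file:93120 inactive_file:121824 isolated_file:32
	 unevictable:4 dirty:1955 writeback:0 unstable:0 buffer:6784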
@@ -2171,6 +2179,8 @@ void show_free_areas(void)
                        " active_file:%lukB"
                        " inactive_file:%lukB"
                        " unevictable:%lukB"
+                       " isolated(anon):%lukB"
+                       " isolated(file):%lukB"
                        " present:%lukB"
                        " mlocked:%lukB"
                        " dirty:%lukB"
@@ -2197,6 +2207,8 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
+                       K(zone_page_state(zone, NR_ISOLATED_ANON)),
+                       K(zone_page_state(zone, NR_ISOLATED_FILE)),
                        K(zone->present_pages),
                        K(zone_page_state(zone, NR_MLOCK)),
                        K(zone_page_state(zone, NR_FILE_DIRTY)),
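Each per-zone figure is printed through K(), the usual pages-to-kilobytes shift defined earlier in page_alloc.c; so, for example, a zone holding 512 isolated file pages on a 4KB-page system reports isolated(file):2048kB. For reference:

	#define K(x) ((x) << (PAGE_SHIFT - 10))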