mm/vmscan: remove page_queue_congested() comment
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index caa9268..84d9da1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,6 +48,7 @@
 #include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
+#include <trace/events/kmem.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
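
[annotation] The new include pulls in the kmem tracepoints used throughout this patch (trace_mm_page_alloc, trace_mm_page_free_direct, trace_mm_pagevec_free, trace_mm_page_pcpu_drain, trace_mm_page_alloc_zone_locked and trace_mm_page_alloc_extfrag). A minimal, hedged way to watch them from userspace is to enable the kmem event group and stream trace_pipe; the sketch assumes debugfs is mounted at /sys/kernel/debug and that the running kernel carries these events.

/* Hedged sketch: enable the kmem tracepoints added by this patch and stream
 * the resulting events.  Assumes debugfs is mounted at /sys/kernel/debug. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/events/kmem/enable", O_WRONLY);
	if (fd < 0) {
		perror("enable kmem events");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/* trace_pipe blocks until events arrive; mm_page_alloc lines report
	 * the page, order, gfp flags and migratetype. */
	FILE *pipe = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	char line[512];
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}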
@@ -123,8 +124,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
 
 int min_free_kbytes = 1024;
 
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -535,6 +536,7 @@ static void free_pages_bulk(struct zone *zone, int count,
                page = list_entry(list->prev, struct page, lru);
                /* have to delete it as __free_one_page list manipulates */
                list_del(&page->lru);
+               trace_mm_page_pcpu_drain(page, order, page_private(page));
                __free_one_page(page, zone, order, page_private(page));
        }
        spin_unlock(&zone->lock);
@@ -557,7 +559,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
-       int wasMlocked = TestClearPageMlocked(page);
+       int wasMlocked = __TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, order);
 
@@ -783,6 +785,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
        return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+                                       int start_order, int migratetype)
+{
+       int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+       while (nr_pageblocks--) {
+               set_pageblock_migratetype(pageblock_page, migratetype);
+               pageblock_page += pageblock_nr_pages;
+       }
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
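
[annotation] change_pageblock_range() exists because a buddy can be larger than a single pageblock: when __rmqueue_fallback() takes ownership of an order >= pageblock_order chunk, every covered pageblock needs its migratetype updated, not just the first. A worked example of the count, assuming pageblock_order = 9 (2MB pageblocks with 4K pages):

/* Hedged arithmetic sketch, not kernel code: how many pageblocks the new
 * change_pageblock_range() walks for a high-order buddy. */
#include <stdio.h>

int main(void)
{
	const int pageblock_order = 9;	/* assumption: HUGETLB page order */

	for (int start_order = 9; start_order <= 10; start_order++)
		printf("order-%d buddy spans %d pageblock(s)\n",
		       start_order, 1 << (start_order - pageblock_order));
	/* order-9 -> 1 pageblock, order-10 -> 2 pageblocks; the old
	 * '== pageblock_order' check never retagged the extra blocks of
	 * larger buddies. */
	return 0;
}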
@@ -817,13 +830,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
@@ -834,11 +849,16 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        list_del(&page->lru);
                        rmv_page_order(page);
 
-                       if (current_order == pageblock_order)
-                               set_pageblock_migratetype(page,
+                       /* Take ownership for orders >= pageblock_order */
+                       if (current_order >= pageblock_order)
+                               change_pageblock_range(page, current_order,
                                                        start_migratetype);
 
                        expand(zone, page, order, current_order, area, migratetype);
+
+                       trace_mm_page_alloc_extfrag(page, order, current_order,
+                               start_migratetype, migratetype);
+
                        return page;
                }
        }
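
[annotation] The fallback path now also steals aggressively when page grouping by mobility is disabled (tiny machines), and claims the whole pageblock unconditionally in that case. A condensed, hedged sketch of the "claim the whole block" decision with the zone bookkeeping stripped out (enum values and numbers are illustrative):

/* Hedged sketch, not kernel code: the fallback "claim the whole pageblock"
 * heuristic above. */
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

static int claim_whole_block(int current_order, enum migratetype start_mt,
			     int pageblock_order, int grouping_disabled,
			     unsigned long pages_moved)
{
	/* Only steal aggressively for large buddies, reclaimable requests,
	 * or when mobility grouping is off. */
	if (current_order < (pageblock_order >> 1) &&
	    start_mt != MIGRATE_RECLAIMABLE && !grouping_disabled)
		return 0;
	/* Take ownership if over half the block's pages moved across,
	 * or unconditionally when grouping is disabled anyway. */
	return pages_moved >= (1UL << (pageblock_order - 1)) || grouping_disabled;
}

int main(void)
{
	/* order-5 unmovable request, 300 of 512 free pages moved, grouping on */
	printf("%d\n", claim_whole_block(5, MIGRATE_UNMOVABLE, 9, 0, 300));	/* 1 */
	printf("%d\n", claim_whole_block(3, MIGRATE_UNMOVABLE, 9, 0, 300));	/* 0 */
	return 0;
}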
@@ -872,6 +892,7 @@ retry_reserve:
                }
        }
 
+       trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
 
@@ -882,7 +903,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;
        
@@ -901,7 +922,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
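
[annotation] rmqueue_bulk() now takes the caller's cold flag: hot pages keep being linked in at the front, while cold pages are appended with list_add_tail() so that, at the end of the list the cold path drains from, physically contiguous pages still come back in PFN order and block I/O on them stays mergeable. A toy userspace illustration of head vs tail insertion at a fixed anchor (simplified: the real loop also advances the insertion point via list = &page->lru after each page):

/* Hedged toy sketch, not kernel code: head vs tail insertion and the order
 * pages are later handed out in.  The pfn values are made up. */
#include <stdio.h>

#define NPAGES 4

static void dump(const char *tag, const unsigned long *list, int n)
{
	printf("%s:", tag);
	for (int i = 0; i < n; i++)
		printf(" %lu", list[i]);
	printf("\n");
}

int main(void)
{
	const unsigned long pfns[NPAGES] = {100, 101, 102, 103};	/* contiguous */
	unsigned long head[NPAGES], tail[NPAGES];

	for (int i = 0; i < NPAGES; i++) {
		/* head insertion: newest page ends up first in line */
		for (int j = i; j > 0; j--)
			head[j] = head[j - 1];
		head[0] = pfns[i];
		/* tail insertion: PFN order of the batch is preserved */
		tail[i] = pfns[i];
	}
	dump("hot  (head)", head, NPAGES);	/* 103 102 101 100 */
	dump("cold (tail)", tail, NPAGES);	/* 100 101 102 103 */
	return 0;
}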
@@ -1021,7 +1045,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
-       int wasMlocked = TestClearPageMlocked(page);
+       int wasMlocked = __TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, 0);
 
@@ -1059,14 +1083,10 @@ static void free_hot_cold_page(struct page *page, int cold)
 
 void free_hot_page(struct page *page)
 {
+       trace_mm_page_free_direct(page, 0);
        free_hot_cold_page(page, 0);
 }
        
-void free_cold_page(struct page *page)
-{
-       free_hot_cold_page(page, 1);
-}
-
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -1119,7 +1139,8 @@ again:
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        if (unlikely(!pcp->count))
                                goto failed;
                }
@@ -1137,9 +1158,20 @@ again:
 
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
+                       int get_one_page = 0;
+
                        pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
-                       page = list_entry(pcp->list.next, struct page, lru);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
+                       list_for_each_entry(page, &pcp->list, lru) {
+                               if (get_pageblock_migratetype(page) !=
+                                           MIGRATE_ISOLATE) {
+                                       get_one_page = 1;
+                                       break;
+                               }
+                       }
+                       if (!get_one_page)
+                               goto failed;
                }
 
                list_del(&page->lru);
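
[annotation] After a pcp refill, the list may now contain only pages from pageblocks that are being isolated (e.g. for memory hot-remove); rather than hand one of those out, the allocator scans for a non-isolated page and falls back to the failed path if none exists. A condensed sketch of that scan, with the kernel list walk replaced by an array walk (names and values are illustrative):

/* Hedged sketch, not kernel code: after refilling the pcp list, pick the
 * first page whose pageblock is not isolated, or report failure. */
#include <stdio.h>

#define MIGRATE_ISOLATE 4	/* illustrative value */

static int pick_non_isolated(const int *block_mt, int count)
{
	for (int i = 0; i < count; i++)
		if (block_mt[i] != MIGRATE_ISOLATE)
			return i;	/* first usable page wins */
	return -1;			/* all isolated: "goto failed" */
}

int main(void)
{
	int refill[] = { MIGRATE_ISOLATE, MIGRATE_ISOLATE, 2, 2 };
	printf("picked index %d\n", pick_non_isolated(refill, 4));	/* 2 */
	return 0;
}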
@@ -1620,10 +1652,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        /* We now go into synchronous reclaim */
        cpuset_memory_pressure_bump();
-
-       /*
-        * The task's cpuset might have expanded its set of allowable nodes
-        */
        p->flags |= PF_MEMALLOC;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
@@ -1740,8 +1768,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * be using allocators in order of preference for an area that is
         * too large.
         */
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
+       if (order >= MAX_ORDER) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                return NULL;
+       }
 
        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1756,6 +1786,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
        wake_all_kswapd(order, zonelist, high_zoneidx);
 
+restart:
        /*
         * OK, we're below the kswapd watermark and have kicked background
         * reclaim. Now things get more complex, so set up alloc_flags according
@@ -1763,7 +1794,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-restart:
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -1789,6 +1819,10 @@ rebalance:
        if (p->flags & PF_MEMALLOC)
                goto nopage;
 
+       /* Avoid allocations with no watermarks from looping endlessly */
+       if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
@@ -1894,6 +1928,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
 
+       trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -1903,44 +1938,41 @@ EXPORT_SYMBOL(__alloc_pages_nodemask);
  */
 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
-       struct page * page;
+       struct page *page;
+
+       /*
+        * __get_free_pages() returns a 32-bit address, which cannot represent
+        * a highmem page
+        */
+       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
 }
-
 EXPORT_SYMBOL(__get_free_pages);
 
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
-       struct page * page;
-
-       /*
-        * get_zeroed_page() returns a 32-bit address, which cannot represent
-        * a highmem page
-        */
-       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
-       page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
-       if (page)
-               return (unsigned long) page_address(page);
-       return 0;
+       return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
 }
-
 EXPORT_SYMBOL(get_zeroed_page);
 
 void __pagevec_free(struct pagevec *pvec)
 {
        int i = pagevec_count(pvec);
 
-       while (--i >= 0)
+       while (--i >= 0) {
+               trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
                free_hot_cold_page(pvec->pages[i], pvec->cold);
+       }
 }
 
 void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
+               trace_mm_page_free_direct(page, order);
                if (order == 0)
                        free_hot_page(page);
                else
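
[annotation] get_zeroed_page() collapses into __get_free_pages(gfp_mask | __GFP_ZERO, 0), and the highmem check moves up so both entry points reject __GFP_HIGHMEM: these helpers return a linear kernel address, which a highmem page does not have. The calling convention is unchanged; a hedged usage reminder (kernel context, illustrative function name):

/* Hedged usage sketch of the helpers touched above. */
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_buffers(void)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);	/* one zeroed page */
	unsigned long quad = __get_free_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */

	if (!page || !quad) {
		if (page)
			free_page(page);
		if (quad)
			free_pages(quad, 2);
		return -ENOMEM;
	}
	/* ... use the linear addresses; __GFP_HIGHMEM here would trip the VM_BUG_ON ... */
	free_page(page);
	free_pages(quad, 2);
	return 0;
}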
@@ -2115,23 +2147,28 @@ void show_free_areas(void)
                }
        }
 
-       printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
-               " inactive_file:%lu"
+       printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+               " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
                " unevictable:%lu"
-               " dirty:%lu writeback:%lu unstable:%lu\n"
-               " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+               " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
+               " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+               " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
-               global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
+               global_page_state(NR_ISOLATED_ANON),
+               global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_FILE),
+               global_page_state(NR_ISOLATED_FILE),
                global_page_state(NR_UNEVICTABLE),
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
+               nr_blockdev_pages(),
                global_page_state(NR_FREE_PAGES),
-               global_page_state(NR_SLAB_RECLAIMABLE) +
-                       global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_page_state(NR_SLAB_RECLAIMABLE),
+               global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
+               global_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
                global_page_state(NR_BOUNCE));
 
@@ -2149,7 +2186,21 @@ void show_free_areas(void)
                        " active_file:%lukB"
                        " inactive_file:%lukB"
                        " unevictable:%lukB"
+                       " isolated(anon):%lukB"
+                       " isolated(file):%lukB"
                        " present:%lukB"
+                       " mlocked:%lukB"
+                       " dirty:%lukB"
+                       " writeback:%lukB"
+                       " mapped:%lukB"
+                       " shmem:%lukB"
+                       " slab_reclaimable:%lukB"
+                       " slab_unreclaimable:%lukB"
+                       " kernel_stack:%lukB"
+                       " pagetables:%lukB"
+                       " unstable:%lukB"
+                       " bounce:%lukB"
+                       " writeback_tmp:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        "\n",
@@ -2163,7 +2214,22 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
+                       K(zone_page_state(zone, NR_ISOLATED_ANON)),
+                       K(zone_page_state(zone, NR_ISOLATED_FILE)),
                        K(zone->present_pages),
+                       K(zone_page_state(zone, NR_MLOCK)),
+                       K(zone_page_state(zone, NR_FILE_DIRTY)),
+                       K(zone_page_state(zone, NR_WRITEBACK)),
+                       K(zone_page_state(zone, NR_FILE_MAPPED)),
+                       K(zone_page_state(zone, NR_SHMEM)),
+                       K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
+                       K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
+                       zone_page_state(zone, NR_KERNEL_STACK) *
+                               THREAD_SIZE / 1024,
+                       K(zone_page_state(zone, NR_PAGETABLE)),
+                       K(zone_page_state(zone, NR_UNSTABLE_NFS)),
+                       K(zone_page_state(zone, NR_BOUNCE)),
+                       K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
                        );
@@ -2533,7 +2599,6 @@ static void build_zonelists(pg_data_t *pgdat)
        prev_node = local_node;
        nodes_clear(used_mask);
 
-       memset(node_load, 0, sizeof(node_load));
        memset(node_order, 0, sizeof(node_order));
        j = 0;
 
@@ -2642,6 +2707,9 @@ static int __build_all_zonelists(void *dummy)
 {
        int nid;
 
+#ifdef CONFIG_NUMA
+       memset(node_load, 0, sizeof(node_load));
+#endif
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
 
@@ -2768,7 +2836,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 {
        unsigned long start_pfn, pfn, end_pfn;
        struct page *page;
-       unsigned long reserve, block_migratetype;
+       unsigned long block_migratetype;
+       int reserve;
 
        /* Get the start pfn, end pfn and the number of blocks to reserve */
        start_pfn = zone->zone_start_pfn;
@@ -2776,6 +2845,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
+       /*
+        * Reserve blocks are generally in place to help high-order atomic
+        * allocations that are short-lived. A min_free_kbytes value that
+        * would result in more than 2 reserve blocks for atomic allocations
+        * is assumed to be in place to help anti-fragmentation for the
+        * future allocation of hugepages at runtime.
+        */
+       reserve = min(2, reserve);
+
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                if (!pfn_valid(pfn))
                        continue;
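
[annotation] The new cap keeps MIGRATE_RESERVE at no more than two pageblocks: the reserve only needs to cover short-lived high-order atomic allocations, and a very large min_free_kbytes (for example one tuned to help runtime hugepage allocation) should feed anti-fragmentation instead of inflating the reserve. Worked numbers, assuming 4K pages, 2MB pageblocks and an example watermark:

/* Hedged arithmetic sketch: how many reserve pageblocks a zone gets.
 * The watermark and pageblock size below are assumed example values. */
#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long min_wmark_pages = 16384;	/* assumption: large min_free_kbytes */
	unsigned long pageblock_nr_pages = 512;	/* 2MB blocks with 4K pages */
	int pageblock_order = 9;

	int reserve = ROUNDUP(min_wmark_pages, pageblock_nr_pages) >> pageblock_order;
	printf("uncapped reserve: %d blocks\n", reserve);	/* 32 */

	if (reserve > 2)	/* reserve = min(2, reserve) in the patch */
		reserve = 2;
	printf("capped reserve:   %d blocks\n", reserve);	/* 2 */
	return 0;
}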
@@ -3131,6 +3209,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        return 0;
 }
 
+static int __zone_pcp_update(void *data)
+{
+       struct zone *zone = data;
+       int cpu;
+       unsigned long batch = zone_batchsize(zone), flags;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
+
+               pset = zone_pcp(zone, cpu);
+               pcp = &pset->pcp;
+
+               local_irq_save(flags);
+               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               setup_pageset(pset, batch);
+               local_irq_restore(flags);
+       }
+       return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+       stop_machine(__zone_pcp_update, zone, NULL);
+}
+
 static __meminit void zone_pcp_init(struct zone *zone)
 {
        int cpu;
@@ -3705,7 +3809,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->lru[l].nr_saved_scan = 0;
+                       zone->reclaim_stat.nr_saved_scan[l] = 0;
                }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
@@ -4494,7 +4598,7 @@ void setup_per_zone_wmarks(void)
        calculate_totalreserve_pages();
 }
 
-/**
+/*
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
  * to be referenced again before it is swapped out.
@@ -4717,7 +4821,14 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries <<= (PAGE_SHIFT - scale);
 
                /* Make sure we've got at least a 0-order allocation.. */
-               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+               if (unlikely(flags & HASH_SMALL)) {
+                       /* Makes no sense without HASH_EARLY */
+                       WARN_ON(!(flags & HASH_EARLY));
+                       if (!(numentries >> *_hash_shift)) {
+                               numentries = 1UL << *_hash_shift;
+                               BUG_ON(!numentries);
+                       }
+               } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
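
[annotation] HASH_SMALL lets early callers (it only makes sense together with HASH_EARLY) pin the table size to their requested shift instead of the old floor of one page's worth of buckets; the final roundup_pow_of_two() then leaves that value alone. A sketch of the two sizing paths with example numbers (PAGE_SIZE, bucketsize and the shift are assumptions, and the local roundup_pow_of_two() is a stand-in for the kernel helper):

/* Hedged sketch of the hash sizing decision above. */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long page_size = 4096, bucketsize = 8;
	unsigned long numentries = 13, hash_shift = 7;
	int hash_small = 1;

	if (hash_small) {
		/* keep the table at the caller's requested shift */
		if (!(numentries >> hash_shift))
			numentries = 1UL << hash_shift;		/* 128 */
	} else if (numentries * bucketsize < page_size) {
		/* old behaviour: never allocate less than one page */
		numentries = page_size / bucketsize;		/* 512 */
	}

	printf("buckets: %lu\n", roundup_pow_of_two(numentries));
	return 0;
}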
@@ -4859,13 +4970,16 @@ int set_migratetype_isolate(struct page *page)
        struct zone *zone;
        unsigned long flags;
        int ret = -EBUSY;
+       int zone_idx;
 
        zone = page_zone(page);
+       zone_idx = zone_idx(zone);
        spin_lock_irqsave(&zone->lock, flags);
        /*
         * In the future, more migrate types will be able to be isolation targets.
         */
-       if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+       if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
+           zone_idx != ZONE_MOVABLE)
                goto out;
        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        move_freepages_block(zone, page, MIGRATE_ISOLATE);
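
[annotation] Isolation is now also allowed for pageblocks inside ZONE_MOVABLE even when their migratetype is not MIGRATE_MOVABLE, since everything placed in that zone is migratable by construction; this matters for hot-removing ZONE_MOVABLE ranges. A condensed sketch of the relaxed predicate (enum values are illustrative, not the kernel's):

/* Hedged sketch of the check above: a pageblock may be isolated when it is
 * movable by migratetype or sits in ZONE_MOVABLE. */
#include <stdio.h>

enum { MIGRATE_UNMOVABLE = 0, MIGRATE_MOVABLE = 2 };
enum { ZONE_NORMAL = 1, ZONE_MOVABLE = 3 };

static int can_isolate(int block_migratetype, int zone_idx)
{
	return block_migratetype == MIGRATE_MOVABLE || zone_idx == ZONE_MOVABLE;
}

int main(void)
{
	printf("%d\n", can_isolate(MIGRATE_MOVABLE, ZONE_NORMAL));	/* 1 */
	printf("%d\n", can_isolate(MIGRATE_UNMOVABLE, ZONE_MOVABLE));	/* 1: new case */
	printf("%d\n", can_isolate(MIGRATE_UNMOVABLE, ZONE_NORMAL));	/* 0 */
	return 0;
}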