memory hotplug: update zone pcp at memory online
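
The core of this change is the new zone_pcp_update() helper near the bottom of the diff: when a zone grows at memory online, each CPU's per-cpu pageset is drained and its batch size recomputed from the new zone size under stop_machine(), instead of staying at whatever was chosen at boot. The caller is not part of this file; the sketch below shows how the memory-online path is expected to use it. online_pages_sketch() and the elided bookkeeping are paraphrased from the mm/memory_hotplug.c flow of this era and are illustrative only -- the only symbol actually introduced by this diff is zone_pcp_update().

	/* Sketch, not the actual mm/memory_hotplug.c hunk. */
	int online_pages_sketch(unsigned long pfn, unsigned long nr_pages)
	{
		struct zone *zone = page_zone(pfn_to_page(pfn));

		/* ... mark the range online, free each of the nr_pages new
		 * pages into the buddy allocator, bump zone->present_pages
		 * and totalram_pages ... */

		/*
		 * The zone just grew, so recompute every CPU's pcp batch
		 * value from the new zone size.
		 */
		zone_pcp_update(zone);

		return 0;
	}
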
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c95a77c..1a3a893 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -72,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
@@ -486,7 +488,6 @@ static inline void __free_one_page(struct page *page,
  */
 static inline void free_page_mlock(struct page *page)
 {
-       __ClearPageMlocked(page);
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
@@ -556,7 +557,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
-       int clearMlocked = PageMlocked(page);
+       int wasMlocked = TestClearPageMlocked(page);
+
+       kmemcheck_free_shadow(page, order);
 
        for (i = 0 ; i < (1 << order) ; ++i)
                bad += free_pages_check(page + i);
@@ -572,7 +575,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
-       if (unlikely(clearMlocked))
+       if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
@@ -814,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
@@ -879,7 +884,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;
        
@@ -898,7 +903,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
@@ -1018,7 +1026,9 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
-       int clearMlocked = PageMlocked(page);
+       int wasMlocked = TestClearPageMlocked(page);
+
+       kmemcheck_free_shadow(page, 0);
 
        if (PageAnon(page))
                page->mapping = NULL;
@@ -1035,7 +1045,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        pcp = &zone_pcp(zone, get_cpu())->pcp;
        set_page_private(page, get_pageblock_migratetype(page));
        local_irq_save(flags);
-       if (unlikely(clearMlocked))
+       if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);
 
@@ -1076,6 +1086,16 @@ void split_page(struct page *page, unsigned int order)
 
        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * Split shadow pages too, because free(page[0]) would
+        * otherwise free the whole shadow.
+        */
+       if (kmemcheck_page_is_tracked(page))
+               split_page(virt_to_page(page[0].shadow), order);
+#endif
+
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
 }
@@ -1104,7 +1124,8 @@ again:
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        if (unlikely(!pcp->count))
                                goto failed;
                }
@@ -1123,7 +1144,8 @@ again:
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        page = list_entry(pcp->list.next, struct page, lru);
                }
 
@@ -1138,10 +1160,10 @@ again:
                         * properly detect and handle allocation failures.
                         *
                         * We most definitely don't want callers attempting to
-                        * allocate greater than single-page units with
+                        * allocate greater than order-1 page units with
                         * __GFP_NOFAIL.
                         */
-                       WARN_ON_ONCE(order > 0);
+                       WARN_ON_ONCE(order > 1);
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
@@ -1462,15 +1484,33 @@ zonelist_scan:
                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
+                       int ret;
+
                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
-                       if (!zone_watermark_ok(zone, order, mark,
-                                   classzone_idx, alloc_flags)) {
-                               if (!zone_reclaim_mode ||
-                                   !zone_reclaim(zone, gfp_mask, order))
+                       if (zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags))
+                               goto try_this_zone;
+
+                       if (zone_reclaim_mode == 0)
+                               goto this_zone_full;
+
+                       ret = zone_reclaim(zone, gfp_mask, order);
+                       switch (ret) {
+                       case ZONE_RECLAIM_NOSCAN:
+                               /* did not scan */
+                               goto try_next_zone;
+                       case ZONE_RECLAIM_FULL:
+                               /* scanned but unreclaimable */
+                               goto this_zone_full;
+                       default:
+                               /* did we reclaim enough */
+                               if (!zone_watermark_ok(zone, order, mark,
+                                               classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }
 
+try_this_zone:
                page = buffered_rmqueue(preferred_zone, zone, order,
                                                gfp_mask, migratetype);
                if (page)
@@ -1561,7 +1601,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                goto out;
 
        /* The OOM killer will not help higher order allocs */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
+       if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
                goto out;
 
        /* Exhausted what can be done so it's blamo time */
@@ -1587,10 +1627,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        /* We now go into synchronous reclaim */
        cpuset_memory_pressure_bump();
-
-       /*
-        * The task's cpuset might have expanded its set of allowable nodes
-        */
        p->flags |= PF_MEMALLOC;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
@@ -1633,7 +1669,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
                        preferred_zone, migratetype);
 
                if (!page && gfp_mask & __GFP_NOFAIL)
-                       congestion_wait(WRITE, HZ/50);
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (!page && (gfp_mask & __GFP_NOFAIL));
 
        return page;
@@ -1707,8 +1743,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * be using allocators in order of preference for an area that is
         * too large.
         */
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
+       if (order >= MAX_ORDER) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                return NULL;
+       }
 
        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1756,6 +1794,10 @@ rebalance:
        if (p->flags & PF_MEMALLOC)
                goto nopage;
 
+       /* Avoid allocations with no watermarks from looping endlessly */
+       if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
@@ -1781,11 +1823,13 @@ rebalance:
                                goto got_pg;
 
                        /*
-                        * The OOM killer does not trigger for high-order allocations
-                        * but if no progress is being made, there are no other
-                        * options and retrying is unlikely to help
+                        * The OOM killer does not trigger for high-order
+                        * ~__GFP_NOFAIL allocations so if no progress is being
+                        * made, there are no other options and retrying is
+                        * unlikely to help.
                         */
-                       if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
+                                               !(gfp_mask & __GFP_NOFAIL))
                                goto nopage;
 
                        goto restart;
@@ -1796,7 +1840,7 @@ rebalance:
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
-               congestion_wait(WRITE, HZ/50);
+               congestion_wait(BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        }
 
@@ -1808,7 +1852,10 @@ nopage:
                dump_stack();
                show_mem();
        }
+       return page;
 got_pg:
+       if (kmemcheck_enabled)
+               kmemcheck_pagealloc_alloc(page, order, gfp_mask);
        return page;
 
 }
@@ -1825,6 +1872,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        struct page *page;
        int migratetype = allocflags_to_migratetype(gfp_mask);
 
+       gfp_mask &= gfp_allowed_mask;
+
        lockdep_trace_alloc(gfp_mask);
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -1943,7 +1992,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
                unsigned long alloc_end = addr + (PAGE_SIZE << order);
                unsigned long used = addr + PAGE_ALIGN(size);
 
-               split_page(virt_to_page(addr), order);
+               split_page(virt_to_page((void *)addr), order);
                while (used < alloc_end) {
                        free_page(used);
                        used += PAGE_SIZE;
@@ -2493,7 +2542,6 @@ static void build_zonelists(pg_data_t *pgdat)
        prev_node = local_node;
        nodes_clear(used_mask);
 
-       memset(node_load, 0, sizeof(node_load));
        memset(node_order, 0, sizeof(node_order));
        j = 0;
 
@@ -2602,6 +2650,9 @@ static int __build_all_zonelists(void *dummy)
 {
        int nid;
 
+#ifdef CONFIG_NUMA
+       memset(node_load, 0, sizeof(node_load));
+#endif
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
 
@@ -2986,7 +3037,7 @@ bad:
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
-               zone_pcp(dzone, cpu) = NULL;
+               zone_pcp(dzone, cpu) = &boot_pageset[cpu];
        }
        return -ENOMEM;
 }
@@ -3001,7 +3052,7 @@ static inline void free_zone_pagesets(int cpu)
                /* Free per_cpu_pageset if it is slab allocated */
                if (pset != &boot_pageset[cpu])
                        kfree(pset);
-               zone_pcp(zone, cpu) = NULL;
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
        }
 }
 
@@ -3091,6 +3142,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        return 0;
 }
 
+static int __zone_pcp_update(void *data)
+{
+       struct zone *zone = data;
+       int cpu;
+       unsigned long batch = zone_batchsize(zone), flags;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
+
+               pset = zone_pcp(zone, cpu);
+               pcp = &pset->pcp;
+
+               local_irq_save(flags);
+               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               setup_pageset(pset, batch);
+               local_irq_restore(flags);
+       }
+       return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+       stop_machine(__zone_pcp_update, zone, NULL);
+}
+
 static __meminit void zone_pcp_init(struct zone *zone)
 {
        int cpu;
@@ -3992,6 +4069,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
        int i, nid;
        unsigned long usable_startpfn;
        unsigned long kernelcore_node, kernelcore_remaining;
+       /* save the state before borrowing the nodemask */
+       nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
@@ -4019,7 +4098,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 
        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
        if (!required_kernelcore)
-               return;
+               goto out;
 
        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
        find_usable_zone_for_movable();
@@ -4118,6 +4197,10 @@ restart:
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                zone_movable_pfn[nid] =
                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+       /* restore the node_state */
+       node_states[N_HIGH_MEMORY] = saved_node_state;
 }
 
 /* Any regular memory on that node ? */
@@ -4614,7 +4697,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
        ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
        if (!write || (ret == -EINVAL))
                return ret;
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                for_each_online_cpu(cpu) {
                        unsigned long  high;
                        high = zone->present_pages / percpu_pagelist_fraction;
@@ -4699,8 +4782,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                         * some pages at the end of hash table which
                         * alloc_pages_exact() automatically does
                         */
-                       if (get_order(size) < MAX_ORDER)
+                       if (get_order(size) < MAX_ORDER) {
                                table = alloc_pages_exact(size, GFP_ATOMIC);
+                               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+                       }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
 
@@ -4718,16 +4803,6 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;
 
-       /*
-        * If hashdist is set, the table allocation is done with __vmalloc()
-        * which invokes the kmemleak_alloc() callback. This function may also
-        * be called before the slab and kmemleak are initialised when
-        * kmemleak simply buffers the request to be executed later
-        * (GFP_ATOMIC flag ignored in this case).
-        */
-       if (!hashdist)
-               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
        return table;
 }
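
Worth calling out separately: the diff also introduces gfp_allowed_mask, initialised to GFP_BOOT_MASK and applied at the top of __alloc_pages_nodemask() via "gfp_mask &= gfp_allowed_mask;". The intent is that early in boot, before sleeping and block I/O are safe, flags such as __GFP_WAIT, __GFP_IO and __GFP_FS are silently stripped from every allocation, and the mask is widened once boot has progressed far enough. A rough, self-contained illustration of that masking follows; GFP_BOOT_MASK's real definition lives in include/linux/gfp.h and the later widening happens outside this file, so the exact values here are recalled/assumed rather than quoted from the diff.

	/* Illustration only -- mirrors the "gfp_mask &= gfp_allowed_mask" step. */
	#define EXAMPLE_GFP_BOOT_MASK \
		(__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

	static gfp_t example_boot_safe(gfp_t gfp_mask)
	{
		/*
		 * During early boot gfp_allowed_mask is GFP_BOOT_MASK, so a
		 * GFP_KERNEL caller is quietly downgraded to a non-blocking,
		 * non-I/O allocation; once the mask is widened this becomes
		 * a no-op.
		 */
		return gfp_mask & gfp_allowed_mask;
	}

As I read the intent, centralising this filtering in the page allocator replaces the ad-hoc early-boot masking the slab allocators previously did on their own.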