diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30d5093..a0de15f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * agressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
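For context on the hunk above: a pageblock spans 1 << pageblock_order pages, so the unchanged claim threshold of 1 << (pageblock_order - 1) is exactly half a block, and the new page_group_by_mobility_disabled test simply claims the block unconditionally once mobility grouping is switched off. A rough userspace sketch of that arithmetic, assuming a hypothetical pageblock_order of 10:

/* Illustrative only: the claim threshold is half a pageblock. */
#include <stdio.h>

#define PAGEBLOCK_ORDER 10      /* assumed value for this sketch */

int main(void)
{
        unsigned long block_pages = 1UL << PAGEBLOCK_ORDER;
        unsigned long threshold = 1UL << (PAGEBLOCK_ORDER - 1);

        /* with order 10: 1024 pages per block, claim once 512 have moved */
        printf("pages per block %lu, claim threshold %lu\n",
               block_pages, threshold);
        return 0;
}
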
@@ -882,7 +884,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;
        
@@ -901,7 +903,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
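
The new cold argument only decides which end of the per-cpu list freshly refilled pages land on: head for hot requests, tail for cold ones, so cache-hot pages keep being handed out first. A self-contained sketch of the same head-versus-tail insertion on a circular doubly linked list (a simplified stand-in for struct list_head, not the real kernel API):

/* Sketch: head vs tail insertion, mirroring list_add() vs list_add_tail(). */
#include <stdio.h>

struct node { int id; struct node *prev, *next; };

static void insert_after(struct node *new, struct node *pos)
{
        new->next = pos->next;
        new->prev = pos;
        pos->next->prev = new;
        pos->next = new;
}

int main(void)
{
        struct node head = { 0, &head, &head };  /* empty circular list */
        struct node hot = { 1 };
        struct node cold = { 2 };

        insert_after(&hot, &head);       /* like list_add(): at the head */
        insert_after(&cold, head.prev);  /* like list_add_tail(): at the tail */

        for (struct node *n = head.next; n != &head; n = n->next)
                printf("page %d\n", n->id);      /* prints 1 then 2 */
        return 0;
}
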
@@ -1119,7 +1124,8 @@ again:
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        if (unlikely(!pcp->count))
                                goto failed;
                }
@@ -1138,7 +1144,8 @@ again:
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        page = list_entry(pcp->list.next, struct page, lru);
                }
 
@@ -1153,10 +1160,10 @@ again:
                         * properly detect and handle allocation failures.
                         *
                         * We most definitely don't want callers attempting to
-                        * allocate greater than single-page units with
+                        * allocate greater than order-1 page units with
                         * __GFP_NOFAIL.
                         */
-                       WARN_ON_ONCE(order > 0);
+                       WARN_ON_ONCE(order > 1);
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
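
On the relaxed warning above: an order-n request covers 1 << n contiguous pages, so __GFP_NOFAIL is now tolerated for order 0 and order 1 (one or two pages) and only order 2 and above trips the warning. A trivial check of that predicate, purely illustrative:

/* The warning now fires only for order > 1, i.e. more than two pages. */
#include <stdio.h>

int main(void)
{
        for (unsigned int order = 0; order <= 3; order++)
                printf("order %u = %2u pages, warn: %s\n",
                       order, 1U << order, order > 1 ? "yes" : "no");
        return 0;
}
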
@@ -1666,7 +1673,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
                        preferred_zone, migratetype);
 
                if (!page && gfp_mask & __GFP_NOFAIL)
-                       congestion_wait(WRITE, HZ/50);
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (!page && (gfp_mask & __GFP_NOFAIL));
 
        return page;
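
Both congestion_wait() call sites in this diff keep the HZ/50 timeout and only swap the stale WRITE constant for BLK_RW_ASYNC, matching the block layer's split into sync and async congestion queues. For reference, HZ/50 jiffies works out to a roughly 20 ms backoff for the usual HZ configurations; a quick arithmetic check, nothing kernel-specific:

/* HZ/50 jiffies is about a 20 ms wait for the common HZ settings. */
#include <stdio.h>

int main(void)
{
        const int hz_values[] = { 100, 250, 300, 1000 };

        for (int i = 0; i < 4; i++) {
                int hz = hz_values[i];
                printf("HZ=%-4d -> %2d jiffies = %d ms\n",
                       hz, hz / 50, (hz / 50) * 1000 / hz);
        }
        return 0;
}
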
@@ -1740,8 +1747,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * be using allocators in order of preference for an area that is
         * too large.
         */
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
+       if (order >= MAX_ORDER) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                return NULL;
+       }
 
        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1789,6 +1798,10 @@ rebalance:
        if (p->flags & PF_MEMALLOC)
                goto nopage;
 
+       /* Avoid allocations with no watermarks from looping endlessly */
+       if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
@@ -1831,7 +1844,7 @@ rebalance:
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
-               congestion_wait(WRITE, HZ/50);
+               congestion_wait(BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        }
 
@@ -1983,7 +1996,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
                unsigned long alloc_end = addr + (PAGE_SIZE << order);
                unsigned long used = addr + PAGE_ALIGN(size);
 
-               split_page(virt_to_page(addr), order);
+               split_page(virt_to_page((void *)addr), order);
                while (used < alloc_end) {
                        free_page(used);
                        used += PAGE_SIZE;
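
The hunk above only adds a cast for virt_to_page(), but it sits in alloc_pages_exact()'s trimming path: round the request up to a power-of-two order, split the high-order allocation, then free every page beyond PAGE_ALIGN(size). A userspace sketch of just that arithmetic, with a hypothetical request size and a simplified get_order():

/* Sketch of the trimming arithmetic in alloc_pages_exact(). */
#include <stdio.h>

#define PAGE_SIZE     4096UL             /* assumed page size */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned int get_order(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 5 * PAGE_SIZE + 100;   /* hypothetical request */
        unsigned int order = get_order(size);       /* order 3, 8 pages */
        unsigned long used = PAGE_ALIGN(size);      /* 6 pages really needed */
        unsigned long freed = ((PAGE_SIZE << order) - used) / PAGE_SIZE;

        printf("order %u (%lu pages), tail pages freed: %lu\n",
               order, PAGE_SIZE << order, freed);   /* 2 pages handed back */
        return 0;
}
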
@@ -2533,7 +2546,6 @@ static void build_zonelists(pg_data_t *pgdat)
        prev_node = local_node;
        nodes_clear(used_mask);
 
-       memset(node_load, 0, sizeof(node_load));
        memset(node_order, 0, sizeof(node_order));
        j = 0;
 
@@ -2642,6 +2654,9 @@ static int __build_all_zonelists(void *dummy)
 {
        int nid;
 
+#ifdef CONFIG_NUMA
+       memset(node_load, 0, sizeof(node_load));
+#endif
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
 
@@ -3026,7 +3041,7 @@ bad:
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
-               zone_pcp(dzone, cpu) = NULL;
+               zone_pcp(dzone, cpu) = &boot_pageset[cpu];
        }
        return -ENOMEM;
 }
@@ -3041,7 +3056,7 @@ static inline void free_zone_pagesets(int cpu)
                /* Free per_cpu_pageset if it is slab allocated */
                if (pset != &boot_pageset[cpu])
                        kfree(pset);
-               zone_pcp(zone, cpu) = NULL;
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
        }
 }
 
@@ -4032,6 +4047,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
        int i, nid;
        unsigned long usable_startpfn;
        unsigned long kernelcore_node, kernelcore_remaining;
+       /* save the state before borrow the nodemask */
+       nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
@@ -4059,7 +4076,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 
        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
        if (!required_kernelcore)
-               return;
+               goto out;
 
        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
        find_usable_zone_for_movable();
@@ -4158,6 +4175,10 @@ restart:
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                zone_movable_pfn[nid] =
                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+       /* restore the node_state */
+       node_states[N_HIGH_MEMORY] = saved_node_state;
 }
 
 /* Any regular memory on that node ? */
@@ -4242,11 +4263,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                                                early_node_map[i].start_pfn,
                                                early_node_map[i].end_pfn);
 
-       /*
-        * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
-        * that node_mask, clear it at first
-        */
-       nodes_clear(node_states[N_HIGH_MEMORY]);
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
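
The four hunks above belong together: find_zone_movable_pfns_for_nodes() now snapshots node_states[N_HIGH_MEMORY] before early_calculate_totalpages() borrows it as scratch space, restores it through the new out: label on every exit path, and the nodes_clear() that free_area_init_nodes() previously had to do up front becomes unnecessary. A compact sketch of that borrow-and-restore pattern on a plain bitmask (hypothetical names, not the kernel API):

/* Copy the shared mask, clobber the live one during the computation,
 * restore it on every exit path via a single label. */
#include <stdio.h>

static unsigned long high_memory_nodes = 0xf;    /* stand-in for node_states[] */

static void find_movable_pfns(int required_kernelcore)
{
        unsigned long saved_state = high_memory_nodes;  /* save before borrowing */

        high_memory_nodes &= 0x3;        /* pretend totalpages calculation */
        if (!required_kernelcore)
                goto out;                /* early return still restores */

        high_memory_nodes |= 0x8;        /* pretend kernelcore spreading */
out:
        high_memory_nodes = saved_state; /* callers see the mask untouched */
}

int main(void)
{
        find_movable_pfns(0);
        printf("after early exit: %#lx\n", high_memory_nodes);  /* 0xf */
        find_movable_pfns(1);
        printf("after full run:   %#lx\n", high_memory_nodes);  /* 0xf */
        return 0;
}
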
@@ -4659,7 +4675,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
        ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
        if (!write || (ret == -EINVAL))
                return ret;
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                for_each_online_cpu(cpu) {
                        unsigned long  high;
                        high = zone->present_pages / percpu_pagelist_fraction;
@@ -4744,8 +4760,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                         * some pages at the end of hash table which
                         * alloc_pages_exact() automatically does
                         */
-                       if (get_order(size) < MAX_ORDER)
+                       if (get_order(size) < MAX_ORDER) {
                                table = alloc_pages_exact(size, GFP_ATOMIC);
+                               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+                       }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
 
@@ -4763,16 +4781,6 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;
 
-       /*
-        * If hashdist is set, the table allocation is done with __vmalloc()
-        * which invokes the kmemleak_alloc() callback. This function may also
-        * be called before the slab and kmemleak are initialised when
-        * kmemleak simply buffers the request to be executed later
-        * (GFP_ATOMIC flag ignored in this case).
-        */
-       if (!hashdist)
-               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
        return table;
 }