memory hotplug: update zone pcp at memory online
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5cc986e..1a3a893 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
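The hunk above makes __rmqueue_fallback() claim whole pageblocks unconditionally when page_group_by_mobility_disabled is set: on machines with too little memory for mobility grouping to help, stealing the block outright avoids repeated fallback searches. For context, that flag is set when the zonelists are built; a minimal sketch, assuming the usual size check in build_all_zonelists() (not part of this diff):

	/*
	 * Sketch: grouping by mobility only pays off when there are
	 * several pageblocks per migrate type, so it is disabled on
	 * small-memory systems.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;
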
@@ -1625,10 +1627,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        /* We now go into synchronous reclaim */
        cpuset_memory_pressure_bump();
-
-       /*
-        * The task's cpuset might have expanded its set of allowable nodes
-        */
        p->flags |= PF_MEMALLOC;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
@@ -3144,6 +3142,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        return 0;
 }
 
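+/*
+ * Drain each CPU's pages back to the buddy lists and re-initialise the
+ * pagesets with a batch size recomputed for the zone's new size.  This
+ * runs via stop_machine(), so no other CPU can allocate from or free to
+ * the pagesets while they are being rebuilt.
+ */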
+static int __zone_pcp_update(void *data)
+{
+       struct zone *zone = data;
+       int cpu;
+       unsigned long batch = zone_batchsize(zone), flags;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
+
+               pset = zone_pcp(zone, cpu);
+               pcp = &pset->pcp;
+
+               local_irq_save(flags);
+               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               setup_pageset(pset, batch);
+               local_irq_restore(flags);
+       }
+       return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+       stop_machine(__zone_pcp_update, zone, NULL);
+}
+
 static __meminit void zone_pcp_init(struct zone *zone)
 {
        int cpu;
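
Only the mm/page_alloc.c side of the change is shown above. Per the commit subject, zone_pcp_update() is meant to be called when memory is onlined, once the zone has grown and the batch size computed at boot is stale. A minimal sketch of such a caller in online_pages() (mm/memory_hotplug.c); the surrounding lines are an assumption, not part of this page:

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	/*
	 * Sketch: the zone just grew, so the batch size zone_batchsize()
	 * computed earlier no longer fits; rebuild the per-cpu pagesets.
	 */
	zone_pcp_update(zone);

stop_machine() makes zone_pcp_update() an expensive operation, but memory online events are rare, so draining and rebuilding every CPU's pageset with the machine stopped is a reasonable trade-off against adding locking to the hot per-cpu allocation path.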