mm: fix section mismatch warnings
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd77451..8b000d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -691,43 +691,26 @@ static void __init setup_nr_node_ids(void) {}
 
 #ifdef CONFIG_NUMA
 /*
- * Called from the slab reaper to drain pagesets on a particular node that
- * belongs to the currently executing processor.
+ * Called from the vmstat counter updater to drain pagesets of this
+ * currently executing processor on remote nodes after they have
+ * expired.
+ *
  * Note that this function must be called with the thread pinned to
  * a single processor.
  */
-void drain_node_pages(int nodeid)
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
-       int i;
-       enum zone_type z;
        unsigned long flags;
+       int to_drain;
 
-       for (z = 0; z < MAX_NR_ZONES; z++) {
-               struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
-               struct per_cpu_pageset *pset;
-
-               if (!populated_zone(zone))
-                       continue;
-
-               pset = zone_pcp(zone, smp_processor_id());
-               for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-                       struct per_cpu_pages *pcp;
-
-                       pcp = &pset->pcp[i];
-                       if (pcp->count) {
-                               int to_drain;
-
-                               local_irq_save(flags);
-                               if (pcp->count >= pcp->batch)
-                                       to_drain = pcp->batch;
-                               else
-                                       to_drain = pcp->count;
-                               free_pages_bulk(zone, to_drain, &pcp->list, 0);
-                               pcp->count -= to_drain;
-                               local_irq_restore(flags);
-                       }
-               }
-       }
+       local_irq_save(flags);
+       if (pcp->count >= pcp->batch)
+               to_drain = pcp->batch;
+       else
+               to_drain = pcp->count;
+       free_pages_bulk(zone, to_drain, &pcp->list, 0);
+       pcp->count -= to_drain;
+       local_irq_restore(flags);
 }
 #endif
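
A note on the new arrangement: drain_zone_pages() is now driven from the vmstat counter updater instead of the slab reaper. For context, a rough sketch of what the caller side might look like (a reconstruction, not part of this diff; the expire countdown and the zone walk are assumptions about mm/vmstat.c):

	/* Sketch: runs with the thread pinned to one CPU, as the comment
	 * above requires, and drains only remote pagesets whose
	 * expiration countdown has run out.
	 */
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *p = zone_pcp(zone, smp_processor_id());

		if (zone_to_nid(zone) == numa_node_id())
			continue;	/* never drain zones local to this node */

		if (!p->expire || --p->expire)
			continue;	/* pageset has not expired yet */

		if (p->pcp[0].count)
			drain_zone_pages(zone, &p->pcp[0]);
	}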
 
@@ -2148,11 +2131,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                if (process_zones(cpu))
                        ret = NOTIFY_BAD;
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                free_zone_pagesets(cpu);
                break;
        default:
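
The new *_FROZEN cases cover hotplug notifications generated while tasks are frozen for suspend/resume. Each frozen action is the normal action with the CPU_TASKS_FROZEN bit set; paraphrasing include/linux/notifier.h of this era (a sketch, not part of this diff):

	#define CPU_TASKS_FROZEN	0x0010

	#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
	#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
	#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

Since pageset setup and teardown are the same either way, the patch simply adds the frozen twins to the existing case labels.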
@@ -2179,7 +2165,7 @@ void __init setup_per_cpu_pageset(void)
 
 #endif
 
-static __meminit
+static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
        int i;
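
Why noinline __init_refok: zone_wait_table_init() calls into the bootmem allocator, which lives in .init.text, so modpost flags the reference as a section mismatch even though the call is only reached before init memory is freed. __init_refok puts the function in a section that modpost accepts as a legitimate user of init code, and noinline keeps the compiler from inlining the body into an unannotated caller, which would reintroduce the warning. A minimal illustration of the pattern (hypothetical names, not from this patch):

	void * __init early_table_alloc(unsigned long size);	/* init-only allocator */

	/* Reached only before init memory is freed, so the reference into
	 * .init.text is intentional; __init_refok tells modpost so.
	 */
	static noinline __init_refok void *table_alloc(unsigned long size)
	{
		return early_table_alloc(size);
	}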
@@ -2298,7 +2284,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __init early_pfn_to_nid(unsigned long pfn)
+int __meminit early_pfn_to_nid(unsigned long pfn)
 {
        int i;
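
early_pfn_to_nid() moves the other way: it is called from __meminit code, which stays resident when CONFIG_MEMORY_HOTPLUG is enabled, so it must not be discarded as plain __init. Roughly, per include/linux/init.h of this era (paraphrased sketch):

	#ifdef CONFIG_MEMORY_HOTPLUG
	#define __meminit		/* hotplug may call it: keep resident */
	#else
	#define __meminit __init	/* discarded with init memory */
	#endif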
 
@@ -2692,7 +2678,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
        }
 }
 
-static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 {
        /* Skip empty nodes */
        if (!pgdat->node_spanned_pages)
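
alloc_node_mem_map() gets the same treatment as zone_wait_table_init(): it allocates the node's mem_map from the bootmem allocator, yet it is reachable from code kept resident for memory hotplug, so __init_refok whitelists the reference rather than dragging the whole call chain into .init.text. The init reference in outline (a sketch; the real function also handles pfn alignment and the FLAT_NODE_MEM_MAP case):

	if (!pgdat->node_mem_map) {
		unsigned long size = pgdat->node_spanned_pages *
						sizeof(struct page);

		/* alloc_bootmem_node() lives in .init.text */
		pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
	}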
@@ -3012,7 +2998,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 {
        int cpu = (unsigned long)hcpu;
 
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                local_irq_disable();
                __drain_pages(cpu);
                vm_events_fold_cpu(cpu);
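
This is the freeze-aware counterpart of the pageset_cpuup_callback() change above: CPU_DEAD_FROZEN is CPU_DEAD with the CPU_TASKS_FROZEN bit set, so the test could equivalently mask the bit off (a sketch only; the patch keeps the explicit comparison):

	if ((action & ~CPU_TASKS_FROZEN) == CPU_DEAD) {
		/* drain per-cpu pages and fold counters, as above */
	}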