include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ec9eeb8..6162032 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
+#include <linux/slab.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/meminit.h>
  */
 struct early_node_data {
        struct ia64_node_data *node_data;
-       pg_data_t *pgdat;
        unsigned long pernode_addr;
        unsigned long pernode_size;
-       struct bootmem_data bootmem_data;
        unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
        unsigned long num_dma_physpages;
+#endif
        unsigned long min_pfn;
        unsigned long max_pfn;
 };
@@ -46,6 +48,8 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
+pg_data_t *pgdat_list[MAX_NUMNODES];
+
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
  * start at addresses that are strided by node number.
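
Annotation (not part of the patch): the striding this comment describes is just an additive per-node offset, `node * L1_CACHE_BYTES`, so the same structure on different nodes never starts at the same cache-set alignment. A minimal userspace sketch; the line size is a stand-in value:

```c
#include <stdio.h>

#define L1_CACHE_BYTES 128UL	/* assumption: a typical L1 line size */

int main(void)
{
	/* per-node structures start at addresses strided by node number */
	for (int node = 0; node < 4; node++)
		printf("node %d: pernode base shifted by %lu bytes\n",
		       node, node * L1_CACHE_BYTES);
	return 0;
}
```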
@@ -71,23 +75,20 @@ static nodemask_t memory_less_mask __initdata;
 static int __init build_node_maps(unsigned long start, unsigned long len,
                                  int node)
 {
-       unsigned long cstart, epfn, end = start + len;
-       struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+       unsigned long spfn, epfn, end = start + len;
+       struct bootmem_data *bdp = &bootmem_node_data[node];
 
        epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
-       cstart = GRANULEROUNDDOWN(start);
+       spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
 
        if (!bdp->node_low_pfn) {
-               bdp->node_boot_start = cstart;
+               bdp->node_min_pfn = spfn;
                bdp->node_low_pfn = epfn;
        } else {
-               bdp->node_boot_start = min(cstart, bdp->node_boot_start);
+               bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
                bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
        }
 
-       min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
-       max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
        return 0;
 }
 
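Annotation (not part of the patch): the change above keeps node_min_pfn/node_low_pfn directly in page frame numbers instead of a byte address plus a pfn. The rounding arithmetic can be checked in isolation; a sketch with stand-in constants (16KB pages and 16MB granules are assumptions, not taken from the patch):

```c
#include <stdio.h>

#define PAGE_SHIFT	14UL			/* assumption: 16KB pages */
#define GRANULE_SIZE	(1UL << 24)		/* assumption: 16MB granule */
#define GRANULEROUNDDOWN(a) ((a) & ~(GRANULE_SIZE - 1))
#define GRANULEROUNDUP(a)   (((a) + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x4123456, len = 0x2000000;
	unsigned long end = start + len;
	unsigned long spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
	unsigned long epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;

	printf("range [%#lx,%#lx) -> pfns [%lu,%lu)\n", start, end, spfn, epfn);
	return 0;
}
```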
@@ -99,11 +100,11 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet.  Note that node 0 will also count all non-existent cpus.
  */
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
 
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
 {
        unsigned long pernodesize = 0, cpus;
 
@@ -123,6 +124,7 @@ static unsigned long __init compute_pernodesize(int node)
        pernodesize += node * L1_CACHE_BYTES;
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+       pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize = PAGE_ALIGN(pernodesize);
        return pernodesize;
 }
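
Annotation (not part of the patch): compute_pernodesize() sums the per-node layout, and the pg_data_t-sized slot is counted twice on purpose, mirroring the two `pernode += L1_CACHE_ALIGN(sizeof(pg_data_t))` steps in fill_pernode(). A sketch of the same sum with invented stand-in sizes:

```c
#include <stdio.h>

#define PAGE_SIZE	 16384UL	/* assumption: 16KB pages */
#define PERCPU_PAGE_SIZE 65536UL	/* assumption */
#define L1_CACHE_BYTES	 128UL		/* assumption */
#define ALIGN_UP(x, a)	 (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long node = 1, cpus = 4;
	unsigned long sz_pgdat = 4096, sz_node_data = 512;	/* stand-ins */
	unsigned long pernodesize = 0;

	pernodesize += PERCPU_PAGE_SIZE * cpus;		    /* per-cpu areas */
	pernodesize += node * L1_CACHE_BYTES;		    /* anti-alias stride */
	pernodesize += ALIGN_UP(sz_pgdat, L1_CACHE_BYTES);  /* pg_data_t */
	pernodesize += ALIGN_UP(sz_node_data, L1_CACHE_BYTES);
	pernodesize += ALIGN_UP(sz_pgdat, L1_CACHE_BYTES);  /* second slot */
	pernodesize = ALIGN_UP(pernodesize, PAGE_SIZE);
	printf("pernodesize = %lu\n", pernodesize);
	return 0;
}
```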
@@ -141,19 +143,121 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               if (node == node_cpuid[cpu].nid) {
-                       memcpy(__va(cpu_data), __phys_per_cpu_start,
-                              __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-                               __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-               }
+       for_each_possible_early_cpu(cpu) {
+               void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+               if (node != node_cpuid[cpu].nid)
+                       continue;
+
+               memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+               __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+                       __per_cpu_start;
+
+               /*
+                * The percpu area for cpu0 is moved from the __init
+                * area, which is set up by head.S and used till this
+                * point.  Update ar.k3.  This move ensures that the
+                * percpu area for cpu0 is on the correct node and that
+                * its virtual address isn't insanely far from the
+                * other percpu areas, which is important for the
+                * congruent percpu allocator.
+                */
+               if (cpu == 0)
+                       ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                                   (unsigned long)cpu_data -
+                                   (unsigned long)__per_cpu_start);
+
+               cpu_data += PERCPU_PAGE_SIZE;
        }
 #endif
        return cpu_data;
 }
 
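Annotation (not part of the patch): the loop above gives each cpu a private copy of the static percpu template and records the delta from __per_cpu_start; a percpu variable is then reached by adding that delta to its template address. A userspace model of the idiom (all names here are invented):

```c
#include <stdio.h>
#include <string.h>

static char template[64] = "per-cpu template";	/* __per_cpu_start.. */
static char area[4][64];			/* one copy per "cpu" */
static long offset[4];				/* models __per_cpu_offset[] */

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++) {
		memcpy(area[cpu], template, sizeof(template));
		offset[cpu] = area[cpu] - template;	/* delta, kernel idiom */
	}
	/* any address inside the template + offset[cpu] hits cpu's copy */
	printf("cpu 2 reads: %s\n", template + offset[2]);
	return 0;
}
```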
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized the percpu areas.
+ * All this function has to do is teach the already-determined layout
+ * to the dynamic percpu allocator, which turns out to be more complex
+ * than creating whole new areas with the helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+       struct pcpu_alloc_info *ai;
+       struct pcpu_group_info *uninitialized_var(gi);
+       unsigned int *cpu_map;
+       void *base;
+       unsigned long base_offset;
+       unsigned int cpu;
+       ssize_t static_size, reserved_size, dyn_size;
+       int node, prev_node, unit, nr_units, rc;
+
+       ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+       if (!ai)
+               panic("failed to allocate pcpu_alloc_info");
+       cpu_map = ai->groups[0].cpu_map;
+
+       /* determine base */
+       base = (void *)ULONG_MAX;
+       for_each_possible_cpu(cpu)
+               base = min(base,
+                          (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+       base_offset = (void *)__per_cpu_start - base;
+
+       /* build cpu_map, units are grouped by node */
+       unit = 0;
+       for_each_node(node)
+               for_each_possible_cpu(cpu)
+                       if (node == node_cpuid[cpu].nid)
+                               cpu_map[unit++] = cpu;
+       nr_units = unit;
+
+       /* set basic parameters */
+       static_size = __per_cpu_end - __per_cpu_start;
+       reserved_size = PERCPU_MODULE_RESERVE;
+       dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+       if (dyn_size < 0)
+               panic("percpu area overflow static=%zd reserved=%zd\n",
+                     static_size, reserved_size);
+
+       ai->static_size         = static_size;
+       ai->reserved_size       = reserved_size;
+       ai->dyn_size            = dyn_size;
+       ai->unit_size           = PERCPU_PAGE_SIZE;
+       ai->atom_size           = PAGE_SIZE;
+       ai->alloc_size          = PERCPU_PAGE_SIZE;
+
+       /*
+        * CPUs are put into groups according to node.  Walk cpu_map
+        * and create new groups at node boundaries.
+        */
+       prev_node = -1;
+       ai->nr_groups = 0;
+       for (unit = 0; unit < nr_units; unit++) {
+               cpu = cpu_map[unit];
+               node = node_cpuid[cpu].nid;
+
+               if (node == prev_node) {
+                       gi->nr_units++;
+                       continue;
+               }
+               prev_node = node;
+
+               gi = &ai->groups[ai->nr_groups++];
+               gi->nr_units            = 1;
+               gi->base_offset         = __per_cpu_offset[cpu] + base_offset;
+               gi->cpu_map             = &cpu_map[unit];
+       }
+
+       rc = pcpu_setup_first_chunk(ai, base);
+       if (rc)
+               panic("failed to setup percpu area (err=%d)", rc);
+
+       pcpu_free_alloc_info(ai);
+}
+#endif
+
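Annotation (not part of the patch): the group-building loop in setup_per_cpu_areas() walks the node-sorted cpu_map and opens a new pcpu_group_info whenever the node changes; units on the same node just grow the current group. A standalone model of that boundary detection (the cpu-to-node map is hypothetical):

```c
#include <stdio.h>

int main(void)
{
	/* cpu_map is already sorted by node, as built above */
	int nid[6] = { 0, 0, 1, 1, 1, 2 };	/* hypothetical topology */
	int prev_node = -1, nr_groups = 0;

	for (int unit = 0; unit < 6; unit++) {
		if (nid[unit] == prev_node)
			continue;		/* grow the current group */
		prev_node = nid[unit];
		printf("group %d: first unit %d on node %d\n",
		       nr_groups++, unit, prev_node);
	}
	return 0;
}
```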
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -165,7 +269,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
 {
        void *cpu_data;
        int cpus = early_nr_cpus_node(node);
-       struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+       struct bootmem_data *bdp = &bootmem_node_data[node];
 
        mem_data[node].pernode_addr = pernode;
        mem_data[node].pernode_size = pernodesize;
@@ -175,13 +279,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
        pernode += PERCPU_PAGE_SIZE * cpus;
        pernode += node * L1_CACHE_BYTES;
 
-       mem_data[node].pgdat = __va(pernode);
+       pgdat_list[node] = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
        mem_data[node].node_data = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-       mem_data[node].pgdat->bdata = bdp;
+       pgdat_list[node]->bdata = bdp;
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
        cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -220,20 +324,21 @@ static void __init fill_pernode(int node, unsigned long pernode,
 static int __init find_pernode_space(unsigned long start, unsigned long len,
                                     int node)
 {
-       unsigned long epfn;
+       unsigned long spfn, epfn;
        unsigned long pernodesize = 0, pernode, pages, mapsize;
-       struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+       struct bootmem_data *bdp = &bootmem_node_data[node];
 
+       spfn = start >> PAGE_SHIFT;
        epfn = (start + len) >> PAGE_SHIFT;
 
-       pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
+       pages = bdp->node_low_pfn - bdp->node_min_pfn;
        mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
 
        /*
         * Make sure this memory falls within this node's usable memory
         * since we may have thrown some away in build_node_maps().
         */
-       if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
+       if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
                return 0;
 
        /* Don't setup this node's local space twice... */
@@ -268,7 +373,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 static int __init free_node_bootmem(unsigned long start, unsigned long len,
                                    int node)
 {
-       free_bootmem_node(mem_data[node].pgdat, start, len);
+       free_bootmem_node(pgdat_list[node], start, len);
 
        return 0;
 }
@@ -287,7 +392,7 @@ static void __init reserve_pernode_space(void)
        int node;
 
        for_each_online_node(node) {
-               pg_data_t *pdp = mem_data[node].pgdat;
+               pg_data_t *pdp = pgdat_list[node];
 
                if (node_isset(node, memory_less_mask))
                        continue;
@@ -295,15 +400,36 @@ static void __init reserve_pernode_space(void)
                bdp = pdp->bdata;
 
                /* First the bootmem_map itself */
-               pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
+               pages = bdp->node_low_pfn - bdp->node_min_pfn;
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
 
                /* Now the per-node space */
                size = mem_data[node].pernode_size;
                base = __pa(mem_data[node].pernode_addr);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+       }
+}
+
+static void __meminit scatter_node_data(void)
+{
+       pg_data_t **dst;
+       int node;
+
+       /*
+        * for_each_online_node() can't be used here.
+        * node_online_map is not yet set for hot-added nodes at this
+        * time, because we are halfway through initialization of the
+        * new node's structures.  If for_each_online_node() were used,
+        * a new node's pg_data_ptrs would not be initialized.  Instead,
+        * pgdat_list[] is checked.
+        */
+       for_each_node(node) {
+               if (pgdat_list[node]) {
+                       dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+                       memcpy(dst, pgdat_list, sizeof(pgdat_list));
+               }
        }
 }
 
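Annotation (not part of the patch): scatter_node_data() gives every node a node-local copy of the full pgdat table, so looking up another node's pg_data_t never chases a remote pointer. A minimal model of the replication step (array sizes and values are stand-ins):

```c
#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES 4

struct node_local { void *pg_data_ptrs[MAX_NUMNODES]; };

int main(void)
{
	void *pgdat_list[MAX_NUMNODES] = { (void *)1, (void *)2, 0, 0 };
	struct node_local local[MAX_NUMNODES];

	for (int node = 0; node < MAX_NUMNODES; node++)
		if (pgdat_list[node])	/* only nodes that exist */
			memcpy(local[node].pg_data_ptrs, pgdat_list,
			       sizeof(pgdat_list));
	printf("node 1's copy of pgdat 0: %p\n", local[1].pg_data_ptrs[0]);
	return 0;
}
```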
@@ -317,22 +443,16 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-       pg_data_t *pgdat_list[MAX_NUMNODES];
        int cpu, node;
 
-       for_each_online_node(node)
-               pgdat_list[node] = mem_data[node].pgdat;
+       scatter_node_data();
 
-       /* Copy the pg_data_t list to each node and init the node field */
-       for_each_online_node(node) {
-               memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
-                      sizeof(pgdat_list));
-       }
 #ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
-               per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+               per_cpu(ia64_cpu_info, cpu).node_data =
+                       mem_data[node].node_data;
        }
 #else
        {
@@ -340,7 +460,7 @@ static void __init initialize_pernode_data(void)
                cpu = 0;
                node = node_cpuid[cpu].nid;
                cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-                       ((char *)&per_cpu__cpu_info - __per_cpu_start));
+                       ((char *)&ia64_cpu_info - __per_cpu_start));
                cpu0_cpu_info->node_data = mem_data[node].node_data;
        }
 #endif /* CONFIG_SMP */
@@ -372,7 +492,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
        if (bestnode == -1)
                bestnode = anynode;
 
-       ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+       ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
                PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 
        return ptr;
@@ -397,37 +517,6 @@ static void __init memory_less_nodes(void)
        return;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
-       void *arg)
-{
-       int nid;
-
-       start = __pa(start) >> PAGE_SHIFT;
-       end = __pa(end) >> PAGE_SHIFT;
-       nid = early_pfn_to_nid(start);
-       memory_present(nid, start, end);
-
-       return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
-       efi_memmap_walk(register_sparse_mem, NULL);
-       sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -452,12 +541,16 @@ void __init find_memory(void)
        /* These actually end up getting called by call_pernode_memory() */
        efi_memmap_walk(filter_rsvd_memory, build_node_maps);
        efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+       efi_memmap_walk(find_max_min_low_pfn, NULL);
 
        for_each_online_node(node)
-               if (mem_data[node].bootmem_data.node_low_pfn) {
+               if (bootmem_node_data[node].node_low_pfn) {
                        node_clear(node, memory_less_mask);
                        mem_data[node].min_pfn = ~0UL;
                }
+
+       efi_memmap_walk(filter_memory, register_active_ranges);
+
        /*
         * Initialize the boot memory maps in reverse order since that's
         * what the bootmem allocator expects
@@ -471,14 +564,14 @@ void __init find_memory(void)
                else if (node_isset(node, memory_less_mask))
                        continue;
 
-               bdp = &mem_data[node].bootmem_data;
+               bdp = &bootmem_node_data[node];
                pernode = mem_data[node].pernode_addr;
                pernodesize = mem_data[node].pernode_size;
                map = pernode + pernodesize;
 
-               init_bootmem_node(mem_data[node].pgdat,
+               init_bootmem_node(pgdat_list[node],
                                  map>>PAGE_SHIFT,
-                                 bdp->node_boot_start>>PAGE_SHIFT,
+                                 bdp->node_min_pfn,
                                  bdp->node_low_pfn);
        }
 
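Annotation (not part of the patch): find_memory() drives all of the node-map and pernode-space setup through efi_memmap_walk() callbacks that ultimately receive a (start, len, node) range via call_pernode_memory(). A sketch of that callback shape; the toy walker and the ranges are invented for illustration:

```c
#include <stdio.h>

typedef int (*range_cb)(unsigned long start, unsigned long len, int node);

/* toy walker standing in for efi_memmap_walk(filter, callback) */
static void walk(range_cb cb)
{
	cb(0x0000000, 0x4000000, 0);
	cb(0x8000000, 0x2000000, 1);
}

static int print_range(unsigned long start, unsigned long len, int node)
{
	printf("node %d: [%#lx, %#lx)\n", node, start, start + len);
	return 0;
}

int main(void)
{
	walk(print_range);
	return 0;
}
```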
@@ -505,13 +598,9 @@ void __cpuinit *per_cpu_init(void)
        int cpu;
        static int first_time = 1;
 
-
-       if (smp_processor_id() != 0)
-               return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
        if (first_time) {
                first_time = 0;
-               for (cpu = 0; cpu < NR_CPUS; cpu++)
+               for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }
 
@@ -532,23 +621,27 @@ void show_mem(void)
        unsigned long total_present = 0;
        pg_data_t *pgdat;
 
-       printk("Mem-info:\n");
+       printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       printk(KERN_INFO "Node memory in pages:\n");
        for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int shared = 0, cached = 0, reserved = 0;
 
-               printk("Node ID: %d\n", pgdat->node_id);
                pgdat_resize_lock(pgdat, &flags);
                present = pgdat->node_present_pages;
                for(i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page;
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+                               touch_nmi_watchdog();
                        if (pfn_valid(pgdat->node_start_pfn + i))
                                page = pfn_to_page(pgdat->node_start_pfn + i);
-                       else
+                       else {
+                               i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+                                        i) - 1;
                                continue;
+                       }
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
@@ -561,18 +654,17 @@ void show_mem(void)
                total_reserved += reserved;
                total_cached += cached;
                total_shared += shared;
-               printk("\t%ld pages of RAM\n", present);
-               printk("\t%d reserved pages\n", reserved);
-               printk("\t%d pages shared\n", shared);
-               printk("\t%d pages swap cached\n", cached);
+               printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+                      "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+                      present, reserved, shared, cached);
        }
-       printk("%ld pages of RAM\n", total_present);
-       printk("%d reserved pages\n", total_reserved);
-       printk("%d pages shared\n", total_shared);
-       printk("%d pages swap cached\n", total_cached);
-       printk("Total of %ld pages in page table cache\n",
-               pgtable_quicklist_total_size());
-       printk("%d free buffer pages\n", nr_free_buffer_pages());
+       printk(KERN_INFO "%ld pages of RAM\n", total_present);
+       printk(KERN_INFO "%d reserved pages\n", total_reserved);
+       printk(KERN_INFO "%d pages shared\n", total_shared);
+       printk(KERN_INFO "%d pages swap cached\n", total_cached);
+       printk(KERN_INFO "Total of %ld pages in page table cache\n",
+              quicklist_total_size());
+       printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
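Annotation (not part of the patch): the `- 1` in the hole-skip added to show_mem() compensates for the loop's `i++`, so the next iteration lands exactly on the pfn that vmemmap_find_next_valid_pfn() returned. The same pattern in miniature (valid[] is a made-up hole map):

```c
#include <stdio.h>

static int find_next_valid(const int *valid, int n, int i)
{
	while (i < n && !valid[i])
		i++;
	return i;
}

int main(void)
{
	int valid[10] = { 1, 1, 0, 0, 0, 1, 1, 0, 1, 1 };

	for (int i = 0; i < 10; i++) {
		if (!valid[i]) {
			i = find_next_valid(valid, 10, i) - 1;
			continue;	/* i++ lands on the valid entry */
		}
		printf("visiting %d\n", i);
	}
	return 0;
}
```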
 /**
@@ -637,11 +729,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
        unsigned long end = start + len;
 
        mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
        if (start <= __pa(MAX_DMA_ADDRESS))
                mem_data[node].num_dma_physpages +=
                        (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+#endif
        start = GRANULEROUNDDOWN(start);
-       start = ORDERROUNDDOWN(start);
        end = GRANULEROUNDUP(end);
        mem_data[node].max_pfn = max(mem_data[node].max_pfn,
                                     end >> PAGE_SHIFT);
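
Annotation (not part of the patch): the ZONE_DMA accounting above clamps each range to the DMA limit before converting bytes to pages. Worked in isolation, with stand-in addresses and page size (the 4GB limit is an assumption for the sketch):

```c
#include <stdio.h>

#define PAGE_SHIFT	14UL		/* assumption: 16KB pages */
#define MAX_DMA_PHYS	0x100000000UL	/* assumption: 4GB DMA limit */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	/* a range straddling the DMA boundary */
	unsigned long start = 0xF0000000UL, len = 0x20000000UL;
	unsigned long end = start + len, dma_pages = 0;

	if (start <= MAX_DMA_PHYS)
		dma_pages = (MIN(end, MAX_DMA_PHYS) - start) >> PAGE_SHIFT;
	printf("%lu of %lu pages below the DMA limit\n",
	       dma_pages, len >> PAGE_SHIFT);
	return 0;
}
```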
@@ -660,65 +753,71 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 void __init paging_init(void)
 {
        unsigned long max_dma;
-       unsigned long zones_size[MAX_NR_ZONES];
-       unsigned long zholes_size[MAX_NR_ZONES];
        unsigned long pfn_offset = 0;
+       unsigned long max_pfn = 0;
        int node;
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
 
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-       arch_sparse_init();
-
        efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+       sparse_memory_present_with_active_regions(MAX_NUMNODES);
+       sparse_init();
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-       vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
-       vmem_map = (struct page *) vmalloc_end;
+       VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+               sizeof(struct page));
+       vmem_map = (struct page *) VMALLOC_END;
        efi_memmap_walk(create_mem_map_page_table, NULL);
        printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
 
        for_each_online_node(node) {
-               memset(zones_size, 0, sizeof(zones_size));
-               memset(zholes_size, 0, sizeof(zholes_size));
-
                num_physpages += mem_data[node].num_physpages;
-
-               if (mem_data[node].min_pfn >= max_dma) {
-                       /* All of this node's memory is above ZONE_DMA */
-                       zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn;
-                       zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn -
-                               mem_data[node].num_physpages;
-               } else if (mem_data[node].max_pfn < max_dma) {
-                       /* All of this node's memory is in ZONE_DMA */
-                       zones_size[ZONE_DMA] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn;
-                       zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn -
-                               mem_data[node].num_dma_physpages;
-               } else {
-                       /* This node has memory in both zones */
-                       zones_size[ZONE_DMA] = max_dma -
-                               mem_data[node].min_pfn;
-                       zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-                               mem_data[node].num_dma_physpages;
-                       zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-                               max_dma;
-                       zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
-                               (mem_data[node].num_physpages -
-                                mem_data[node].num_dma_physpages);
-               }
-
                pfn_offset = mem_data[node].min_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
                NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
 #endif
-               free_area_init_node(node, NODE_DATA(node), zones_size,
-                                   pfn_offset, zholes_size);
+               if (mem_data[node].max_pfn > max_pfn)
+                       max_pfn = mem_data[node].max_pfn;
        }
 
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
+       max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
+       max_zone_pfns[ZONE_NORMAL] = max_pfn;
+       free_area_init_nodes(max_zone_pfns);
+
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+       unsigned long size = compute_pernodesize(nid);
+
+       return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+       kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+       pgdat_list[update_node] = update_pgdat;
+       scatter_node_data();
+}
+#endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(struct page *start_page,
+                                               unsigned long size, int node)
+{
+       return vmemmap_populate_basepages(start_page, size, node);
+}
+#endif
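
Annotation (not part of the patch): the hotplug path above follows a simple pattern: allocate a zeroed, pernode-sized block, publish it through pgdat_list[], then rebroadcast the table to every node via scatter_node_data(). A userspace model of that publish step (calloc stands in for kzalloc; the sizes and node number are invented):

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 4

static void *pgdat_list[MAX_NUMNODES];

/* stands in for arch_refresh_nodedata(): publish, then rebroadcast */
static void publish(int node, void *pgdat)
{
	pgdat_list[node] = pgdat;
	/* ...here every existing node would recopy pgdat_list... */
}

int main(void)
{
	size_t pernodesize = 8192;		/* invented */
	void *pgdat = calloc(1, pernodesize);	/* kzalloc(size, GFP_KERNEL) */

	if (!pgdat)
		return 1;
	publish(2, pgdat);
	printf("node 2 pgdat published at %p\n", pgdat_list[2]);
	free(pgdat);				/* arch_free_nodedata() */
	return 0;
}
```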