diff --git a/mm/sparse.c b/mm/sparse.c
index 186a85b..6ce4aab 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -61,9 +62,12 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);
 
-       if (slab_is_available())
-               section = kmalloc_node(array_size, GFP_KERNEL, nid);
-       else
+       if (slab_is_available()) {
+               if (node_state(nid, N_HIGH_MEMORY))
+                       section = kmalloc_node(array_size, GFP_KERNEL, nid);
+               else
+                       section = kmalloc(array_size, GFP_KERNEL);
+       } else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);
 
        if (section)
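A note on the hunk above: on a memoryless node, nid is valid but node_state(nid, N_HIGH_MEMORY) is false, so kmalloc_node() would be asked for node-local memory the node does not have; the fallback to plain kmalloc() lets the allocator place the array on any node. A minimal sketch of the pattern in kernel style (alloc_node_local() is a hypothetical helper, not a kernel API; the calls inside it are the same ones used above):

        /*
         * Hypothetical helper condensing the pattern above: only request
         * node-local memory when the node actually has memory.
         */
        static void *alloc_node_local(size_t size, int nid)
        {
                if (!slab_is_available())
                        return alloc_bootmem_node(NODE_DATA(nid), size);
                if (node_state(nid, N_HIGH_MEMORY))
                        return kmalloc_node(size, GFP_KERNEL, nid);
                return kmalloc(size, GFP_KERNEL);       /* memoryless node */
        }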
@@ -146,22 +150,39 @@ static inline int sparse_early_nid(struct mem_section *section)
        return (section->section_mem_map >> SECTION_NID_SHIFT);
 }
 
-/* Record a memory area against a node. */
-void __init memory_present(int nid, unsigned long start, unsigned long end)
+/* Validate the physical addressing limitations of the model */
+void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
+                                               unsigned long *end_pfn)
 {
-       unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
-       unsigned long pfn;
+       unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
 
        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
-       if (start >= max_arch_pfn)
-               return;
-       if (end >= max_arch_pfn)
-               end = max_arch_pfn;
+       if (*start_pfn > max_sparsemem_pfn) {
+               mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+                       "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+                       *start_pfn, *end_pfn, max_sparsemem_pfn);
+               WARN_ON_ONCE(1);
+               *start_pfn = max_sparsemem_pfn;
+               *end_pfn = max_sparsemem_pfn;
+       } else if (*end_pfn > max_sparsemem_pfn) {
+               mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+                       "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+                       *start_pfn, *end_pfn, max_sparsemem_pfn);
+               WARN_ON_ONCE(1);
+               *end_pfn = max_sparsemem_pfn;
+       }
+}
+
+/* Record a memory area against a node. */
+void __init memory_present(int nid, unsigned long start, unsigned long end)
+{
+       unsigned long pfn;
 
        start &= PAGE_SECTION_MASK;
+       mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;
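To make the clamp above concrete: the largest pfn SPARSEMEM can represent is 1 << (MAX_PHYSMEM_BITS - PAGE_SHIFT). The following standalone C program applies the same start/end validation with illustrative x86_64-style constants (MAX_PHYSMEM_BITS = 46, PAGE_SHIFT = 12; the real values are architecture-specific):

        #include <stdio.h>

        #define MAX_PHYSMEM_BITS 46     /* illustrative, arch-specific */
        #define PAGE_SHIFT       12

        int main(void)
        {
                unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
                unsigned long start_pfn = max_sparsemem_pfn + 100;      /* bogus range */
                unsigned long end_pfn   = max_sparsemem_pfn + 200;

                if (start_pfn > max_sparsemem_pfn) {
                        /* whole range is out of scope: collapse it to empty */
                        start_pfn = max_sparsemem_pfn;
                        end_pfn = max_sparsemem_pfn;
                } else if (end_pfn > max_sparsemem_pfn) {
                        /* only the tail is out of scope: truncate it */
                        end_pfn = max_sparsemem_pfn;
                }

                printf("validated range: %lu-%lu (max pfn %lu)\n",
                       start_pfn, end_pfn, max_sparsemem_pfn);
                return 0;
        }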
@@ -186,6 +207,7 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
        unsigned long pfn;
        unsigned long nr_pages = 0;
 
+       mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;
@@ -210,7 +232,6 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
 /*
  * Decode mem_map from the coded memmap
  */
-static
 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
 {
        /* mask off the extra low bits of information */
@@ -233,7 +254,7 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
        return 1;
 }
 
-static unsigned long usemap_size(void)
+unsigned long usemap_size(void)
 {
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
@@ -248,20 +269,96 @@ static unsigned long *__kmalloc_section_usemap(void)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static unsigned long * __init
+sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+{
+       unsigned long section_nr;
+
+       /*
+        * A page may contain usemaps for other sections preventing the
+        * page being freed and making a section unremovable while
+        * other sections referencing the usemap remain active. Similarly,
+        * a pgdat can prevent a section being removed. If section A
+        * contains a pgdat and section B contains the usemap, both
+        * sections become inter-dependent. This allocates usemaps
+        * from the same section as the pgdat where possible to avoid
+        * this problem.
+        */
+       section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+       return alloc_bootmem_section(usemap_size(), section_nr);
+}
+
+static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
+{
+       unsigned long usemap_snr, pgdat_snr;
+       static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
+       static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
+       struct pglist_data *pgdat = NODE_DATA(nid);
+       int usemap_nid;
+
+       usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
+       pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+       if (usemap_snr == pgdat_snr)
+               return;
+
+       if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
+               /* skip redundant message */
+               return;
+
+       old_usemap_snr = usemap_snr;
+       old_pgdat_snr = pgdat_snr;
+
+       usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
+       if (usemap_nid != nid) {
+               printk(KERN_INFO
+                      "node %d must be removed before removing section %ld\n",
+                      nid, usemap_snr);
+               return;
+       }
+       /*
+        * There is a circular dependency.
+        * Some platforms allow un-removable sections because they will just
+        * gather other removable sections for dynamic partitioning.
+        * Just report the un-removable section's number here.
+        */
+       printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
+              pgdat_snr, nid);
+       printk(KERN_CONT
+              " have a circular dependency on usemap and pgdat allocations\n");
+}
+#else
+static unsigned long * __init
+sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+{
+       return NULL;
+}
+
+static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
+{
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
 static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
 {
        unsigned long *usemap;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);
 
-       usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+       usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
        if (usemap)
                return usemap;
 
+       usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+       if (usemap) {
+               check_usemap_section_nr(nid, usemap);
+               return usemap;
+       }
+
        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
        nid = 0;
 
-       printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
+       printk(KERN_WARNING "%s: allocation failed\n", __func__);
        return NULL;
 }
 
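The placement trick above relies on mapping a kernel object's address to its memory section: __pa(addr) >> PAGE_SHIFT yields the pfn, and shifting by PFN_SECTION_SHIFT yields the section number, so the pgdat and a usemap allocated near it land in the same section. A compilable illustration with assumed constants (SECTION_SIZE_BITS = 27, i.e. 128MB sections, and PAGE_SHIFT = 12; both are architecture-specific in the real kernel):

        #include <stdio.h>

        #define PAGE_SHIFT        12
        #define SECTION_SIZE_BITS 27    /* 128MB sections, illustrative */
        #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)

        static unsigned long pfn_to_section_nr(unsigned long pfn)
        {
                return pfn >> PFN_SECTION_SHIFT;
        }

        int main(void)
        {
                /* pretend physical addresses of a pgdat and a nearby usemap */
                unsigned long pgdat_pa  = 0x08000000UL;
                unsigned long usemap_pa = 0x08001000UL;

                printf("pgdat in section %lu, usemap in section %lu\n",
                       pfn_to_section_nr(pgdat_pa >> PAGE_SHIFT),
                       pfn_to_section_nr(usemap_pa >> PAGE_SHIFT));
                /* equal numbers: neither section pins the other at hot-remove */
                return 0;
        }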
@@ -274,13 +371,13 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
        if (map)
                return map;
 
-       map = alloc_bootmem_node(NODE_DATA(nid),
-                       sizeof(struct page) * PAGES_PER_SECTION);
+       map = alloc_bootmem_pages_node(NODE_DATA(nid),
+                      PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
        return map;
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
-struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
@@ -291,7 +388,7 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
                return map;
 
        printk(KERN_ERR "%s: sparsemem memory map backing failed "
-                       "some memory will not be available.\n", __FUNCTION__);
+                       "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
 }
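Rough sizing for the page-aligned allocation above, assuming a 64-byte struct page and 128MB sections (both illustrative): the per-section memmap is 32768 * 64 bytes = 2MB. Allocating it in whole pages is what later lets hot-remove hand individual pages back, as free_map_bootmem() below does. As plain C:

        #include <stdio.h>

        #define PAGE_SIZE         4096UL
        #define PAGES_PER_SECTION 32768UL       /* 128MB / 4KB, illustrative */
        #define PAGE_ALIGN(x)     (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long size_of_page = 64;        /* illustrative sizeof(struct page) */
                unsigned long memmap_bytes = PAGE_ALIGN(size_of_page * PAGES_PER_SECTION);

                printf("memmap per section: %lu bytes = %lu pages\n",
                       memmap_bytes, memmap_bytes / PAGE_SIZE);
                return 0;
        }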
@@ -366,6 +463,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
        return; /* XXX: Not implemented yet */
 }
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
 #else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
@@ -403,17 +503,47 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
 }
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+       unsigned long maps_section_nr, removing_section_nr, i;
+       int magic;
+
+       for (i = 0; i < nr_pages; i++, page++) {
+               magic = atomic_read(&page->_mapcount);
+
+               BUG_ON(magic == NODE_INFO);
+
+               maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+               removing_section_nr = page->private;
+
+               /*
+                * When this function is called, the section being removed is
+                * in a logically offlined state: all of its pages are isolated
+                * from the page allocator. If the memmap of the section being
+                * removed is placed on that same section, it must not be freed;
+                * otherwise the page allocator could hand it out just before
+                * the section is removed physically.
+                */
+               if (maps_section_nr != removing_section_nr)
+                       put_page_bootmem(page);
+       }
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
+       struct page *usemap_page;
+       unsigned long nr_pages;
+
        if (!usemap)
                return;
 
+       usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
-       if (PageSlab(virt_to_page(usemap))) {
+       if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
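The skip condition in free_map_bootmem() above can be modeled in isolation: a memmap page is only released if it does not itself reside on the section being removed. A small userspace model (fake_page and its fields are stand-ins, not kernel structures):

        #include <stdio.h>

        struct fake_page {
                unsigned long section_nr;       /* section the page resides on */
                unsigned long private;          /* section whose memmap it backs */
        };

        static void free_map_bootmem_model(const struct fake_page *pages, int n)
        {
                for (int i = 0; i < n; i++) {
                        if (pages[i].section_nr != pages[i].private)
                                printf("page %d: freed\n", i);
                        else
                                printf("page %d: kept, it backs its own section\n", i);
                }
        }

        int main(void)
        {
                const struct fake_page pages[] = {
                        { .section_nr = 5, .private = 7 },      /* safe to free */
                        { .section_nr = 7, .private = 7 },      /* must be kept */
                };

                free_map_bootmem_model(pages, 2);
                return 0;
        }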
@@ -421,10 +551,19 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
        }
 
        /*
-        * TODO: Allocations came from bootmem - how do I free up ?
+        * The usemap came from bootmem. It is packed with other usemaps on
+        * the section that holds the pgdat at boot time; just keep it as is.
         */
-       printk(KERN_WARNING "Not freeing up allocations from bootmem "
-                       "- leaking memory\n");
+
+       if (memmap) {
+               struct page *memmap_page;
+               memmap_page = virt_to_page(memmap);
+
+               nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+                       >> PAGE_SHIFT;
+
+               free_map_bootmem(memmap_page, nr_pages);
+       }
 }
 
 /*
@@ -432,7 +571,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
 {
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
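The comment above describes the ownership convention: sparse_init_one_section() returns <= 0 when the passed-in map was not consumed, and whoever allocated the map then frees it. The call site is cut off in this excerpt; the following sketch of that cleanup is an assumption based on the comment, reusing only names that appear earlier in this diff:

        /*
         * Sketch (assumed, not shown in this excerpt): if the map was not
         * consumed, the function that allocated memmap and usemap frees them.
         */
        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;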