diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 7bb7a4b..d9714bd 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -1,7 +1,7 @@
 /*
  * Virtual Memory Map support
  *
- * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
+ * (C) 2007 sgi. Christoph Lameter.
  *
  * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
  * virt_to_page, page_address() to be implemented as a base offset
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
  *
- * Special Kconfig settings:
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
- *
- *     The architecture has its own functions to populate the memory
- *     map and provides a vmemmap_populate function.
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
- *
- *     The architecture provides functions to populate the pmd level
- *     of the vmemmap mappings.  Allowing mappings using large pages
- *     where available.
- *
- *     If neither are set then PAGE_SIZE mappings are generated which
- *     require one PTE/TLB per PAGE_SIZE chunk of the virtual memory map.
+ * The architecture is expected to provide a vmemmap_populate() function
+ * to instantiate the mapping.
  */
 #include <linux/mm.h>
 #include <linux/mmzone.h>
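
The base-offset scheme the file header describes is what the generic memory-model macros reduce to once SPARSEMEM_VMEMMAP is enabled: pfn_to_page() and page_to_pfn() become pure pointer arithmetic against a virtually contiguous array of struct page. A rough sketch, mirroring include/asm-generic/memory_model.h, with VMEMMAP_START standing in for whatever virtual base the architecture reserves:

/* Sketch: the memmap is one flat, virtually contiguous array of
 * struct page at a fixed virtual base, so the pfn <-> struct page
 * conversions need no memory access at all.
 */
#define vmemmap              ((struct page *)VMEMMAP_START)
#define __pfn_to_page(pfn)   (vmemmap + (pfn))
#define __page_to_pfn(page)  ((unsigned long)((page) - vmemmap))

The page table pages backing that virtual range are what the rest of this file allocates and installs.
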
@@ -37,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <linux/sched.h>
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
  * or to back the page tables that are used to create the mapping.
  * Uses the main allocators if they are available, else bootmem.
  */
+
+static void * __init_refok __earlyonly_bootmem_alloc(int node,
+                               unsigned long size,
+                               unsigned long align,
+                               unsigned long goal)
+{
+       return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+}
+
+
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
        /* If the main allocator is up use that, fallback to bootmem. */
        if (slab_is_available()) {
-               struct page *page = alloc_pages_node(node,
+               struct page *page;
+
+               if (node_state(node, N_HIGH_MEMORY))
+                       page = alloc_pages_node(node,
                                GFP_KERNEL | __GFP_ZERO, get_order(size));
+               else
+                       page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+                               get_order(size));
                if (page)
                        return page_address(page);
                return NULL;
        } else
-               return __alloc_bootmem_node(NODE_DATA(node), size, size,
+               return __earlyonly_bootmem_alloc(node, size, size,
                                __pa(MAX_DMA_ADDRESS));
 }
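
A note on the __init_refok annotation above: __alloc_bootmem_node() lives in .init.text, so a plain __meminit caller would trip modpost's section-mismatch warning. Funneling the call through a tiny __init_refok helper marks the reference as intentional, which is safe here because the slab_is_available() test confines the bootmem path to early boot. A hedged sketch of the same pattern on a hypothetical helper:

/* Hypothetical example of the annotation pattern: may be called after
 * boot, but only reaches the __init bootmem allocator while the slab
 * allocator is not yet up.  __init_refok silences the modpost warning
 * for the reference into .init.text.
 */
static void * __init_refok early_table_alloc(int node, unsigned long size)
{
        if (slab_is_available())
                return kzalloc_node(size, GFP_KERNEL, node);
        return __alloc_bootmem_node(NODE_DATA(node), size, size,
                                    __pa(MAX_DMA_ADDRESS));
}
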
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
 void __meminit vmemmap_verify(pte_t *pte, int node,
                                unsigned long start, unsigned long end)
 {
        unsigned long pfn = pte_pfn(*pte);
        int actual_node = early_pfn_to_nid(pfn);
 
-       if (actual_node != node)
+       if (node_distance(actual_node, node) > LOCAL_DISTANCE)
                printk(KERN_WARNING "[%lx-%lx] potential offnode "
                        "page_structs\n", start, end - 1);
 }
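
The warning condition is relaxed here from strict node equality to a distance test: any node the firmware reports at LOCAL_DISTANCE (10 in the usual ACPI SLIT encoding) is treated as local, so only genuinely remote placement of page structs warns. The same test written as a standalone predicate, purely for illustration (the helper name is hypothetical):

/* Hypothetical predicate for the relaxed locality test used by
 * vmemmap_verify(): a pfn is effectively local to 'node' if the node
 * actually backing it is no further away than LOCAL_DISTANCE.
 */
static bool __meminit pfn_effectively_local(unsigned long pfn, int node)
{
        int actual_node = early_pfn_to_nid(pfn);

        return node_distance(actual_node, node) <= LOCAL_DISTANCE;
}
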
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
-static int __meminit vmemmap_populate_pte(pmd_t *pmd, unsigned long addr,
-                                       unsigned long end, int node)
+pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 {
-       pte_t *pte;
-
-       for (pte = pte_offset_kernel(pmd, addr); addr < end;
-                                               pte++, addr += PAGE_SIZE)
-               if (pte_none(*pte)) {
-                       pte_t entry;
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                       if (!p)
-                               return -ENOMEM;
-
-                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-                       set_pte(pte, entry);
-
-               } else
-                       vmemmap_verify(pte, node, addr + PAGE_SIZE, end);
+       pte_t *pte = pte_offset_kernel(pmd, addr);
+       if (pte_none(*pte)) {
+               pte_t entry;
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+               entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+               set_pte_at(&init_mm, addr, pte, entry);
+       }
+       return pte;
+}
 
-       return 0;
+pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
+{
+       pmd_t *pmd = pmd_offset(pud, addr);
+       if (pmd_none(*pmd)) {
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+               pmd_populate_kernel(&init_mm, pmd, p);
+       }
+       return pmd;
 }
 
-int __meminit vmemmap_populate_pmd(pud_t *pud, unsigned long addr,
-                                               unsigned long end, int node)
+pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 {
-       pmd_t *pmd;
-       int error = 0;
-       unsigned long next;
-
-       for (pmd = pmd_offset(pud, addr); addr < end && !error;
-                                               pmd++, addr = next) {
-               if (pmd_none(*pmd)) {
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                       if (!p)
-                               return -ENOMEM;
-
-                       pmd_populate_kernel(&init_mm, pmd, p);
-               } else
-                       vmemmap_verify((pte_t *)pmd, node,
-                                       pmd_addr_end(addr, end), end);
-               next = pmd_addr_end(addr, end);
-               error = vmemmap_populate_pte(pmd, addr, next, node);
+       pud_t *pud = pud_offset(pgd, addr);
+       if (pud_none(*pud)) {
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+               pud_populate(&init_mm, pud, p);
        }
-       return error;
+       return pud;
 }
-#endif /* CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD */
 
-static int __meminit vmemmap_populate_pud(pgd_t *pgd, unsigned long addr,
-                                               unsigned long end, int node)
+pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
-       pud_t *pud;
-       int error = 0;
-       unsigned long next;
-
-       for (pud = pud_offset(pgd, addr); addr < end && !error;
-                                               pud++, addr = next) {
-               if (pud_none(*pud)) {
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                       if (!p)
-                               return -ENOMEM;
-
-                       pud_populate(&init_mm, pud, p);
-               }
-               next = pud_addr_end(addr, end);
-               error = vmemmap_populate_pmd(pud, addr, next, node);
+       pgd_t *pgd = pgd_offset_k(addr);
+       if (pgd_none(*pgd)) {
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+               pgd_populate(&init_mm, pgd, p);
        }
-       return error;
+       return pgd;
 }
 
-int __meminit vmemmap_populate(struct page *start_page,
-                                               unsigned long nr, int node)
+int __meminit vmemmap_populate_basepages(struct page *start_page,
+                                               unsigned long size, int node)
 {
-       pgd_t *pgd;
        unsigned long addr = (unsigned long)start_page;
-       unsigned long end = (unsigned long)(start_page + nr);
-       unsigned long next;
-       int error = 0;
-
-       printk(KERN_DEBUG "[%lx-%lx] Virtual memory section"
-               " (%ld pages) node %d\n", addr, end - 1, nr, node);
-
-       for (pgd = pgd_offset_k(addr); addr < end && !error;
-                                       pgd++, addr = next) {
-               if (pgd_none(*pgd)) {
-                       void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                       if (!p)
-                               return -ENOMEM;
-
-                       pgd_populate(&init_mm, pgd, p);
-               }
-               next = pgd_addr_end(addr,end);
-               error = vmemmap_populate_pud(pgd, addr, next, node);
+       unsigned long end = (unsigned long)(start_page + size);
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       for (; addr < end; addr += PAGE_SIZE) {
+               pgd = vmemmap_pgd_populate(addr, node);
+               if (!pgd)
+                       return -ENOMEM;
+               pud = vmemmap_pud_populate(pgd, addr, node);
+               if (!pud)
+                       return -ENOMEM;
+               pmd = vmemmap_pmd_populate(pud, addr, node);
+               if (!pmd)
+                       return -ENOMEM;
+               pte = vmemmap_pte_populate(pmd, addr, node);
+               if (!pte)
+                       return -ENOMEM;
+               vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }
-       return error;
+
+       return 0;
 }
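
With this generic walker in place, an architecture with no large-page optimization can satisfy the vmemmap_populate() hook the file header now calls for with a one-line wrapper. A minimal sketch of such arch glue (the wrapper itself is an assumption, though it matches how vmemmap_populate_basepages() is meant to be consumed):

/* Hypothetical arch-side hook: no huge-page vmemmap mappings, so just
 * defer to the generic PAGE_SIZE walker above.
 */
int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long size, int node)
{
        return vmemmap_populate_basepages(start_page, size, node);
}
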
-#endif /* !CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP */
 
-struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
+struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 {
        struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
        int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);