x86: move pgd_base out of init_memory_mapping()
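
In this file (init_64.c), the changes amount to:

 - phys_pte_init(), phys_pmd_init() and phys_pud_init() now count
   entries that are already mapped instead of silently skipping them,
   so the direct-mapping statistics come out right.
 - find_early_table_space() gains the 32-bit cases (an extra PMD worth
   of ptes under PSE, pte slots for the fixmap, and a lower 0x7000
   search start), preparing it to be shared with init_32.c.
 - kernel_physical_mapping_init() becomes __meminit so the memory
   hotplug path may call it after boot.
 - init_memory_mapping() walks [start, end) with a running "pos"
   cursor and records only non-empty ranges, so small or unaligned
   regions no longer produce bogus map_range entries; the comment
   about allocating the tables moves down next to the
   find_early_table_space() call.
 - arch_add_memory() passes an exclusive end to init_memory_mapping()
   and the node id to __add_pages(), and warns only when __add_pages()
   actually fails.
 - mem_init() reports absent pages as their own column; the local
   copies of devmem_is_allowed(), free_init_pages(), free_initmem()
   and free_initrd_mem() are removed (presumably consolidated into
   code shared with 32-bit).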
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d6ef158..151e5ba 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
                 * pagetable pages as RO. So assume someone who pre-setup
                 * these mappings are more intelligent.
                 */
-               if (pte_val(*pte))
+               if (pte_val(*pte)) {
+                       pages++;
                        continue;
+               }
 
                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                         * not differ with respect to page frame and
                         * attributes.
                         */
-                       if (page_size_mask & (1 << PG_LEVEL_2M))
+                       if (page_size_mask & (1 << PG_LEVEL_2M)) {
+                               pages++;
                                continue;
+                       }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }
 
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         * not differ with respect to page frame and
                         * attributes.
                         */
-                       if (page_size_mask & (1 << PG_LEVEL_1G))
+                       if (page_size_mask & (1 << PG_LEVEL_1G)) {
+                               pages++;
                                continue;
+                       }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }
 
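The three hunks above apply the same accounting fix at the pte, pmd and
pud levels. A minimal standalone sketch of the pattern follows; every
name in it is invented for illustration and none of it is kernel code:

	#include <stdio.h>

	/* Sketch of the accounting fix: entries that are already
	 * present are counted before being skipped, so the mapping
	 * statistics include pages set up by an earlier pass. */
	static unsigned long fill_table(unsigned long *tbl, int n)
	{
		unsigned long pages = 0;
		int i;

		for (i = 0; i < n; i++) {
			if (tbl[i]) {		/* already populated... */
				pages++;	/* ...new: count it anyway */
				continue;
			}
			tbl[i] = 1;		/* populate the entry */
			pages++;
		}
		return pages;
	}

	int main(void)
	{
		unsigned long tbl[8] = { 1, 1, 0, 0, 0, 0, 0, 0 };

		/* before the fix this reported 6; now it reports all 8 */
		printf("%lu pages mapped\n", fill_table(tbl, 8));
		return 0;
	}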
@@ -548,29 +554,48 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
        if (use_gbpages) {
                unsigned long extra;
+
                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
        if (use_pse) {
                unsigned long extra;
+
                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
+               extra += PMD_SIZE;
+#endif
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
+#ifdef CONFIG_X86_32
+       /* for fixmap */
+       tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
+
        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
+#ifdef CONFIG_X86_32
+       start = 0x7000;
+       table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+                                       tables, PAGE_SIZE);
+#else /* CONFIG_X86_64 */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
+#endif
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");
 
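For a feel for the sizes find_early_table_space() reserves, here is a
rough userspace model of its estimate; the constants assume x86_64
4-level paging with 8-byte entries and 64-bit longs, and the function
and variable names are made up for the sketch:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PMD_SHIFT  21
	#define PUD_SHIFT  30
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	static unsigned long round_up(unsigned long x, unsigned long to)
	{
		return (x + to - 1) / to * to;
	}

	static unsigned long table_space(unsigned long end, int use_pse)
	{
		unsigned long puds, pmds, ptes, tables, extra;

		puds = (end + (1UL << PUD_SHIFT) - 1) >> PUD_SHIFT;
		pmds = (end + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT;
		if (use_pse) {
			/* only the unaligned tail needs 4k ptes */
			extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
			ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
		} else {
			ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
		}

		tables  = round_up(puds * 8, PAGE_SIZE); /* 8-byte entries */
		tables += round_up(pmds * 8, PAGE_SIZE);
		tables += round_up(ptes * 8, PAGE_SIZE);
		return tables;
	}

	int main(void)
	{
		unsigned long end = 4UL << 30;		/* first 4 GB */

		printf("4k only : %lu KB\n", table_space(end, 0) >> 10);
		printf("2M pages: %lu KB\n", table_space(end, 1) >> 10);
		return 0;
	}

With 2M pages the tables for 4 GB fit in a few pages, which is why the
RED-PEN comment above talks in fractions of a kilobyte per gigabyte.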
@@ -590,7 +615,7 @@ static void __init init_gbpages(void)
                direct_gbpages = 0;
 }
 
-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
                                                unsigned long end,
                                                unsigned long page_size_mask)
 {
@@ -641,7 +666,6 @@ static int save_mr(struct map_range *mr, int nr_range,
                   unsigned long start_pfn, unsigned long end_pfn,
                   unsigned long page_size_mask)
 {
-
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
@@ -665,20 +689,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        unsigned long last_map_addr = 0;
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;
+       unsigned long pos;
 
        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
        int use_pse, use_gbpages;
 
-       printk(KERN_INFO "init_memory_mapping\n");
+       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-       /*
-        * Find space for the kernel direct mapping tables.
-        *
-        * Later we should allocate these tables in the local node of the
-        * memory mapped. Unfortunately this is done currently before the
-        * nodes are discovered.
-        */
        if (!after_bootmem)
                init_gbpages();
 
@@ -702,37 +720,54 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        memset(mr, 0, sizeof(mr));
        nr_range = 0;
 
-       /* head if not big page alignment ?*/
+       /* head if not big page alignment ? */
        start_pfn = start >> PAGE_SHIFT;
-       end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+       pos = start_pfn << PAGE_SHIFT;
+       end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+       if (end_pfn > (end >> PAGE_SHIFT))
+               end_pfn = end >> PAGE_SHIFT;
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
-       /* big page (2M) range*/
-       start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+       /* big page (2M) range */
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
-       end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+       end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
-       if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-               end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                       page_size_mask & (1<<PG_LEVEL_2M));
+       if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+               end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
        /* big page (1G) range */
-       start_pfn = end_pfn;
-       end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+       start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
        /* tail is not big page (1G) alignment */
-       start_pfn = end_pfn;
-       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                       page_size_mask & (1<<PG_LEVEL_2M));
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
        /* tail is not big page (2M) alignment */
-       start_pfn = end_pfn;
+       start_pfn = pos>>PAGE_SHIFT;
        end_pfn = end>>PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
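The net effect of the pos cursor is easiest to see in isolation. The
sketch below reproduces the head/body/tail splitting for 4k and 2M
pages (the 1G middle step is omitted for brevity); the addresses and
names are illustrative, not kernel code:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PMD_SHIFT  21	/* 2 MB pages */

	int main(void)
	{
		unsigned long start = 0x00100000UL;	/* 1 MB  (example) */
		unsigned long end   = 0x7ff00000UL;	/* ~2 GB (example) */
		unsigned long pos = start, start_pfn, end_pfn;

		/* head: 4k pages up to the first 2M boundary, clamped
		 * to end so a tiny region yields no bogus range */
		start_pfn = pos >> PAGE_SHIFT;
		end_pfn = ((pos + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);
		if (end_pfn > (end >> PAGE_SHIFT))
			end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn) {
			printf("4k : %#010lx-%#010lx\n",
			       start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
			pos = end_pfn << PAGE_SHIFT;	/* advance cursor */
		}

		/* body: 2M pages up to the last 2M boundary below end */
		start_pfn = pos >> PAGE_SHIFT;
		end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
		if (start_pfn < end_pfn) {
			printf("2M : %#010lx-%#010lx\n",
			       start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
			pos = end_pfn << PAGE_SHIFT;
		}

		/* tail: whatever is left gets 4k pages */
		start_pfn = pos >> PAGE_SHIFT;
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			printf("4k : %#010lx-%#010lx\n",
			       start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
		return 0;
	}

Because each step re-derives its start from pos rather than from the
original start, a range recorded by one step can never overlap or
precede a range recorded by a later one.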
@@ -745,7 +780,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
-                        (nr_range - 1 - i) * sizeof (struct map_range));
+                       (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i--].start = old_start;
                nr_range--;
        }
@@ -756,6 +791,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
+       /*
+        * Find space for the kernel direct mapping tables.
+        *
+        * Later we should allocate these tables in the local node of the
+        * memory mapped. Unfortunately this is done currently before the
+        * nodes are discovered.
+        */
        if (!after_bootmem)
                find_early_table_space(end, use_pse, use_gbpages);
 
@@ -831,12 +873,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;
 
-       last_mapped_pfn = init_memory_mapping(start, start + size-1);
+       last_mapped_pfn = init_memory_mapping(start, start + size);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;
 
-       ret = __add_pages(zone, start_pfn, nr_pages);
-       WARN_ON(1);
+       ret = __add_pages(nid, zone, start_pfn, nr_pages);
+       WARN_ON_ONCE(ret);
 
        return ret;
 }
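The first hunk here fixes an off-by-one: init_memory_mapping() treats
its end argument as exclusive, so the hotplug path must pass
start + size. A trivial demonstration of the difference, with made-up
example values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0x100000000ULL; /* hot-add at 4 GB */
		unsigned long long size  = 128ULL << 20;   /* one 128 MB block */

		printf("old, short by a byte: [%#llx, %#llx)\n",
		       start, start + size - 1);
		printf("new, full range:      [%#llx, %#llx)\n",
		       start, start + size);
		return 0;
	}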
@@ -852,32 +894,13 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-       if (pagenr <= 256)
-               return 1;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
-}
-
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;
 
 void __init mem_init(void)
 {
        long codesize, reservedpages, datasize, initsize;
+       unsigned long absent_pages;
 
        pci_iommu_alloc();
 
@@ -891,8 +914,9 @@ void __init mem_init(void)
 #else
        totalram_pages = free_all_bootmem();
 #endif
-       reservedpages = max_pfn - totalram_pages -
-                                       absent_pages_in_range(0, max_pfn);
+
+       absent_pages = absent_pages_in_range(0, max_pfn);
+       reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -909,52 +933,16 @@ void __init mem_init(void)
                                 VSYSCALL_END - VSYSCALL_START);
 
        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-                               "%ldk reserved, %ldk data, %ldk init)\n",
+                        "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
+               absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-       unsigned long addr = begin;
-
-       if (addr >= end)
-               return;
-
-       /*
-        * If debugging page accesses then do not free this memory but
-        * mark them not present - any buggy init-section access will
-        * create a kernel page fault:
-        */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
-       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-       for (; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
-               totalram_pages++;
-       }
-#endif
-}
-
-void free_initmem(void)
-{
-       free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
@@ -993,13 +981,6 @@ void mark_rodata_ro(void)
 
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {