x86: move pgd_base out of init_memory_mapping()
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 27de243..151e5ba 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -31,6 +31,7 @@
 #include <linux/nmi.h>
 
 #include <asm/processor.h>
+#include <asm/bios_ebda.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -60,7 +61,7 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages __meminitdata
+int direct_gbpages
 #ifdef CONFIG_DIRECT_GBPAGES
                                = 1
 #endif
@@ -86,46 +87,69 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-void show_mem(void)
-{
-       long i, total = 0, reserved = 0;
-       long shared = 0, cached = 0;
-       struct page *page;
-       pg_data_t *pgdat;
-
-       printk(KERN_INFO "Mem-info:\n");
-       show_free_areas();
-       for_each_online_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       /*
-                        * This loop can take a while with 256 GB and
-                        * 4k pages so defer the NMI watchdog:
-                        */
-                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-                               touch_nmi_watchdog();
+int after_bootmem;
 
-                       if (!pfn_valid(pgdat->node_start_pfn + i))
-                               continue;
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
-                       page = pfn_to_page(pgdat->node_start_pfn + i);
-                       total++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
+static int do_not_nx __cpuinitdata;
+
+/*
+ * noexec=on|off
+ * Control non-executable mappings for 64-bit processes.
+ *
+ * on  Enable (default)
+ * off Disable
+ */
+static int __init nonx_setup(char *str)
+{
+       if (!str)
+               return -EINVAL;
+       if (!strncmp(str, "on", 2)) {
+               __supported_pte_mask |= _PAGE_NX;
+               do_not_nx = 0;
+       } else if (!strncmp(str, "off", 3)) {
+               do_not_nx = 1;
+               __supported_pte_mask &= ~_PAGE_NX;
        }
-       printk(KERN_INFO "%lu pages of RAM\n",          total);
-       printk(KERN_INFO "%lu reserved pages\n",        reserved);
-       printk(KERN_INFO "%lu pages shared\n",          shared);
-       printk(KERN_INFO "%lu pages swap cached\n",     cached);
+       return 0;
 }
+early_param("noexec", nonx_setup);
 
-int after_bootmem;
+void __cpuinit check_efer(void)
+{
+       unsigned long efer;
+
+       rdmsrl(MSR_EFER, efer);
+       if (!(efer & EFER_NX) || do_not_nx)
+               __supported_pte_mask &= ~_PAGE_NX;
+}
+
+int force_personality32;
+
+/*
+ * noexec32=on|off
+ * Control non-executable heap for 32-bit processes.
+ * To control the stack too, use noexec=off
+ *
+ * on  PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
+ * off PROT_READ implies PROT_EXEC
+ */
+static int __init nonx32_setup(char *str)
+{
+       if (!strcmp(str, "on"))
+               force_personality32 &= ~READ_IMPLIES_EXEC;
+       else if (!strcmp(str, "off"))
+               force_personality32 |= READ_IMPLIES_EXEC;
+       return 1;
+}
+__setup("noexec32=", nonx32_setup);
 
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
 {
        void *ptr;
 
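
The noexec handling above funnels every protection value through __supported_pte_mask, so clearing _PAGE_NX once (in nonx_setup() or check_efer()) strips NX from every mapping built afterwards. A minimal user-space sketch of that masking; the bit constants and the make_pte() helper are simplified assumptions for illustration, not the kernel's definitions:

/* sketch: how a cleared __supported_pte_mask silently drops the NX bit */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_RW       (1ULL << 1)
#define _PAGE_NX       (1ULL << 63)

static uint64_t supported_pte_mask = ~0ULL;     /* stand-in for __supported_pte_mask */

static uint64_t make_pte(uint64_t pfn, uint64_t prot)
{
        /* protections are ANDed with the mask before they reach the PTE */
        return (pfn << 12) | (prot & supported_pte_mask);
}

int main(void)
{
        uint64_t prot = _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;

        printf("NX supported:     %#llx\n",
               (unsigned long long)make_pte(0x1234, prot));

        supported_pte_mask &= ~_PAGE_NX;        /* what check_efer() does when EFER.NX is off */
        printf("NX not supported: %#llx\n",
               (unsigned long long)make_pte(0x1234, prot));
        return 0;
}

The second line of output is the same PTE minus bit 63, which is the effect noexec=off (or a CPU without EFER.NX) has on mappings built through this mask.
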
@@ -172,9 +196,6 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
        }
 
        pte = pte_offset_kernel(pmd, vaddr);
-       if (!pte_none(*pte) && pte_val(new_pte) &&
-           pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-               pte_ERROR(*pte);
        set_pte(pte, new_pte);
 
        /*
@@ -258,7 +279,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 void __init cleanup_highmap(void)
 {
        unsigned long vaddr = __START_KERNEL_map;
-       unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
+       unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
@@ -274,7 +295,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
        unsigned long pfn = table_end++;
        void *adr;
@@ -289,13 +310,13 @@ static __meminit void *alloc_low_page(unsigned long *phys)
        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");
 
-       adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
+       adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys  = pfn * PAGE_SIZE;
        return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
        if (after_bootmem)
                return;
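
alloc_low_page() above is a bump allocator over the physical window reserved by find_early_table_space(): table_end walks from table_start towards table_top one pfn at a time, and each new page is temporarily mapped (now via early_memremap()) and zeroed before use. A user-space sketch of that allocation pattern, with a calloc'd buffer standing in for the mapped window; the window bounds are made-up values:

/* sketch: bump allocation of early page-table pages from a fixed window */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned long table_start, table_end, table_top;    /* pfns */
static unsigned char *window;                               /* stand-in for the mapping */

static void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top) {
                fprintf(stderr, "alloc_low_page: ran out of memory\n");
                exit(1);
        }
        adr = window + (pfn - table_start) * PAGE_SIZE;
        memset(adr, 0, PAGE_SIZE);              /* fresh page tables start zeroed */
        *phys = pfn * PAGE_SIZE;
        return adr;
}

int main(void)
{
        unsigned long phys;

        table_start = table_end = 8;            /* pretend the window starts at 32 KB */
        table_top = 16;                         /* eight pages available */
        window = calloc(table_top - table_start, PAGE_SIZE);

        alloc_low_page(&phys);
        printf("first page-table page at phys %#lx\n", phys);
        alloc_low_page(&phys);
        printf("second page-table page at phys %#lx\n", phys);
        free(window);
        return 0;
}
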
@@ -304,7 +325,8 @@ static __meminit void unmap_low_page(void *adr)
 }
 
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
+phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+             pgprot_t prot)
 {
        unsigned pages = 0;
        unsigned long last_map_addr = end;
@@ -322,32 +344,42 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
                        break;
                }
 
-               if (pte_val(*pte))
+               /*
+                * We will re-use the existing mapping.
+                * Xen for example has some special requirements, like mapping
+                * pagetable pages as RO. So assume whoever pre-set up
+                * these mappings knew what they were doing.
+                */
+               if (pte_val(*pte)) {
+                       pages++;
                        continue;
+               }
 
                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
-               set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
-               last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
                pages++;
+               set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
+               last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }
+
        update_page_count(PG_LEVEL_4K, pages);
 
        return last_map_addr;
 }
 
 static unsigned long __meminit
-phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
+               pgprot_t prot)
 {
        pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
 
-       return phys_pte_init(pte, address, end);
+       return phys_pte_init(pte, address, end, prot);
 }
 
 static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
-                        unsigned long page_size_mask)
+             unsigned long page_size_mask, pgprot_t prot)
 {
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
@@ -358,6 +390,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
+               pgprot_t new_prot = prot;
 
                if (address >= end) {
                        if (!after_bootmem) {
@@ -368,25 +401,50 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                }
 
                if (pmd_val(*pmd)) {
-                       if (!pmd_large(*pmd))
+                       if (!pmd_large(*pmd)) {
+                               spin_lock(&init_mm.page_table_lock);
                                last_map_addr = phys_pte_update(pmd, address,
-                                                                end);
-                       continue;
+                                                               end, prot);
+                               spin_unlock(&init_mm.page_table_lock);
+                               continue;
+                       }
+                       /*
+                        * If we are ok with PG_LEVEL_2M mapping, then we will
+                        * use the existing mapping,
+                        *
+                        * Otherwise, we will split the large page mapping but
+                        * use the same existing protection bits except for
+                        * large page, so that we don't violate Intel's TLB
+                        * Application note (317080) which says, while changing
+                        * the page sizes, new and old translations should
+                        * not differ with respect to page frame and
+                        * attributes.
+                        */
+                       if (page_size_mask & (1 << PG_LEVEL_2M)) {
+                               pages++;
+                               continue;
+                       }
+                       new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }
 
                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
+                       spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
-                               pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                               pfn_pte(address >> PAGE_SHIFT,
+                                       __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+                       spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }
 
                pte = alloc_low_page(&pte_phys);
-               last_map_addr = phys_pte_init(pte, address, end);
+               last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);
 
+               spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+               spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
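
The Intel application note (317080) comment boils down to one rule: if an existing large mapping has to be split, the new smaller entries must carry the old entry's attributes with only the huge-page bit cleared, which is what pte_pgprot(pte_clrhuge(*(pte_t *)pmd)) produces for new_prot. A standalone sketch of that bit manipulation; the flag values and PROT_MASK below are assumed for illustration:

/* sketch: derive split-page protections from an existing large-page entry */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_RW       (1ULL << 1)
#define _PAGE_PSE      (1ULL << 7)                     /* large-page bit */
#define _PAGE_GLOBAL   (1ULL << 8)
#define PROT_MASK      0xfff0000000000fffULL           /* everything except the PFN field */

int main(void)
{
        /* an existing 2M mapping of physical 0x200000, global + writable */
        uint64_t pmd = (0x200ULL << 12) | _PAGE_PRESENT | _PAGE_RW |
                       _PAGE_PSE | _PAGE_GLOBAL;

        /* the pte_pgprot(pte_clrhuge(...)) idea: keep attributes, drop PSE */
        uint64_t new_prot = (pmd & PROT_MASK) & ~_PAGE_PSE;

        printf("old 2M prot bits: %#llx\n", (unsigned long long)(pmd & PROT_MASK));
        printf("new 4K prot bits: %#llx\n", (unsigned long long)new_prot);
        return 0;
}

The 4K entries built from new_prot then differ from the old translation only in page size, which is what the application note asks for.
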
@@ -394,14 +452,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
 static unsigned long __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
-                        unsigned long page_size_mask)
+               unsigned long page_size_mask, pgprot_t prot)
 {
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;
 
-       spin_lock(&init_mm.page_table_lock);
-       last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-       spin_unlock(&init_mm.page_table_lock);
+       last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
        __flush_tlb_all();
        return last_map_addr;
 }
@@ -418,6 +474,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
+               pgprot_t prot = PAGE_KERNEL;
 
                if (addr >= end)
                        break;
@@ -429,30 +486,51 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                }
 
                if (pud_val(*pud)) {
-                       if (!pud_large(*pud))
+                       if (!pud_large(*pud)) {
                                last_map_addr = phys_pmd_update(pud, addr, end,
-                                                        page_size_mask);
-                       continue;
+                                                        page_size_mask, prot);
+                               continue;
+                       }
+                       /*
+                        * If we are ok with PG_LEVEL_1G mapping, then we will
+                        * use the existing mapping.
+                        *
+                        * Otherwise, we will split the gbpage mapping but use
+                        * the same existing protection  bits except for large
+                        * page, so that we don't violate Intel's TLB
+                        * Application note (317080) which says, while changing
+                        * the page sizes, new and old translations should
+                        * not differ with respect to page frame and
+                        * attributes.
+                        */
+                       if (page_size_mask & (1 << PG_LEVEL_1G)) {
+                               pages++;
+                               continue;
+                       }
+                       prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }
 
                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
+                       spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                       spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }
 
                pmd = alloc_low_page(&pmd_phys);
+               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
+                                             prot);
+               unmap_low_page(pmd);
 
                spin_lock(&init_mm.page_table_lock);
-               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
-               unmap_low_page(pmd);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
-
        }
        __flush_tlb_all();
+
        update_page_count(PG_LEVEL_1G, pages);
 
        return last_map_addr;
@@ -469,35 +547,55 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
        return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+                                         int use_gbpages)
 {
        unsigned long puds, pmds, ptes, tables, start;
 
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
-       if (direct_gbpages) {
+       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
+       if (use_gbpages) {
                unsigned long extra;
+
                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-       if (cpu_has_pse) {
+       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+       if (use_pse) {
                unsigned long extra;
+
                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
+               extra += PMD_SIZE;
+#endif
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
+
+       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+
+#ifdef CONFIG_X86_32
+       /* for fixmap */
+       tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
 
        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
+#ifdef CONFIG_X86_32
+       start = 0x7000;
+       table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+                                       tables, PAGE_SIZE);
+#else /* CONFIG_X86_64 */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
+#endif
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");
 
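
find_early_table_space() only sizes the worst-case page-table footprint of the direct mapping and reserves that many bytes from e820. A worked sketch of the arithmetic for a hypothetical end of 4 GB with 2 MB pages (use_pse = 1, use_gbpages = 0), assuming 64-bit longs and the usual page-table geometry; the 32-bit fixmap extra is left out:

/* sketch: the table-space estimate for end = 4 GB, 2 MB pages */
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PMD_SIZE    (2UL << 20)
#define PUD_SIZE    (1UL << 30)
#define ROUNDUP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        unsigned long end  = 4UL << 30;                            /* 4 GB */
        unsigned long puds = (end + PUD_SIZE - 1) / PUD_SIZE;      /* 4 pud entries */
        unsigned long pmds = (end + PMD_SIZE - 1) / PMD_SIZE;      /* 2048 pmd entries */
        unsigned long tail = end - (end / PMD_SIZE) * PMD_SIZE;    /* 0: end is 2M aligned */
        unsigned long ptes = (tail + PAGE_SIZE - 1) / PAGE_SIZE;   /* pte entries for the tail */

        unsigned long tables = ROUNDUP(puds * 8, PAGE_SIZE) +      /* 4 KB  */
                               ROUNDUP(pmds * 8, PAGE_SIZE) +      /* 16 KB */
                               ROUNDUP(ptes * 8, PAGE_SIZE);       /* 0     */

        printf("tables = %lu bytes (%lu KB)\n", tables, tables >> 10);
        return 0;
}

With use_gbpages the pmd term shrinks to just the unaligned tail below the last 1 GB boundary, which is why the estimate now takes use_pse and use_gbpages as explicit arguments instead of reading cpu_has_pse and direct_gbpages directly: CONFIG_DEBUG_PAGEALLOC can force both off.
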
@@ -517,119 +615,7 @@ static void __init init_gbpages(void)
                direct_gbpages = 0;
 }
 
-#ifdef CONFIG_MEMTEST
-
-static void __init memtest(unsigned long start_phys, unsigned long size,
-                                unsigned pattern)
-{
-       unsigned long i;
-       unsigned long *start;
-       unsigned long start_bad;
-       unsigned long last_bad;
-       unsigned long val;
-       unsigned long start_phys_aligned;
-       unsigned long count;
-       unsigned long incr;
-
-       switch (pattern) {
-       case 0:
-               val = 0UL;
-               break;
-       case 1:
-               val = -1UL;
-               break;
-       case 2:
-               val = 0x5555555555555555UL;
-               break;
-       case 3:
-               val = 0xaaaaaaaaaaaaaaaaUL;
-               break;
-       default:
-               return;
-       }
-
-       incr = sizeof(unsigned long);
-       start_phys_aligned = ALIGN(start_phys, incr);
-       count = (size - (start_phys_aligned - start_phys))/incr;
-       start = __va(start_phys_aligned);
-       start_bad = 0;
-       last_bad = 0;
-
-       for (i = 0; i < count; i++)
-               start[i] = val;
-       for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
-               if (*start != val) {
-                       if (start_phys_aligned == last_bad + incr) {
-                               last_bad += incr;
-                       } else {
-                               if (start_bad) {
-                                       printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
-                                               val, start_bad, last_bad + incr);
-                                       reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
-                               }
-                               start_bad = last_bad = start_phys_aligned;
-                       }
-               }
-       }
-       if (start_bad) {
-               printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
-                       val, start_bad, last_bad + incr);
-               reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
-       }
-
-}
-
-/* default is disabled */
-static int memtest_pattern __initdata;
-
-static int __init parse_memtest(char *arg)
-{
-       if (arg)
-               memtest_pattern = simple_strtoul(arg, NULL, 0);
-       return 0;
-}
-
-early_param("memtest", parse_memtest);
-
-static void __init early_memtest(unsigned long start, unsigned long end)
-{
-       u64 t_start, t_size;
-       unsigned pattern;
-
-       if (!memtest_pattern)
-               return;
-
-       printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
-       for (pattern = 0; pattern < memtest_pattern; pattern++) {
-               t_start = start;
-               t_size = 0;
-               while (t_start < end) {
-                       t_start = find_e820_area_size(t_start, &t_size, 1);
-
-                       /* done ? */
-                       if (t_start >= end)
-                               break;
-                       if (t_start + t_size > end)
-                               t_size = end - t_start;
-
-                       printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
-                               (unsigned long long)t_start,
-                               (unsigned long long)t_start + t_size, pattern);
-
-                       memtest(t_start, t_size, pattern);
-
-                       t_start += t_size;
-               }
-       }
-       printk(KERN_CONT "\n");
-}
-#else
-static void __init early_memtest(unsigned long start, unsigned long end)
-{
-}
-#endif
-
-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
                                                unsigned long end,
                                                unsigned long page_size_mask)
 {
@@ -644,7 +630,7 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                unsigned long pud_phys;
                pud_t *pud;
 
-               next = start + PGDIR_SIZE;
+               next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;
 
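
The masking added to next matters when start is not PGDIR aligned: 'start + PGDIR_SIZE' lands past the pgd boundary that this iteration's pud page can cover, while '& PGDIR_MASK' snaps next back to the boundary itself. A small sketch of the difference, assuming the usual PGDIR_SHIFT of 39:

/* sketch: next-pgd calculation with and without the PGDIR_MASK */
#include <stdio.h>

#define PGDIR_SHIFT 39
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE - 1))

int main(void)
{
        unsigned long start = 1UL << 30;        /* 1 GB: not PGDIR aligned */

        printf("old next: %#lx\n", start + PGDIR_SIZE);
        printf("new next: %#lx\n", (start + PGDIR_SIZE) & PGDIR_MASK);
        return 0;
}

The first result is 0x8040000000, one gigabyte past the 512 GB boundary; the second is the boundary itself, so the following loop iteration starts exactly at the next pgd entry.
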
@@ -654,17 +640,16 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                        continue;
                }
 
-               if (after_bootmem)
-                       pud = pud_offset(pgd, start & PGDIR_MASK);
-               else
-                       pud = alloc_low_page(&pud_phys);
-
+               pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);
-               pgd_populate(&init_mm, pgd_offset_k(start),
-                            __va(pud_phys));
+
+               spin_lock(&init_mm.page_table_lock);
+               pgd_populate(&init_mm, pgd, __va(pud_phys));
+               spin_unlock(&init_mm.page_table_lock);
        }
+       __flush_tlb_all();
 
        return last_map_addr;
 }
@@ -681,7 +666,6 @@ static int save_mr(struct map_range *mr, int nr_range,
                   unsigned long start_pfn, unsigned long end_pfn,
                   unsigned long page_size_mask)
 {
-
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
@@ -705,61 +689,85 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        unsigned long last_map_addr = 0;
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;
+       unsigned long pos;
 
        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
+       int use_pse, use_gbpages;
 
-       printk(KERN_INFO "init_memory_mapping\n");
+       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-       /*
-        * Find space for the kernel direct mapping tables.
-        *
-        * Later we should allocate these tables in the local node of the
-        * memory mapped. Unfortunately this is done currently before the
-        * nodes are discovered.
-        */
        if (!after_bootmem)
                init_gbpages();
 
-       if (direct_gbpages)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /*
+        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+        * This will simplify cpa(), which otherwise needs to support splitting
+        * large pages into small in interrupt context, etc.
+        */
+       use_pse = use_gbpages = 0;
+#else
+       use_pse = cpu_has_pse;
+       use_gbpages = direct_gbpages;
+#endif
+
+       if (use_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;
-       if (cpu_has_pse)
+       if (use_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;
 
        memset(mr, 0, sizeof(mr));
        nr_range = 0;
 
-       /* head if not big page alignment ?*/
+       /* head if not big page alignment ? */
        start_pfn = start >> PAGE_SHIFT;
-       end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+       pos = start_pfn << PAGE_SHIFT;
+       end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+       if (end_pfn > (end >> PAGE_SHIFT))
+               end_pfn = end >> PAGE_SHIFT;
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
-       /* big page (2M) range*/
-       start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+       /* big page (2M) range */
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
-       end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+       end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
-       if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-               end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                       page_size_mask & (1<<PG_LEVEL_2M));
+       if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+               end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
        /* big page (1G) range */
-       start_pfn = end_pfn;
-       end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+       start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
        /* tail is not big page (1G) alignment */
-       start_pfn = end_pfn;
-       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                       page_size_mask & (1<<PG_LEVEL_2M));
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
 
        /* tail is not big page (2M) alignment */
-       start_pfn = end_pfn;
+       start_pfn = pos>>PAGE_SHIFT;
        end_pfn = end>>PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
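
Taken together, the pos bookkeeping above splits [start, end) into at most five ranges: a 4K head up to the first 2M boundary, a 2M run up to the first 1G boundary, whole 1G pages, then 2M and 4K tails, each piece emitted only if non-empty. A compressed user-space sketch of the same alignment walk; the cap of the head piece against end is omitted and the shift values are the usual ones, assumed here:

/* sketch: splitting a physical range by 2M/1G alignment */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30

/* print one piece if it is non-empty and advance pos past it */
static unsigned long piece(unsigned long pos, unsigned long end_pfn, const char *how)
{
        unsigned long start_pfn = pos >> PAGE_SHIFT;

        if (start_pfn >= end_pfn)
                return pos;
        printf("%#012lx-%#012lx  %s\n",
               start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT, how);
        return end_pfn << PAGE_SHIFT;
}

int main(void)
{
        unsigned long end = 0x140000000UL;      /* 5 GB */
        unsigned long pos = 0x1000;             /* 4 KB: not even 2M aligned */

        /* 4K head up to the next 2M boundary */
        pos = piece(pos, ((pos + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT), "4k");
        /* 2M run up to the next 1G boundary */
        pos = piece(pos, ((pos + (1UL << PUD_SHIFT) - 1) >> PUD_SHIFT)
                                 << (PUD_SHIFT - PAGE_SHIFT), "2M");
        /* whole 1G pages below the last 1G boundary */
        pos = piece(pos, (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT), "1G");
        /* 2M tail, then 4K tail */
        pos = piece(pos, (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT), "2M");
        pos = piece(pos, end >> PAGE_SHIFT, "4k");
        return 0;
}

For these numbers the output is one 4k range (0x1000-0x200000), one 2M range (0x200000-0x40000000) and one 1G range (0x40000000-0x140000000); both tail pieces are empty because 5 GB is already 1 GB aligned.
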
@@ -772,8 +780,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
-                        (nr_range - 1 - i) * sizeof (struct map_range));
-               mr[i].start = old_start;
+                       (nr_range - 1 - i) * sizeof(struct map_range));
+               mr[i--].start = old_start;
                nr_range--;
        }
 
@@ -783,8 +791,15 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
+       /*
+        * Find space for the kernel direct mapping tables.
+        *
+        * Later we should allocate these tables in the local node of the
+        * memory mapped. Unfortunately this is done currently before the
+        * nodes are discovered.
+        */
        if (!after_bootmem)
-               find_early_table_space(end);
+               find_early_table_space(end, use_pse, use_gbpages);
 
        for (i = 0; i < nr_range; i++)
                last_map_addr = kernel_physical_mapping_init(
@@ -858,12 +873,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;
 
-       last_mapped_pfn = init_memory_mapping(start, start + size-1);
+       last_mapped_pfn = init_memory_mapping(start, start + size);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;
 
-       ret = __add_pages(zone, start_pfn, nr_pages);
-       WARN_ON(1);
+       ret = __add_pages(nid, zone, start_pfn, nr_pages);
+       WARN_ON_ONCE(ret);
 
        return ret;
 }
@@ -879,32 +894,13 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-       if (pagenr <= 256)
-               return 1;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
-}
-
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;
 
 void __init mem_init(void)
 {
        long codesize, reservedpages, datasize, initsize;
+       unsigned long absent_pages;
 
        pci_iommu_alloc();
 
@@ -918,8 +914,9 @@ void __init mem_init(void)
 #else
        totalram_pages = free_all_bootmem();
 #endif
-       reservedpages = max_pfn - totalram_pages -
-                                       absent_pages_in_range(0, max_pfn);
+
+       absent_pages = absent_pages_in_range(0, max_pfn);
+       reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -936,52 +933,14 @@ void __init mem_init(void)
                                 VSYSCALL_END - VSYSCALL_START);
 
        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-                               "%ldk reserved, %ldk data, %ldk init)\n",
+                        "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
+               absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
-
-       cpa_init();
-}
-
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-       unsigned long addr = begin;
-
-       if (addr >= end)
-               return;
-
-       /*
-        * If debugging page accesses then do not free this memory but
-        * mark them not present - any buggy init-section access will
-        * create a kernel page fault:
-        */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
-       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-       for (; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
-               totalram_pages++;
-       }
-#endif
-}
-
-void free_initmem(void)
-{
-       free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
 }
 
 #ifdef CONFIG_DEBUG_RODATA
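
The new "absent" field makes the final summary add up: reservedpages is what remains of max_pfn once the pages handed to the allocator and the e820 holes are subtracted, and every count is converted to KB with '<< (PAGE_SHIFT - 10)'. A tiny sketch of that arithmetic with made-up numbers:

/* sketch: the mem_init() summary arithmetic */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long max_pfn        = 1UL << 20;   /* 4 GB worth of pfns     */
        unsigned long totalram_pages = 1000000;     /* given to the allocator */
        unsigned long absent_pages   = 20000;       /* holes in the e820 map  */
        unsigned long reservedpages  = max_pfn - totalram_pages - absent_pages;

        printf("%luk absent, %luk reserved\n",
               absent_pages << (PAGE_SHIFT - 10),
               reservedpages << (PAGE_SHIFT - 10));
        return 0;
}
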
@@ -1022,13 +981,6 @@ void mark_rodata_ro(void)
 
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {