x86: rename do_not_nx to disable_nx in mm/init_64.c
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a952726..54efa57 100644
@@ -18,6 +18,7 @@
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
@@ -30,6 +31,7 @@
 #include <linux/nmi.h>
 
 #include <asm/processor.h>
+#include <asm/bios_ebda.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/numa.h>
+#include <asm/cacheflush.h>
+#include <asm/init.h>
 
-const struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
+/*
+ * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
+ * The direct mapping extends to max_pfn_mapped, so that we can directly access
+ * apertures, ACPI and other tables without having to play with fixmaps.
+ */
+unsigned long max_low_pfn_mapped;
+unsigned long max_pfn_mapped;
 
 static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+static int __init parse_direct_gbpages_off(char *arg)
+{
+       direct_gbpages = 0;
+       return 0;
+}
+early_param("nogbpages", parse_direct_gbpages_off);
+
+static int __init parse_direct_gbpages_on(char *arg)
+{
+       direct_gbpages = 1;
+       return 0;
+}
+early_param("gbpages", parse_direct_gbpages_on);
+
 /*
  * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
  * in physical space, so we can cache the location of the first one and
  * move around without checking the pgd every time.
  */
 
-void show_mem(void)
-{
-       long i, total = 0, reserved = 0;
-       long shared = 0, cached = 0;
-       struct page *page;
-       pg_data_t *pgdat;
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
-       printk(KERN_INFO "Mem-info:\n");
-       show_free_areas();
-       printk(KERN_INFO "Free swap:       %6ldkB\n",
-               nr_swap_pages << (PAGE_SHIFT-10));
+static int disable_nx __cpuinitdata;
 
-       for_each_online_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       /*
-                        * This loop can take a while with 256 GB and
-                        * 4k pages so defer the NMI watchdog:
-                        */
-                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-                               touch_nmi_watchdog();
+/*
+ * noexec=on|off
+ * Control non-executable mappings for 64-bit processes.
+ *
+ * on  Enable (default)
+ * off Disable
+ */
+static int __init nonx_setup(char *str)
+{
+       if (!str)
+               return -EINVAL;
+       if (!strncmp(str, "on", 2)) {
+               __supported_pte_mask |= _PAGE_NX;
+               disable_nx = 0;
+       } else if (!strncmp(str, "off", 3)) {
+               disable_nx = 1;
+               __supported_pte_mask &= ~_PAGE_NX;
+       }
+       return 0;
+}
+early_param("noexec", nonx_setup);
 
-                       if (!pfn_valid(pgdat->node_start_pfn + i))
-                               continue;
+void __cpuinit check_efer(void)
+{
+       unsigned long efer;
 
-                       page = pfn_to_page(pgdat->node_start_pfn + i);
-                       total++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-       }
-       printk(KERN_INFO "%lu pages of RAM\n",          total);
-       printk(KERN_INFO "%lu reserved pages\n",        reserved);
-       printk(KERN_INFO "%lu pages shared\n",          shared);
-       printk(KERN_INFO "%lu pages swap cached\n",     cached);
+       rdmsrl(MSR_EFER, efer);
+       if (!(efer & EFER_NX) || disable_nx)
+               __supported_pte_mask &= ~_PAGE_NX;
 }
 
-int after_bootmem;
+int force_personality32;
 
-static __init void *spp_getpage(void)
+/*
+ * noexec32=on|off
+ * Control non-executable heap for 32-bit processes.
+ * To also control the stack, use noexec=off.
+ *
+ * on  PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
+ * off PROT_READ implies PROT_EXEC
+ */
+static int __init nonx32_setup(char *str)
+{
+       if (!strcmp(str, "on"))
+               force_personality32 &= ~READ_IMPLIES_EXEC;
+       else if (!strcmp(str, "off"))
+               force_personality32 |= READ_IMPLIES_EXEC;
+       return 1;
+}
+__setup("noexec32=", nonx32_setup);
+
+/*
+ * NOTE: This function is marked __ref because it calls an __init function
+ * (alloc_bootmem_pages). It's safe to do so ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
 {
        void *ptr;
 
@@ -120,47 +161,51 @@ static __init void *spp_getpage(void)
        return ptr;
 }
 
-static __init void
-set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
+static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte, new_pte;
-
-       pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
-
-       pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
-               printk(KERN_ERR
-                       "PGD FIXMAP MISSING, it should be setup in head.S!\n");
-               return;
+               pud_t *pud = (pud_t *)spp_getpage();
+               pgd_populate(&init_mm, pgd, pud);
+               if (pud != pud_offset(pgd, 0))
+                       printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+                              pud, pud_offset(pgd, 0));
        }
-       pud = pud_offset(pgd, vaddr);
+       return pud_offset(pgd, vaddr);
+}
+
+static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+{
        if (pud_none(*pud)) {
-               pmd = (pmd_t *) spp_getpage();
-               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-               if (pmd != pmd_offset(pud, 0)) {
+               pmd_t *pmd = (pmd_t *) spp_getpage();
+               pud_populate(&init_mm, pud, pmd);
+               if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
-                               pmd, pmd_offset(pud, 0));
-                       return;
-               }
+                              pmd, pmd_offset(pud, 0));
        }
-       pmd = pmd_offset(pud, vaddr);
+       return pmd_offset(pud, vaddr);
+}
+
+static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
+{
        if (pmd_none(*pmd)) {
-               pte = (pte_t *) spp_getpage();
-               set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-               if (pte != pte_offset_kernel(pmd, 0)) {
+               pte_t *pte = (pte_t *) spp_getpage();
+               pmd_populate_kernel(&init_mm, pmd, pte);
+               if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
-                       return;
-               }
        }
-       new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
+       return pte_offset_kernel(pmd, vaddr);
+}
+
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pud = pud_page + pud_index(vaddr);
+       pmd = fill_pmd(pud, vaddr);
+       pte = fill_pte(pmd, vaddr);
 
-       pte = pte_offset_kernel(pmd, vaddr);
-       if (!pte_none(*pte) &&
-           pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-               pte_ERROR(*pte);
        set_pte(pte, new_pte);
 
        /*
@@ -170,25 +215,112 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
        __flush_tlb_one(vaddr);
 }
 
-/* NOTE: this is meant to be run only at boot */
-void __init
-__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 {
-       unsigned long address = __fix_to_virt(idx);
+       pgd_t *pgd;
+       pud_t *pud_page;
+
+       pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
 
-       if (idx >= __end_of_fixed_addresses) {
-               printk(KERN_ERR "Invalid __set_fixmap\n");
+       pgd = pgd_offset_k(vaddr);
+       if (pgd_none(*pgd)) {
+               printk(KERN_ERR
+                       "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
-       set_pte_phys(address, phys, prot);
+       pud_page = (pud_t*)pgd_page_vaddr(*pgd);
+       set_pte_vaddr_pud(pud_page, vaddr, pteval);
+}
+
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+
+       pgd = pgd_offset_k(vaddr);
+       pud = fill_pud(pgd, vaddr);
+       return fill_pmd(pud, vaddr);
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+       pmd_t *pmd;
+
+       pmd = populate_extra_pmd(vaddr);
+       return fill_pte(pmd, vaddr);
+}
+
+/*
+ * Create large page table mappings for a range of physical addresses.
+ */
+static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+                                               pgprot_t prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
+       for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
+               pgd = pgd_offset_k((unsigned long)__va(phys));
+               if (pgd_none(*pgd)) {
+                       pud = (pud_t *) spp_getpage();
+                       set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+                                               _PAGE_USER));
+               }
+               pud = pud_offset(pgd, (unsigned long)__va(phys));
+               if (pud_none(*pud)) {
+                       pmd = (pmd_t *) spp_getpage();
+                       set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+                                               _PAGE_USER));
+               }
+               pmd = pmd_offset(pud, phys);
+               BUG_ON(!pmd_none(*pmd));
+               set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
+       }
+}
+
+void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
+{
+       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
+}
+
+void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
+{
+       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
 }
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
+/*
+ * The head.S code sets up the kernel high mapping:
+ *
+ *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
+ *
+ * phys_addr holds the negative offset to the kernel, which is added
+ * to the compile time generated pmds. This results in invalid pmds up
+ * to the point where we hit the physaddr 0 mapping.
+ *
+ * We limit the mappings to the region from _text to _end.  _end is
+ * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * well, as they are located before _text:
+ */
+void __init cleanup_highmap(void)
+{
+       unsigned long vaddr = __START_KERNEL_map;
+       unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
+       pmd_t *pmd = level2_kernel_pgt;
+       pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+       for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+               if (pmd_none(*pmd))
+                       continue;
+               if (vaddr < (unsigned long) _text || vaddr > end)
+                       set_pmd(pmd, __pmd(0));
+       }
+}
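
A quick worked example of the rounding above, with illustrative numbers rather
than ones from this patch: if _end lies 10 MB + 4 KB past __START_KERNEL_map,
roundup(_end, PMD_SIZE) moves end up to the 12 MB mark, so the loop keeps the
pmds whose addresses fall in [_text, 12 MB) and clears every other entry in
level2_kernel_pgt, including the invalid ones that sit before _text.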
+
+static __ref void *alloc_low_page(unsigned long *phys)
 {
-       unsigned long pfn = table_end++;
+       unsigned long pfn = e820_table_end++;
        void *adr;
 
        if (after_bootmem) {
@@ -198,16 +330,16 @@ static __meminit void *alloc_low_page(unsigned long *phys)
                return adr;
        }
 
-       if (pfn >= end_pfn)
+       if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");
 
-       adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
+       adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys  = pfn * PAGE_SIZE;
        return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
        if (after_bootmem)
                return;
@@ -215,66 +347,73 @@ static __meminit void unmap_low_page(void *adr)
        early_iounmap(adr, PAGE_SIZE);
 }
 
-/* Must run before zap_low_mappings */
-__meminit void *early_ioremap(unsigned long addr, unsigned long size)
+static unsigned long __meminit
+phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+             pgprot_t prot)
 {
-       pmd_t *pmd, *last_pmd;
-       unsigned long vaddr;
-       int i, pmds;
+       unsigned pages = 0;
+       unsigned long last_map_addr = end;
+       int i;
+
+       pte_t *pte = pte_page + pte_index(addr);
 
-       pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
-       vaddr = __START_KERNEL_map;
-       pmd = level2_kernel_pgt;
-       last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
+       for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
 
-       for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
-               for (i = 0; i < pmds; i++) {
-                       if (pmd_present(pmd[i]))
-                               goto continue_outer_loop;
+               if (addr >= end) {
+                       if (!after_bootmem) {
+                               for(; i < PTRS_PER_PTE; i++, pte++)
+                                       set_pte(pte, __pte(0));
+                       }
+                       break;
                }
-               vaddr += addr & ~PMD_MASK;
-               addr &= PMD_MASK;
 
-               for (i = 0; i < pmds; i++, addr += PMD_SIZE)
-                       set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-               __flush_tlb_all();
+               /*
+                * We will reuse the existing mapping.  Xen, for example, has
+                * special requirements such as mapping pagetable pages
+                * read-only, so assume that whoever pre-set up these mappings
+                * knew what they were doing.
+                */
+               if (pte_val(*pte)) {
+                       pages++;
+                       continue;
+               }
 
-               return (void *)vaddr;
-continue_outer_loop:
-               ;
+               if (0)
+                       printk("   pte=%p addr=%lx pte=%016lx\n",
+                              pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+               pages++;
+               set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
+               last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }
-       printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
 
-       return NULL;
+       update_page_count(PG_LEVEL_4K, pages);
+
+       return last_map_addr;
 }
 
-/*
- * To avoid virtual aliases later:
- */
-__meminit void early_iounmap(void *addr, unsigned long size)
+static unsigned long __meminit
+phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
+               pgprot_t prot)
 {
-       unsigned long vaddr;
-       pmd_t *pmd;
-       int i, pmds;
-
-       vaddr = (unsigned long)addr;
-       pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
-       pmd = level2_kernel_pgt + pmd_index(vaddr);
+       pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
 
-       for (i = 0; i < pmds; i++)
-               pmd_clear(pmd + i);
-
-       __flush_tlb_all();
+       return phys_pte_init(pte, address, end, prot);
 }
 
-static void __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+static unsigned long __meminit
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+             unsigned long page_size_mask, pgprot_t prot)
 {
+       unsigned long pages = 0;
+       unsigned long last_map_addr = end;
+
        int i = pmd_index(address);
 
        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
-               unsigned long entry;
+               unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
+               pte_t *pte;
+               pgprot_t new_prot = prot;
 
                if (address >= end) {
                        if (!after_bootmem) {
@@ -284,34 +423,81 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
                        break;
                }
 
-               if (pmd_val(*pmd))
+               if (pmd_val(*pmd)) {
+                       if (!pmd_large(*pmd)) {
+                               spin_lock(&init_mm.page_table_lock);
+                               last_map_addr = phys_pte_update(pmd, address,
+                                                               end, prot);
+                               spin_unlock(&init_mm.page_table_lock);
+                               continue;
+                       }
+                       /*
+                        * If we are ok with PG_LEVEL_2M mapping, then we will
+                        * use the existing mapping.
+                        *
+                        * Otherwise, we will split the large page mapping but
+                        * use the same existing protection bits except for
+                        * large page, so that we don't violate Intel's TLB
+                        * Application note (317080) which says, while changing
+                        * the page sizes, new and old translations should
+                        * not differ with respect to page frame and
+                        * attributes.
+                        */
+                       if (page_size_mask & (1 << PG_LEVEL_2M)) {
+                               pages++;
+                               continue;
+                       }
+                       new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
+               }
+
+               if (page_size_mask & (1<<PG_LEVEL_2M)) {
+                       pages++;
+                       spin_lock(&init_mm.page_table_lock);
+                       set_pte((pte_t *)pmd,
+                               pfn_pte(address >> PAGE_SHIFT,
+                                       __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+                       spin_unlock(&init_mm.page_table_lock);
+                       last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
+               }
+
+               pte = alloc_low_page(&pte_phys);
+               last_map_addr = phys_pte_init(pte, address, end, new_prot);
+               unmap_low_page(pte);
 
-               entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
-               entry &= __supported_pte_mask;
-               set_pmd(pmd, __pmd(entry));
+               spin_lock(&init_mm.page_table_lock);
+               pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+               spin_unlock(&init_mm.page_table_lock);
        }
+       update_page_count(PG_LEVEL_2M, pages);
+       return last_map_addr;
 }
 
-static void __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+static unsigned long __meminit
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
+               unsigned long page_size_mask, pgprot_t prot)
 {
        pmd_t *pmd = pmd_offset(pud, 0);
-       spin_lock(&init_mm.page_table_lock);
-       phys_pmd_init(pmd, address, end);
-       spin_unlock(&init_mm.page_table_lock);
+       unsigned long last_map_addr;
+
+       last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
        __flush_tlb_all();
+       return last_map_addr;
 }
 
-static void __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+static unsigned long __meminit
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+                        unsigned long page_size_mask)
 {
+       unsigned long pages = 0;
+       unsigned long last_map_addr = end;
        int i = pud_index(addr);
 
        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
+               pgprot_t prot = PAGE_KERNEL;
 
                if (addr >= end)
                        break;
@@ -323,69 +509,74 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
                }
 
                if (pud_val(*pud)) {
-                       phys_pmd_update(pud, addr, end);
+                       if (!pud_large(*pud)) {
+                               last_map_addr = phys_pmd_update(pud, addr, end,
+                                                        page_size_mask, prot);
+                               continue;
+                       }
+                       /*
+                        * If we are ok with PG_LEVEL_1G mapping, then we will
+                        * use the existing mapping.
+                        *
+                        * Otherwise, we will split the gbpage mapping but use
+                        * the same existing protection bits except for large
+                        * page, so that we don't violate Intel's TLB
+                        * Application note (317080) which says, while changing
+                        * the page sizes, new and old translations should
+                        * not differ with respect to page frame and
+                        * attributes.
+                        */
+                       if (page_size_mask & (1 << PG_LEVEL_1G)) {
+                               pages++;
+                               continue;
+                       }
+                       prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
+               }
+
+               if (page_size_mask & (1<<PG_LEVEL_1G)) {
+                       pages++;
+                       spin_lock(&init_mm.page_table_lock);
+                       set_pte((pte_t *)pud,
+                               pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                       spin_unlock(&init_mm.page_table_lock);
+                       last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }
 
                pmd = alloc_low_page(&pmd_phys);
+               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
+                                             prot);
+               unmap_low_page(pmd);
 
                spin_lock(&init_mm.page_table_lock);
-               set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-               phys_pmd_init(pmd, addr, end);
+               pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
-
-               unmap_low_page(pmd);
        }
        __flush_tlb_all();
-}
 
-static void __init find_early_table_space(unsigned long end)
-{
-       unsigned long puds, pmds, tables, start;
+       update_page_count(PG_LEVEL_1G, pages);
 
-       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
-                round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+       return last_map_addr;
+}
 
-       /*
-        * RED-PEN putting page tables only on node 0 could
-        * cause a hotspot and fill up ZONE_DMA. The page tables
-        * need roughly 0.5KB per GB.
-        */
-       start = 0x8000;
-       table_start = find_e820_area(start, end, tables);
-       if (table_start == -1UL)
-               panic("Cannot find space for the kernel page tables");
+static unsigned long __meminit
+phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
+                unsigned long page_size_mask)
+{
+       pud_t *pud;
 
-       table_start >>= PAGE_SHIFT;
-       table_end = table_start;
+       pud = (pud_t *)pgd_page_vaddr(*pgd);
 
-       early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, table_start << PAGE_SHIFT,
-               (table_start << PAGE_SHIFT) + tables);
+       return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask)
 {
-       unsigned long next;
 
-       pr_debug("init_memory_mapping\n");
-
-       /*
-        * Find space for the kernel direct mapping tables.
-        *
-        * Later we should allocate these tables in the local node of the
-        * memory mapped. Unfortunately this is done currently before the
-        * nodes are discovered.
-        */
-       if (!after_bootmem)
-               find_early_table_space(end);
+       unsigned long next, last_map_addr = end;
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
@@ -395,28 +586,49 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
                unsigned long pud_phys;
                pud_t *pud;
 
-               if (after_bootmem)
-                       pud = pud_offset(pgd, start & PGDIR_MASK);
-               else
-                       pud = alloc_low_page(&pud_phys);
-
-               next = start + PGDIR_SIZE;
+               next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;
-               phys_pud_init(pud, __pa(start), __pa(next));
-               if (!after_bootmem)
-                       set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+
+               if (pgd_val(*pgd)) {
+                       last_map_addr = phys_pud_update(pgd, __pa(start),
+                                                __pa(end), page_size_mask);
+                       continue;
+               }
+
+               pud = alloc_low_page(&pud_phys);
+               last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+                                                page_size_mask);
                unmap_low_page(pud);
-       }
 
-       if (!after_bootmem)
-               mmu_cr4_features = read_cr4();
+               spin_lock(&init_mm.page_table_lock);
+               pgd_populate(&init_mm, pgd, __va(pud_phys));
+               spin_unlock(&init_mm.page_table_lock);
+       }
        __flush_tlb_all();
 
-       reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+       return last_map_addr;
 }
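
A hedged sketch of how this routine is typically driven; the real caller is
init_memory_mapping(), outside this hunk, and the mask assembly below mirrors
its general shape rather than its exact code:

        /* Illustrative only: prefer 2MB pages when the CPU has PSE and 1GB
         * pages when gbpages is enabled, then direct-map all of RAM. */
        unsigned long page_size_mask = 0;

        if (cpu_has_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;
        if (direct_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;

        last_map_addr = kernel_physical_mapping_init(0, max_pfn << PAGE_SHIFT,
                                                     page_size_mask);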
 
 #ifndef CONFIG_NUMA
+void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
+{
+       unsigned long bootmap_size, bootmap;
+
+       bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+       bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
+                                PAGE_SIZE);
+       if (bootmap == -1L)
+               panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+       /* don't touch min_low_pfn */
+       bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
+                                        0, end_pfn);
+       e820_register_active_regions(0, start_pfn, end_pfn);
+       free_bootmem_with_active_regions(0, end_pfn);
+       early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
+       reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
+}
+
 void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -424,69 +636,17 @@ void __init paging_init(void)
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-       max_zone_pfns[ZONE_NORMAL] = end_pfn;
+       max_zone_pfns[ZONE_NORMAL] = max_pfn;
 
-       memory_present(0, 0, end_pfn);
+       memory_present(0, 0, max_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
 }
 #endif
 
 /*
- * Unmap a kernel mapping if it exists. This is useful to avoid
- * prefetches from the CPU leading to inconsistent cache lines.
- * address and size must be aligned to 2MB boundaries.
- * Does nothing when the mapping doesn't exist.
- */
-void __init clear_kernel_mapping(unsigned long address, unsigned long size)
-{
-       unsigned long end = address + size;
-
-       BUG_ON(address & ~LARGE_PAGE_MASK);
-       BUG_ON(size & ~LARGE_PAGE_MASK);
-
-       for (; address < end; address += LARGE_PAGE_SIZE) {
-               pgd_t *pgd = pgd_offset_k(address);
-               pud_t *pud;
-               pmd_t *pmd;
-
-               if (pgd_none(*pgd))
-                       continue;
-
-               pud = pud_offset(pgd, address);
-               if (pud_none(*pud))
-                       continue;
-
-               pmd = pmd_offset(pud, address);
-               if (!pmd || pmd_none(*pmd))
-                       continue;
-
-               if (!(pmd_val(*pmd) & _PAGE_PSE)) {
-                       /*
-                        * Could handle this, but it should not happen
-                        * currently:
-                        */
-                       printk(KERN_ERR "clear_kernel_mapping: "
-                               "mapping has been split. will leak memory\n");
-                       pmd_ERROR(*pmd);
-               }
-               set_pmd(pmd, __pmd(0));
-       }
-       __flush_tlb_all();
-}
-
-/*
  * Memory hotplug specific functions
  */
-void online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       init_page_count(page);
-       __free_page(page);
-       totalram_pages++;
-       num_physpages++;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * Memory is added always to NORMAL zone. This means you will never get
@@ -496,14 +656,16 @@ int arch_add_memory(int nid, u64 start, u64 size)
 {
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
-       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;
 
-       init_memory_mapping(start, start + size-1);
+       last_mapped_pfn = init_memory_mapping(start, start + size);
+       if (last_mapped_pfn > max_pfn_mapped)
+               max_pfn_mapped = last_mapped_pfn;
 
-       ret = __add_pages(zone, start_pfn, nr_pages);
-       WARN_ON(1);
+       ret = __add_pages(nid, zone, start_pfn, nr_pages);
+       WARN_ON_ONCE(ret);
 
        return ret;
 }
@@ -525,19 +687,12 @@ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
 void __init mem_init(void)
 {
        long codesize, reservedpages, datasize, initsize;
+       unsigned long absent_pages;
 
        pci_iommu_alloc();
 
        /* clear_bss() already clear the empty_zero_page */
 
-       /* temporary debugging - double check it's true: */
-       {
-               int i;
-
-               for (i = 0; i < 1024; i++)
-                       WARN_ON_ONCE(empty_zero_page[i]);
-       }
-
        reservedpages = 0;
 
        /* this will put all low memory onto the freelists */
@@ -546,8 +701,9 @@ void __init mem_init(void)
 #else
        totalram_pages = free_all_bootmem();
 #endif
-       reservedpages = end_pfn - totalram_pages -
-                                       absent_pages_in_range(0, end_pfn);
+
+       absent_pages = absent_pages_in_range(0, max_pfn);
+       reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -564,80 +720,40 @@ void __init mem_init(void)
                                 VSYSCALL_END - VSYSCALL_START);
 
        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-                               "%ldk reserved, %ldk data, %ldk init)\n",
+                        "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-               end_pfn << (PAGE_SHIFT-10),
+               max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
+               absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-       unsigned long addr;
-
-       if (begin >= end)
-               return;
-
-       /*
-        * If debugging page accesses then do not free this memory but
-        * mark them not present - any buggy init-section access will
-        * create a kernel page fault:
-        */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
-       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-       for (addr = begin; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
-               totalram_pages++;
-       }
-#endif
-}
-
-void free_initmem(void)
-{
-       free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
 void mark_rodata_ro(void)
 {
-       unsigned long start = (unsigned long)_stext, end;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       /* It must still be possible to apply SMP alternatives. */
-       if (num_possible_cpus() > 1)
-               start = (unsigned long)_etext;
-#endif
+       unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
+       unsigned long rodata_start =
+               ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
 
-#ifdef CONFIG_KPROBES
-       start = (unsigned long)__start_rodata;
+#ifdef CONFIG_DYNAMIC_FTRACE
+       /* Dynamic tracing modifies the kernel text section */
+       start = rodata_start;
 #endif
 
-       end = (unsigned long)__end_rodata;
-       start = (start + PAGE_SIZE - 1) & PAGE_MASK;
-       end &= PAGE_MASK;
-       if (end <= start)
-               return;
-
-       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
+       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+
+       /*
+        * The rodata section (but not the kernel text!) should also be
+        * not-executable.
+        */
+       set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
 
        rodata_test();
 
@@ -649,45 +765,53 @@ void mark_rodata_ro(void)
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 }
-#endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
 #endif
 
-void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
+                                  int flags)
 {
 #ifdef CONFIG_NUMA
-       int nid = phys_to_nid(phys);
+       int nid, next_nid;
+       int ret;
 #endif
        unsigned long pfn = phys >> PAGE_SHIFT;
 
-       if (pfn >= end_pfn) {
+       if (pfn >= max_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
-               if (pfn < end_pfn_map)
-                       return;
+               if (pfn < max_pfn_mapped)
+                       return -EFAULT;
 
-               printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+               printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
                                phys, len);
-               return;
+               return -EFAULT;
        }
 
        /* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
-       reserve_bootmem_node(NODE_DATA(nid), phys, len);
+       nid = phys_to_nid(phys);
+       next_nid = phys_to_nid(phys + len - 1);
+       if (nid == next_nid)
+               ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
+       else
+               ret = reserve_bootmem(phys, len, flags);
+
+       if (ret != 0)
+               return ret;
+
 #else
-       reserve_bootmem(phys, len);
+       reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
 #endif
+
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
+
+       return 0;
 }
 
 int kern_addr_valid(unsigned long addr)
@@ -777,6 +901,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 /*
  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
  */
+static long __meminitdata addr_start, addr_end;
+static void __meminitdata *p_start, *p_end;
+static int __meminitdata node_start;
+
 int __meminit
 vmemmap_populate(struct page *start_page, unsigned long size, int node)
 {
@@ -788,7 +916,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
        pmd_t *pmd;
 
        for (; addr < end; addr = next) {
-               next = pmd_addr_end(addr, end);
+               void *p = NULL;
 
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
@@ -798,25 +926,63 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                if (!pud)
                        return -ENOMEM;
 
-               pmd = pmd_offset(pud, addr);
-               if (pmd_none(*pmd)) {
-                       pte_t entry;
-                       void *p;
+               if (!cpu_has_pse) {
+                       next = (addr + PAGE_SIZE) & PAGE_MASK;
+                       pmd = vmemmap_pmd_populate(pud, addr, node);
 
-                       p = vmemmap_alloc_block(PMD_SIZE, node);
-                       if (!p)
+                       if (!pmd)
                                return -ENOMEM;
 
-                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-                                                       PAGE_KERNEL_LARGE);
-                       set_pmd(pmd, __pmd(pte_val(entry)));
+                       p = vmemmap_pte_populate(pmd, addr, node);
 
-                       printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
-                               addr, addr + PMD_SIZE - 1, p, node);
+                       if (!p)
+                               return -ENOMEM;
+
+                       addr_end = addr + PAGE_SIZE;
+                       p_end = p + PAGE_SIZE;
                } else {
-                       vmemmap_verify((pte_t *)pmd, node, addr, next);
+                       next = pmd_addr_end(addr, end);
+
+                       pmd = pmd_offset(pud, addr);
+                       if (pmd_none(*pmd)) {
+                               pte_t entry;
+
+                               p = vmemmap_alloc_block(PMD_SIZE, node);
+                               if (!p)
+                                       return -ENOMEM;
+
+                               entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+                                               PAGE_KERNEL_LARGE);
+                               set_pmd(pmd, __pmd(pte_val(entry)));
+
+                               /* check to see if we have contiguous blocks */
+                               if (p_end != p || node_start != node) {
+                                       if (p_start)
+                                               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                                      addr_start, addr_end-1, p_start, p_end-1, node_start);
+                                       addr_start = addr;
+                                       node_start = node;
+                                       p_start = p;
+                               }
+
+                               addr_end = addr + PMD_SIZE;
+                               p_end = p + PMD_SIZE;
+                       } else
+                               vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
+
        }
        return 0;
 }
+
+void __meminit vmemmap_populate_print_last(void)
+{
+       if (p_start) {
+               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                       addr_start, addr_end-1, p_start, p_end-1, node_start);
+               p_start = NULL;
+               p_end = NULL;
+               node_start = 0;
+       }
+}
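
With the contiguous-block tracking above, the boot log prints one line per
merged run instead of one line per 2MB section.  An illustrative line matching
the format string used here (addresses made up):

        [ffffea0000000000-ffffea00003fffff] PMD -> [ffff810001400000-ffff8100017fffff] on node 0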
 #endif