x86: use the right protections for split-up pagetables
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 27b8293..2cef050 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/bootmem.h>
@@ -31,15 +32,18 @@
 #include <linux/cpumask.h>
 
 #include <asm/asm.h>
+#include <asm/bios_ebda.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
+#include <asm/smp.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
+unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -57,6 +61,26 @@ unsigned long highstart_pfn, highend_pfn;
 
 static noinline int do_test_wp_bit(void);
 
+
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
+static unsigned long __meminitdata table_top;
+
+static int __initdata after_init_bootmem;
+
+static __init void *alloc_low_page(void)
+{
+       unsigned long pfn = table_end++;
+       void *adr;
+
+       if (pfn >= table_top)
+               panic("alloc_low_page: ran out of memory");
+
+       adr = __va(pfn * PAGE_SIZE);
+       memset(adr, 0, PAGE_SIZE);
+       return adr;
+}
+
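
alloc_low_page() above is a plain bump allocator: table_end is a pfn cursor walking a physical window that find_early_table_space(), added further down, reserves out of e820. Nothing is ever freed; once the mappings are built the used part of the window is handed to reserve_early(). A minimal userspace sketch of the same pattern, with a malloc'd buffer standing in for the reserved physical range (names and sizes are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    static unsigned long table_start, table_end, table_top;   /* pfns */
    static unsigned char *window;             /* stands in for __va() */

    static void *alloc_low_page(void)
    {
            unsigned long pfn = table_end++;          /* bump the cursor */

            if (pfn >= table_top) {                   /* window exhausted */
                    fprintf(stderr, "alloc_low_page: ran out of memory\n");
                    exit(1);                          /* kernel: panic() */
            }
            void *adr = window + (pfn - table_start) * PAGE_SIZE;
            memset(adr, 0, PAGE_SIZE);                /* tables start clean */
            return adr;
    }

    int main(void)
    {
            table_start = table_end = 7;              /* as if found in e820 */
            table_top = table_start + 4;              /* 4-page window */
            window = malloc(4 * PAGE_SIZE);

            for (int i = 0; i < 4; i++)
                    printf("pagetable page %d at %p\n", i, alloc_low_page());
            return 0;                                 /* a 5th call would die */
    }
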
 /*
  * Creates a middle page table and puts a pointer to it in the
  * given global directory entry. This only returns the gd entry
@@ -69,12 +93,16 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-               pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-
+               if (after_init_bootmem)
+                       pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+               else
+                       pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
+
+               return pmd_table;
        }
 #endif
        pud = pud_offset(pgd, 0);
@@ -92,13 +120,15 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;
 
+               if (after_init_bootmem) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
+                       page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
-               if (!page_table) {
-                       page_table =
+                       if (!page_table)
+                               page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-               }
+               } else
+                       page_table = (pte_t *)alloc_low_page();
 
                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
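
Both one_md_table_init() and one_page_table_init() now pick their allocator off the same flag: until setup_bootmem_allocator() sets after_init_bootmem, pagetable pages come from the reserved e820 window via alloc_low_page(); calls made after that point (for instance the permanent kmap tables built later from paging_init()) use bootmem. A condensed, runnable model of that dispatch, with both allocators stubbed:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    static int after_init_bootmem;  /* set at end of setup_bootmem_allocator() */

    static void *alloc_low_page(void)                  /* stub: early window */
    {
            return calloc(1, PAGE_SIZE);
    }

    static void *alloc_bootmem_low_pages(unsigned long sz)  /* stub: bootmem */
    {
            return calloc(1, sz);
    }

    static void *pte_page_alloc(void)   /* the dispatch the functions above use */
    {
            if (after_init_bootmem)
                    return alloc_bootmem_low_pages(PAGE_SIZE);
            return alloc_low_page();
    }

    int main(void)
    {
            printf("early: %p\n", pte_page_alloc());   /* from the window */
            after_init_bootmem = 1;
            printf("late:  %p\n", pte_page_alloc());   /* from bootmem */
            return 0;
    }
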
@@ -108,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
        return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+                                          unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+       /*
+        * Something (early fixmap) may already have put a pte
+        * page here, which causes the page table allocation
+        * to become nonlinear. Attempt to fix it, and if it
+        * is still nonlinear then we have to bug.
+        */
+       int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+       int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+       if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+           && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+           && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+           && ((__pa(pte) >> PAGE_SHIFT) < table_start
+               || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+               pte_t *newpte;
+               int i;
+
+               BUG_ON(after_init_bootmem);
+               newpte = alloc_low_page();
+               for (i = 0; i < PTRS_PER_PTE; i++)
+                       set_pte(newpte + i, pte[i]);
+
+               paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+               set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+               BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+               __flush_tlb_all();
+
+               paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+               pte = newpte;
+       }
+       BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+              && vaddr > fix_to_virt(FIX_KMAP_END)
+              && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+       return pte;
+}
+
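
The index arithmetic at the top of page_table_kmap_check() is easy to misread: fix_to_virt() grows downward, so FIX_KMAP_END (the higher fixmap index) maps to the lower virtual address and therefore supplies pmd_idx_kmap_begin. A small standalone illustration with made-up i386-style constants (the real FIXADDR_TOP and kmap index values are config-dependent):

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define PMD_SHIFT      22                       /* non-PAE i386 */
    #define FIXADDR_TOP    0xfffff000UL             /* illustrative */
    #define FIX_KMAP_BEGIN 16UL                     /* illustrative index */
    #define FIX_KMAP_END   (FIX_KMAP_BEGIN + 1023)  /* KM_TYPE_NR*NR_CPUS slots */

    static unsigned long fix_to_virt(unsigned long idx)
    {
            /* higher fixmap index == lower virtual address */
            return FIXADDR_TOP - (idx << PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
            unsigned long end   = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

            printf("kmap ptes occupy pmd slots %lu..%lu\n", begin, end);
            return 0;
    }
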
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -124,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
+       pte_t *pte = NULL;
 
        vaddr = start;
        pgd_idx = pgd_index(vaddr);
@@ -135,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
-                       one_page_table_init(pmd);
+                       pte = page_table_kmap_check(one_page_table_init(pmd),
+                                                   pmd, vaddr, pte);
 
                        vaddr += PMD_SIZE;
                }
@@ -155,41 +228,72 @@ static inline int is_kernel_text(unsigned long addr)
  * of max_low_pfn pages, by creating page tables starting from address
  * PAGE_OFFSET:
  */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
+                                               unsigned long start_pfn,
+                                               unsigned long end_pfn,
+                                               int use_pse)
 {
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
-       unsigned pages_2m = 0, pages_4k = 0;
+       unsigned pages_2m, pages_4k;
+       int mapping_iter;
 
-       pgd_idx = pgd_index(PAGE_OFFSET);
-       pgd = pgd_base + pgd_idx;
-       pfn = 0;
+       /*
+        * First iteration will setup identity mapping using large/small pages
+        * based on use_pse, with other attributes same as set by
+        * the early code in head_32.S
+        *
+        * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
+        * as desired for the kernel identity mapping.
+        *
+        * This two pass mechanism conforms to the TLB app note which says:
+        *
+        *     "Software should not write to a paging-structure entry in a way
+        *      that would change, for any linear address, both the page size
+        *      and either the page frame or attributes."
+        */
+       mapping_iter = 1;
+
+       if (!cpu_has_pse)
+               use_pse = 0;
 
+repeat:
+       pages_2m = pages_4k = 0;
+       pfn = start_pfn;
+       pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
-               if (pfn >= max_low_pfn)
-                       continue;
 
-               for (pmd_idx = 0;
-                    pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
+               if (pfn >= end_pfn)
+                       continue;
+#ifdef CONFIG_X86_PAE
+               pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+               pmd += pmd_idx;
+#else
+               pmd_idx = 0;
+#endif
+               for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
-                        *
-                        * Don't use a large page for the first 2/4MB of memory
-                        * because there are often fixed size MTRRs in there
-                        * and overlapping MTRRs into large pages can cause
-                        * slowdowns.
                         */
-                       if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
+                       if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
+                               /*
+                                * first pass will use the same initial
+                                * identity mapping attribute + _PAGE_PSE.
+                                */
+                               pgprot_t init_prot =
+                                       __pgprot(PTE_IDENT_ATTR |
+                                                _PAGE_PSE);
 
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;
@@ -199,30 +303,58 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                                        prot = PAGE_KERNEL_LARGE_EXEC;
 
                                pages_2m++;
-                               set_pmd(pmd, pfn_pmd(pfn, prot));
+                               if (mapping_iter == 1)
+                                       set_pmd(pmd, pfn_pmd(pfn, init_prot));
+                               else
+                                       set_pmd(pmd, pfn_pmd(pfn, prot));
 
                                pfn += PTRS_PER_PTE;
-                               max_pfn_mapped = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);
 
-                       for (pte_ofs = 0;
-                            pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+                       pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+                       pte += pte_ofs;
+                       for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
+                               /*
+                                * first pass will use the same initial
+                                * identity mapping attribute.
+                                */
+                               pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;
 
                                pages_4k++;
-                               set_pte(pte, pfn_pte(pfn, prot));
+                               if (mapping_iter == 1)
+                                       set_pte(pte, pfn_pte(pfn, init_prot));
+                               else
+                                       set_pte(pte, pfn_pte(pfn, prot));
                        }
-                       max_pfn_mapped = pfn;
                }
        }
-       update_page_count(PG_LEVEL_2M, pages_2m);
-       update_page_count(PG_LEVEL_4K, pages_4k);
+       if (mapping_iter == 1) {
+               /*
+                * update direct mapping page count only in the first
+                * iteration.
+                */
+               update_page_count(PG_LEVEL_2M, pages_2m);
+               update_page_count(PG_LEVEL_4K, pages_4k);
+
+               /*
+                * local global flush tlb, which will flush the previous
+                * mappings present in both small and large page TLB's.
+                */
+               __flush_tlb_all();
+
+               /*
+                * Second iteration will set the actual desired PTE attributes.
+                */
+               mapping_iter = 2;
+               goto repeat;
+       }
 }
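
kernel_physical_mapping_init() now runs twice over the same range, implementing the rule quoted in the comment above from Intel's TLB application note: a single pagetable write must never change both the page size and the frame or attributes. Pass one may replace the 4k boot mappings from head_32.S with 2M/4M pages but keeps the boot-time PTE_IDENT_ATTR bits; __flush_tlb_all() then retires every stale small- and large-page TLB entry; pass two rewrites the same entries touching only the attributes (GLOBAL, NX and friends). A structural sketch of that protocol, with stand-in attribute values:

    #include <stdio.h>

    static void set_entry(int i, int attrs)  /* stands in for set_pmd/set_pte */
    {
            printf("entry %d -> attrs %#x\n", i, attrs);
    }

    int main(void)
    {
            int init_attrs  = 0x063;        /* stand-in for PTE_IDENT_ATTR */
            int final_attrs = 0x163;        /* stand-in for PAGE_KERNEL* bits */
            int mapping_iter = 1;

    repeat:
            /* page size may change on pass 1, attributes may not */
            for (int i = 0; i < 4; i++)
                    set_entry(i, mapping_iter == 1 ? init_attrs : final_attrs);

            if (mapping_iter == 1) {
                    puts("__flush_tlb_all()"); /* stale 4k and 2M/4M entries gone */
                    mapping_iter = 2;
                    goto repeat;               /* size stable; attributes only */
            }
            return 0;
    }
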
 
 /*
@@ -239,12 +371,13 @@ int devmem_is_allowed(unsigned long pagenr)
 {
        if (pagenr <= 256)
                return 1;
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+               return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
 }
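
The new iomem_is_exclusive() test slots in between the legacy low-megabyte exception and the RAM check, so the /dev/mem policy reads top to bottom: the first megabyte (pagenr <= 256 with 4k pages) is always allowed for BIOS and legacy tooling, regions a driver has claimed exclusively (IORESOURCE_EXCLUSIVE) are refused, remaining non-RAM (MMIO, holes) is allowed, and system RAM is refused. The same ordering as a self-contained check with stubbed, simplified predicates:

    #include <stdio.h>

    /* stubs; the kernel walks e820 and the resource tree instead */
    static int page_is_ram(unsigned long pfn)        { return pfn >= 0x500; }
    static int iomem_is_exclusive(unsigned long pfn) { return pfn == 0x400; }

    static int devmem_is_allowed(unsigned long pagenr)
    {
            if (pagenr <= 256)                   /* legacy first MB: yes */
                    return 1;
            if (iomem_is_exclusive(pagenr))      /* driver-claimed: no */
                    return 0;
            if (!page_is_ram(pagenr))            /* MMIO, holes: yes */
                    return 1;
            return 0;                            /* system RAM: no */
    }

    int main(void)
    {
            unsigned long probes[] = { 0x10, 0x300, 0x400, 0x600 };
            for (int i = 0; i < 4; i++)
                    printf("pfn %#lx -> %d\n", probes[i],
                           devmem_is_allowed(probes[i]));
            return 0;
    }
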
 
-#ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
@@ -267,6 +400,7 @@ static void __init kmap_init(void)
        kmap_prot = PAGE_KERNEL;
 }
 
+#ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
        unsigned long vaddr;
@@ -346,16 +480,14 @@ static void __init set_highmem_pages_init(void)
 #endif /* !CONFIG_NUMA */
 
 #else
-# define kmap_init()                           do { } while (0)
-# define permanent_kmaps_init(pgd_base)                do { } while (0)
-# define set_highmem_pages_init()      do { } while (0)
+static inline void permanent_kmaps_init(pgd_t *pgd_base)
+{
+}
+static inline void set_highmem_pages_init(void)
+{
+}
 #endif /* CONFIG_HIGHMEM */
 
-pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-
 void __init native_pagetable_setup_start(pgd_t *base)
 {
        unsigned long pfn, va;
@@ -411,40 +543,25 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init pagetable_init(void)
+static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 {
-       pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;
 
-       paravirt_pagetable_setup_start(pgd_base);
-
-       /* Enable PSE if available */
-       if (cpu_has_pse)
-               set_in_cr4(X86_CR4_PSE);
-
-       /* Enable PGE if available */
-       if (cpu_has_pge) {
-               set_in_cr4(X86_CR4_PGE);
-               __PAGE_KERNEL |= _PAGE_GLOBAL;
-               __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
-       }
-
-       kernel_physical_mapping_init(pgd_base);
-       remap_numa_kva();
-
        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
-       early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
+}
 
-       permanent_kmaps_init(pgd_base);
+static void __init pagetable_init(void)
+{
+       pgd_t *pgd_base = swapper_pg_dir;
 
-       paravirt_pagetable_setup_done(pgd_base);
+       permanent_kmaps_init(pgd_base);
 }
 
 #ifdef CONFIG_ACPI_SLEEP
@@ -487,7 +604,7 @@ void zap_low_mappings(void)
 
 int nx_enabled;
 
-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
+pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
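
With this change __supported_pte_mask starts out with _PAGE_GLOBAL (alongside NX) filtered away, and init_memory_mapping() below ORs _PAGE_GLOBAL back in only once cpu_has_pge is confirmed; pgprots are masked against it when ptes are built, so unsupported bits never reach a live entry. A toy illustration of the gating (the bit values and the make_pte() helper are illustrative, not the kernel's macros):

    #include <stdio.h>

    #define _PAGE_NX     (1ULL << 63)
    #define _PAGE_GLOBAL (1ULL << 8)

    static unsigned long long __supported_pte_mask = ~(_PAGE_NX | _PAGE_GLOBAL);

    static unsigned long long make_pte(unsigned long long phys,
                                       unsigned long long prot)
    {
            return phys | (prot & __supported_pte_mask); /* strip unsupported */
    }

    int main(void)
    {
            unsigned long long prot = 0x63ULL | _PAGE_GLOBAL;

            printf("before PGE probe: %#llx\n", make_pte(0x100000, prot));
            __supported_pte_mask |= _PAGE_GLOBAL;        /* cpu_has_pge path */
            printf("after  PGE probe: %#llx\n", make_pte(0x100000, prot));
            return 0;
    }
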
@@ -565,11 +682,7 @@ void __init find_low_pfn_range(void)
 {
        /* it could update max_pfn */
 
-       /*
-        * partially used pages are not usable - thus
-        * we are rounding upwards:
-        */
-       min_low_pfn = PFN_UP(init_pg_tables_end);
+       /* max_low_pfn is 0, we already have early_res support */
 
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
@@ -642,12 +755,14 @@ void __init initmem_init(unsigned long start_pfn,
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
+       e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
        memory_present(0, 0, max_low_pfn);
+       e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
@@ -659,25 +774,21 @@ void __init initmem_init(unsigned long start_pfn,
 
        setup_bootmem_allocator();
 }
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void __init zone_sizes_init(void)
+static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-       remove_all_active_ranges();
 #ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
-       e820_register_active_regions(0, 0, highend_pfn);
-#else
-       e820_register_active_regions(0, 0, max_low_pfn);
 #endif
 
        free_area_init_nodes(max_zone_pfns);
 }
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
 void __init setup_bootmem_allocator(void)
 {
@@ -694,7 +805,9 @@ void __init setup_bootmem_allocator(void)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 
-       bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, max_low_pfn);
+       /* don't touch min_low_pfn */
+       bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
+                                        min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
@@ -705,6 +818,145 @@ void __init setup_bootmem_allocator(void)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
 
+       after_init_bootmem = 1;
+}
+
+static void __init find_early_table_space(unsigned long end, int use_pse)
+{
+       unsigned long puds, pmds, ptes, tables, start;
+
+       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+       tables = PAGE_ALIGN(puds * sizeof(pud_t));
+
+       pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+       tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
+
+       if (use_pse) {
+               unsigned long extra;
+
+               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+               extra += PMD_SIZE;
+               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       } else
+               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       tables += PAGE_ALIGN(ptes * sizeof(pte_t));
+
+       /* for fixmap */
+       tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
+
+       /*
+        * RED-PEN putting page tables only on node 0 could
+        * cause a hotspot and fill up ZONE_DMA. The page tables
+        * need roughly 0.5KB per GB.
+        */
+       start = 0x7000;
+       table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+                                       tables, PAGE_SIZE);
+       if (table_start == -1UL)
+               panic("Cannot find space for the kernel page tables");
+
+       table_start >>= PAGE_SHIFT;
+       table_end = table_start;
+       table_top = table_start + (tables>>PAGE_SHIFT);
+
+       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+               end, table_start << PAGE_SHIFT,
+               (table_start << PAGE_SHIFT) + tables);
+}
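
The sizing above is a deliberate over-estimate: one entry per possible pud and pmd slot, but pte pages only for what will really be mapped with 4k pages; under PSE that is at most the unaligned tail plus one full PMD to cover the 4k-mapped head. Worked numbers for an assumed PAE+PSE machine with 896MB of lowmem, ignoring the fixmap page:

    #include <stdio.h>

    int main(void)
    {
            unsigned long end = 896UL << 20;         /* illustrative lowmem */
            unsigned long page = 4096UL;
            unsigned long pmd_size = 2UL << 20;      /* PAE: 2MB large pages */

            unsigned long puds = (end + (1UL << 30) - 1) >> 30;        /* 1 */
            unsigned long pmds = (end + pmd_size - 1) / pmd_size;      /* 448 */
            unsigned long extra = end - (end / pmd_size) * pmd_size + pmd_size;
            unsigned long ptes = (extra + page - 1) / page;            /* 512 */

            unsigned long align = page - 1;
            unsigned long tables = ((puds * 8 + align) & ~align)       /* 4k */
                                 + ((pmds * 8 + align) & ~align)       /* 4k */
                                 + ((ptes * 8 + align) & ~align);      /* 4k */

            printf("early pagetable budget: %lu KB\n", tables >> 10);  /* 12 */
            return 0;
    }
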
+
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+                                               unsigned long end)
+{
+       pgd_t *pgd_base = swapper_pg_dir;
+       unsigned long start_pfn, end_pfn;
+       unsigned long big_page_start;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /*
+        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+        * This will simplify cpa(), which otherwise needs to support splitting
+        * large pages into small in interrupt context, etc.
+        */
+       int use_pse = 0;
+#else
+       int use_pse = cpu_has_pse;
+#endif
+
+       /*
+        * Find space for the kernel direct mapping tables.
+        */
+       if (!after_init_bootmem)
+               find_early_table_space(end, use_pse);
+
+#ifdef CONFIG_X86_PAE
+       set_nx();
+       if (nx_enabled)
+               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+#endif
+
+       /* Enable PSE if available */
+       if (cpu_has_pse)
+               set_in_cr4(X86_CR4_PSE);
+
+       /* Enable PGE if available */
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __supported_pte_mask |= _PAGE_GLOBAL;
+       }
+
+       /*
+        * Don't use a large page for the first 2/4MB of memory
+        * because there are often fixed size MTRRs in there
+        * and overlapping MTRRs into large pages can cause
+        * slowdowns.
+        */
+       big_page_start = PMD_SIZE;
+
+       if (start < big_page_start) {
+               start_pfn = start >> PAGE_SHIFT;
+               end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
+       } else {
+               /* head is not big page alignment ? */
+               start_pfn = start >> PAGE_SHIFT;
+               end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                                << (PMD_SHIFT - PAGE_SHIFT);
+       }
+       if (start_pfn < end_pfn)
+               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
+
+       /* big page range */
+       start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < (big_page_start >> PAGE_SHIFT))
+               start_pfn =  big_page_start >> PAGE_SHIFT;
+       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn)
+               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
+                                            use_pse);
+
+       /* tail is not big page alignment ? */
+       start_pfn = end_pfn;
+       if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
+               end_pfn = end >> PAGE_SHIFT;
+               if (start_pfn < end_pfn)
+                       kernel_physical_mapping_init(pgd_base, start_pfn,
+                                                        end_pfn, 0);
+       }
+
+       early_ioremap_page_table_range_init(pgd_base);
+
+       load_cr3(swapper_pg_dir);
+
+       __flush_tlb_all();
+
+       if (!after_init_bootmem)
+               reserve_early(table_start << PAGE_SHIFT,
+                                table_end << PAGE_SHIFT, "PGTABLE");
+
+       if (!after_init_bootmem)
+               early_memtest(start, end);
+
+       return end >> PAGE_SHIFT;
 }
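
init_memory_mapping() splits each request into up to three pieces: a head below big_page_start, kept on 4k pages because of the MTRR note above; a PMD-aligned body that may use large pages; and an unaligned tail back on 4k pages. kernel_physical_mapping_init() then handles each piece with the two-pass attribute rewrite described earlier. The split for one assumed request, on a non-PAE box with 4MB large pages:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0, end = 0x373fe000UL;  /* illustrative */
            unsigned long pmd_size = 4UL << 20;           /* non-PAE PMD_SIZE */
            unsigned long big_page_start = pmd_size;

            unsigned long head_end = big_page_start;      /* small pages */
            unsigned long body_end = (end / pmd_size) * pmd_size;

            printf("head: %#010lx - %#010lx  4k pages\n", start, head_end);
            printf("body: %#010lx - %#010lx  4M pages\n", head_end, body_end);
            printf("tail: %#010lx - %#010lx  4k pages\n", body_end, end);
            return 0;
    }
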
 
 
@@ -717,18 +969,17 @@ void __init setup_bootmem_allocator(void)
  */
 void __init paging_init(void)
 {
-#ifdef CONFIG_X86_PAE
-       set_nx();
-       if (nx_enabled)
-               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
        pagetable_init();
 
-       load_cr3(swapper_pg_dir);
-
        __flush_tlb_all();
 
        kmap_init();
+
+       /*
+        * NOTE: at this point the bootmem allocator is fully available.
+        */
+       sparse_init();
+       zone_sizes_init();
 }
 
 /*
@@ -765,6 +1016,8 @@ void __init mem_init(void)
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
+       pci_iommu_alloc();
+
 #ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
 #endif
@@ -834,18 +1087,30 @@
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
+       /*
+        * Check boundaries twice: Some fundamental inconsistencies can
+        * be detected at build time already.
+        */
+#define __FIXADDR_TOP (-PAGE_SIZE)
+#ifdef CONFIG_HIGHMEM
+       BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE  > FIXADDR_START);
+       BUILD_BUG_ON(VMALLOC_END                        > PKMAP_BASE);
+#endif
+#define high_memory (-128UL << 20)
+       BUILD_BUG_ON(VMALLOC_START                      >= VMALLOC_END);
+#undef high_memory
+#undef __FIXADDR_TOP
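
The #define/#undef pair is what makes these checks possible at build time: VMALLOC_START and FIXADDR_START expand to expressions involving the runtime values high_memory and __FIXADDR_TOP, so shadowing those names with worst-case constants (128MB of vmalloc reserve, fixmap at the very top) turns the whole inequality into a compile-time constant that BUILD_BUG_ON can reject. The same pattern in isolation (names and the 8MB gap are arbitrary):

    #include <stdio.h>

    #define BUILD_BUG_ON(c) ((void)sizeof(char[1 - 2 * !!(c)]))

    unsigned long high_mem;                    /* normally set only at boot */
    #define VMALLOC_START_X (high_mem + (8UL << 20))  /* depends on it */

    int main(void)
    {
    #define high_mem (-128UL << 20)            /* worst case, now constant */
            /* evaluated entirely by the compiler: */
            BUILD_BUG_ON(VMALLOC_START_X >= (0UL - 4096));
    #undef high_mem                            /* back to the runtime variable */

            printf("compile-time layout checks passed\n");
            return 0;
    }
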
+
 #ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
 #endif
-       BUG_ON(VMALLOC_START                            > VMALLOC_END);
+       BUG_ON(VMALLOC_START                            >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);
 
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();
 
-       cpa_init();
        save_pg_dir();
        zap_low_mappings();
 }
@@ -857,7 +1123,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
 
-       return __add_pages(zone, start_pfn, nr_pages);
+       return __add_pages(nid, zone, start_pfn, nr_pages);
 }
 #endif
 
@@ -894,6 +1160,8 @@ void mark_rodata_ro(void)
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;
 
+#ifndef CONFIG_DYNAMIC_FTRACE
+       /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);
@@ -906,6 +1174,8 @@ void mark_rodata_ro(void)
        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
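
Dynamic ftrace rewrites the mcount call sites inside the kernel text at runtime, so with CONFIG_DYNAMIC_FTRACE the text pages have to stay writable and only the rodata section keeps its protection. Condensed shape of the resulting function, with a stub in place of set_pages_ro()/CPA:

    #include <stdio.h>

    static void make_ro(const char *section)   /* stands in for set_pages_ro() */
    {
            printf("write-protecting %s\n", section);
    }

    static void mark_rodata_ro_sketch(void)
    {
    #ifndef CONFIG_DYNAMIC_FTRACE
            make_ro("kernel text");    /* skipped: ftrace must patch text */
    #endif
            make_ro("rodata");         /* always read-only */
    }

    int main(void)
    {
            mark_rodata_ro_sketch();
            return 0;
    }
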