Move round_up/down to kernel.h
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 15219e0..d406c52 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1,3 +1,4 @@
+#include <linux/initrd.h>
 #include <linux/ioport.h>
 #include <linux/swap.h>
 
@@ -7,8 +8,13 @@
 #include <asm/page.h>
 #include <asm/page_types.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
+#include <asm/proto.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 unsigned long __initdata e820_table_start;
 unsigned long __meminitdata e820_table_end;
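Note: the 32- and 64-bit variants each used to carry their own
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); hoisting it here leaves one
shared definition. A minimal sketch of how the generic mmu_gather path of
this era consumes it (tlb_gather_mmu()/tlb_finish_mmu() per
asm-generic/tlb.h; example_unmap() is hypothetical):

	static void example_unmap(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
	{
		/* hands back the per-CPU mmu_gathers slot defined above */
		struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

		/* ... unmap [start, end), collecting pages into *tlb ... */

		tlb_finish_mmu(tlb, start, end);	/* flush and release */
	}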
@@ -65,12 +71,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
         */
 #ifdef CONFIG_X86_32
        start = 0x7000;
-       e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
-                                       tables, PAGE_SIZE);
-#else /* CONFIG_X86_64 */
+#else
        start = 0x8000;
-       e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
 #endif
+       e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+                                       tables, PAGE_SIZE);
        if (e820_table_start == -1UL)
                panic("Cannot find space for the kernel page tables");
 
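Note: collapsing the #ifdef means 64-bit now bounds the early page-table
search by max_pfn_mapped << PAGE_SHIFT, as 32-bit already did, instead of
passing `end` straight through. The arithmetic, with hypothetical numbers:

	/*
	 * With 4K pages (PAGE_SHIFT == 12) and max_pfn_mapped == 0x80000,
	 * the 'tables' bytes are searched for within [start, 2 GB):
	 *
	 *	0x80000UL << 12 == 0x80000000
	 *
	 * find_e820_area() returns -1UL when no suitable E820 gap exists,
	 * which is the condition the panic() above tests.
	 */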
@@ -94,9 +99,9 @@ struct map_range {
 #define NR_RANGE_MR 5
 #endif
 
-static int save_mr(struct map_range *mr, int nr_range,
-                  unsigned long start_pfn, unsigned long end_pfn,
-                  unsigned long page_size_mask)
+static int __meminit save_mr(struct map_range *mr, int nr_range,
+                            unsigned long start_pfn, unsigned long end_pfn,
+                            unsigned long page_size_mask)
 {
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
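Note: save_mr() gains __meminit so the memory-hotplug path can call it
without a section-mismatch warning; the annotation routes the function into
.meminit.text, which the linker script keeps when hotplug is configured and
discards with init memory otherwise. A sketch of the pattern
(example_helper() is hypothetical, not from the patch):

	/* ordinary code, special section: freed after boot unless
	 * CONFIG_MEMORY_HOTPLUG keeps .meminit.text around
	 */
	static int __meminit example_helper(unsigned long start_pfn,
					    unsigned long end_pfn)
	{
		return start_pfn < end_pfn;
	}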
@@ -110,20 +115,6 @@ static int save_mr(struct map_range *mr, int nr_range,
        return nr_range;
 }
 
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
-       if (direct_gbpages && cpu_has_gbpages)
-               printk(KERN_INFO "Using GB pages for direct mapping\n");
-       else
-               direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
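Note: the gbpages check itself is not lost; in this era it presumably moves
to the early setup code (setup_arch()) so it runs once per boot instead of
on every init_memory_mapping() call, which is why the !after_bootmem guard
disappears in the next hunk. The relocated logic, as removed here:

	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;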
@@ -143,10 +134,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
        printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-       if (!after_bootmem)
-               init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
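Note: adding CONFIG_KMEMCHECK here makes kmemcheck builds take the same
4K-only route as DEBUG_PAGEALLOC ones, since kmemcheck also needs
page-granular mappings it can fault on. The effective policy, pieced
together with the unchanged remainder of this function (the PG_LEVEL_2M
line is not part of this diff):

	/*
	 *	debug/kmemcheck build:	use_pse = use_gbpages = 0  (4K only)
	 *	otherwise:		use_pse = cpu_has_pse;
	 *				use_gbpages = direct_gbpages;
	 *
	 *	if (use_pse)
	 *		page_size_mask |= 1 << PG_LEVEL_2M;
	 *	if (use_gbpages)
	 *		page_size_mask |= 1 << PG_LEVEL_1G;
	 */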
@@ -158,13 +146,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        use_gbpages = direct_gbpages;
 #endif
 
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_PAE
-       set_nx();
-       if (nx_enabled)
-               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
-
        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);
@@ -174,7 +155,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }
-#endif
 
        if (use_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;
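Note: with the CONFIG_X86_32 guard gone, the PSE/PGE enabling above now runs
on 64-bit as well. A sketch of what set_in_cr4() boils down to in this era
(hedged; based on asm/processor.h of the period, which this diff does not
show):

	unsigned long cr4;

	mmu_cr4_features |= X86_CR4_PGE;	/* recorded so later CPUs match */
	cr4 = read_cr4();
	write_cr4(cr4 | X86_CR4_PGE);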
@@ -304,8 +284,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 #endif
 
 #ifdef CONFIG_X86_64
-       if (!after_bootmem)
+       if (!after_bootmem && !start) {
+               pud_t *pud;
+               pmd_t *pmd;
+
                mmu_cr4_features = read_cr4();
+
+               /*
+                * _brk_end cannot change anymore, but it and _end may be
+                * located on different 2M pages. cleanup_highmap(), however,
+                * can only consider _end when it runs, so destroy any
+                * mappings beyond _brk_end here.
+                */
+               pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+               pmd = pmd_offset(pud, _brk_end - 1);
+               while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+                       pmd_clear(pmd);
+       }
 #endif
        __flush_tlb_all();
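Note on the new pmd-clearing loop, a worked example with hypothetical
addresses:

	/*
	 * Suppose
	 *	_brk_end = 0xffffffff8159f000  (2M page at 0xffffffff81400000)
	 *	_end     = 0xffffffff817a1000  (2M page at 0xffffffff81600000)
	 *
	 * pmd initially points at the entry for 0xffffffff81400000 (the
	 * page holding _brk_end - 1). ++pmd advances to the entry for
	 * 0xffffffff81600000, which is still <= the entry for _end - 1,
	 * so that PMD is cleared; the next increment exits the loop. The
	 * cleared entry is precisely the tail cleanup_highmap() had to
	 * leave mapped because it only knew _end. (The single pud lookup
	 * assumes _brk_end and _end fall in the same 1G region, which
	 * holds for the kernel image.)
	 */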