static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;
-static int __initdata after_init_bootmem;
+int after_bootmem;
int direct_gbpages;
#ifdef CONFIG_X86_PAE
if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
- if (after_init_bootmem)
+ if (after_bootmem)
pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
else
pmd_table = (pmd_t *)alloc_low_page();
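
The after_bootmem test above is the heart of the rename: before the bootmem allocator exists, early page-table pages come out of the [table_start, table_top) window that find_early_table_space() reserves further down, with table_end as the bump pointer. A sketch of alloc_low_page(), roughly as it appears in this file (context, not part of this diff):

    static __init void *alloc_low_page(void)
    {
        unsigned long pfn = table_end++;    /* simple bump allocator */
        void *adr;

        if (pfn >= table_top)
            panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);    /* window sits in already-mapped low memory */
        memset(adr, 0, PAGE_SIZE);
        return adr;
    }
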
if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
pte_t *page_table = NULL;
- if (after_init_bootmem) {
+ if (after_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
pte_t *newpte;
int i;
- BUG_ON(after_init_bootmem);
+ BUG_ON(after_bootmem);
newpte = alloc_low_page();
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(newpte + i, pte[i]);
* of max_low_pfn pages, by creating page tables starting from address
* PAGE_OFFSET:
*/
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
- unsigned long start_pfn,
+static void __init kernel_physical_mapping_init(unsigned long start_pfn,
unsigned long end_pfn,
int use_pse)
{
+ pgd_t *pgd_base = swapper_pg_dir;
int pgd_idx, pmd_idx, pte_ofs;
unsigned long pfn;
pgd_t *pgd;
* be partially populated, and so it avoids stomping on any existing
* mappings.
*/
-static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
+static void __init early_ioremap_page_table_range_init(void)
{
+ pgd_t *pgd_base = swapper_pg_dir;
unsigned long vaddr, end;
/*
bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
#endif
- after_init_bootmem = 1;
+ after_bootmem = 1;
}
static void __init find_early_table_space(unsigned long end, int use_pse,
unsigned long extra;
extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
extra += PMD_SIZE;
+#endif
ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else
ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+#ifdef CONFIG_X86_32
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
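
The PSE arithmetic deserves a worked example: with 2M pages, only the tail that is not 2M-aligned needs 4K PTEs, plus (on 32-bit) one extra PMD's worth, presumably for the head that is always mapped with 4K pages (the MTRR workaround further down). A userspace-runnable sketch; the constants and estimate_ptes() are illustrative, not kernel API:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PMD_SHIFT   21
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PMD_SIZE    (1UL << PMD_SHIFT)

    /* How many PTEs does mapping [0, end) take on 32-bit? */
    static unsigned long estimate_ptes(unsigned long end, int use_pse)
    {
        unsigned long extra, ptes;

        if (use_pse) {
            /* only the non-2M-aligned tail needs 4K PTEs ... */
            extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
            /* ... plus one PMD's worth for the 4K-mapped head */
            extra += PMD_SIZE;
            ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else {
            ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }
        return ptes;
    }

    int main(void)
    {
        /* 512 MiB + 12 KiB: 515 PTEs with PSE, 131075 without */
        printf("%lu\n", estimate_ptes((512UL << 20) + 0x3000, 1));
        printf("%lu\n", estimate_ptes((512UL << 20) + 0x3000, 0));
        return 0;
    }
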
/*
* RED-PEN putting page tables only on node 0 could
* cause a hotspot and fill up ZONE_DMA. The page tables
* need roughly 0.5KB per GB.
*/
+#ifdef CONFIG_X86_32
start = 0x7000;
table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
tables, PAGE_SIZE);
+#else /* CONFIG_X86_64 */
+ start = 0x8000;
+ table_start = find_e820_area(start, end, tables, PAGE_SIZE);
+#endif
if (table_start == -1UL)
panic("Cannot find space for the kernel page tables");
unsigned page_size_mask;
};
+#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
+#else /* CONFIG_X86_64 */
+#define NR_RANGE_MR 5
+#endif
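
NR_RANGE_MR grows from 3 to 5 because the worst 64-bit split is: a 4K head up to the first 2M boundary, a 2M run up to the first 1G boundary, the 1G middle, a 2M tail, and a 4K tail; 32-bit has no 1G pages and keeps head/body/tail. For reference, save_mr() itself (untouched by this patch) is roughly:

    static int save_mr(struct map_range *mr, int nr_range,
                       unsigned long start_pfn, unsigned long end_pfn,
                       unsigned long page_size_mask)
    {
        if (start_pfn < end_pfn) {
            if (nr_range >= NR_RANGE_MR)
                panic("run out of range for init_memory_mapping\n");
            mr[nr_range].start = start_pfn << PAGE_SHIFT;
            mr[nr_range].end   = end_pfn << PAGE_SHIFT;
            mr[nr_range].page_size_mask = page_size_mask;
            nr_range++;
        }
        return nr_range;
    }

Because save_mr() already drops empty ranges, the open-coded start_pfn < end_pfn guard around the final tail call can safely go away (see the hunk near the end).
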
static int save_mr(struct map_range *mr, int nr_range,
unsigned long start_pfn, unsigned long end_pfn,
unsigned long __init_refok init_memory_mapping(unsigned long start,
unsigned long end)
{
- pgd_t *pgd_base = swapper_pg_dir;
unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
unsigned long pos;
+ unsigned long ret = 0;
struct map_range mr[NR_RANGE_MR];
int nr_range, i;
use_gbpages = direct_gbpages;
#endif
+#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_PAE
set_nx();
if (nx_enabled)
set_in_cr4(X86_CR4_PGE);
__supported_pte_mask |= _PAGE_GLOBAL;
}
+#endif
if (use_gbpages)
page_size_mask |= 1 << PG_LEVEL_1G;
memset(mr, 0, sizeof(mr));
nr_range = 0;
+ /* head: not big-page aligned? map it with 4K pages */
+ start_pfn = start >> PAGE_SHIFT;
+ pos = start_pfn << PAGE_SHIFT;
+#ifdef CONFIG_X86_32
/*
* Don't use a large page for the first 2/4MB of memory
* because there are often fixed size MTRRs in there
* and overlapping MTRRs into large pages can cause
* slowdowns.
*/
- /* head if not big page alignment ? */
- start_pfn = start >> PAGE_SHIFT;
- pos = start_pfn << PAGE_SHIFT;
if (pos == 0)
end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
else
end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
<< (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+ end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
+ << (PMD_SHIFT - PAGE_SHIFT);
+#endif
if (end_pfn > (end >> PAGE_SHIFT))
end_pfn = end >> PAGE_SHIFT;
if (start_pfn < end_pfn) {
/* big page (2M) range */
start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
<< (PMD_SHIFT - PAGE_SHIFT);
+#ifdef CONFIG_X86_32
end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+ end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+ << (PUD_SHIFT - PAGE_SHIFT);
+ if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+ end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+#endif
+
+ if (start_pfn < end_pfn) {
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+ page_size_mask & (1<<PG_LEVEL_2M));
+ pos = end_pfn << PAGE_SHIFT;
+ }
+
+#ifdef CONFIG_X86_64
+ /* big page (1G) range */
+ start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+ << (PUD_SHIFT - PAGE_SHIFT);
+ end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+ if (start_pfn < end_pfn) {
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+ page_size_mask &
+ ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+ pos = end_pfn << PAGE_SHIFT;
+ }
+
+ /* tail that is not 1G aligned: map it with 2M pages */
+ start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+ << (PMD_SHIFT - PAGE_SHIFT);
+ end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
page_size_mask & (1<<PG_LEVEL_2M));
pos = end_pfn << PAGE_SHIFT;
}
+#endif
/* tail is not big page (2M) alignment */
start_pfn = pos>>PAGE_SHIFT;
end_pfn = end>>PAGE_SHIFT;
- if (start_pfn < end_pfn)
- nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
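
To see the 64-bit five-way split in action, here is a userspace condensation of the arithmetic above; emit() and up() are illustrative helpers standing in for save_mr() and the round-up expressions:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PMD_SHIFT   21
    #define PUD_SHIFT   30

    static unsigned long pos;    /* advances as ranges are emitted */

    /* Print [spfn, epfn) if non-empty and advance pos, like save_mr(). */
    static void emit(unsigned long spfn, unsigned long epfn, const char *sz)
    {
        if (spfn < epfn) {
            printf("%-2s %#012lx-%#012lx\n", sz,
                   spfn << PAGE_SHIFT, epfn << PAGE_SHIFT);
            pos = epfn << PAGE_SHIFT;
        }
    }

    /* pfn of pos rounded up to the next 1<<shift boundary */
    static unsigned long up(int shift)
    {
        return ((pos + (1UL << shift) - 1) >> shift) << (shift - PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long start = 0x1000;          /* deliberately unaligned */
        unsigned long end   = 0x8c200000UL;    /* ~2.2 GiB */
        unsigned long epfn;

        pos = start;

        /* head: 4K pages up to the first 2M boundary */
        epfn = up(PMD_SHIFT);
        if (epfn > end >> PAGE_SHIFT)
            epfn = end >> PAGE_SHIFT;
        emit(start >> PAGE_SHIFT, epfn, "4K");

        /* 2M pages up to the first 1G boundary, capped at a 2M boundary */
        epfn = up(PUD_SHIFT);
        if (epfn > (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT))
            epfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        emit(up(PMD_SHIFT), epfn, "2M");

        /* 1G pages in the middle */
        emit(up(PUD_SHIFT), (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT), "1G");

        /* 2M tail below the last 1G boundary */
        emit(up(PMD_SHIFT), (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT), "2M");

        /* 4K tail */
        emit(pos >> PAGE_SHIFT, end >> PAGE_SHIFT, "4K");
        return 0;
    }

With these inputs it prints a 4K head, a 2M run, a 1G run and a 2M tail; the merge loop that follows then coalesces adjacent ranges that share a page_size_mask.
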
/* try to merge same page size and continuous */
for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
* memory mapped. Unfortunately this is done currently before the
* nodes are discovered.
*/
- if (!after_init_bootmem)
+ if (!after_bootmem)
find_early_table_space(end, use_pse, use_gbpages);
+#ifdef CONFIG_X86_32
for (i = 0; i < nr_range; i++)
- kernel_physical_mapping_init(pgd_base,
+ kernel_physical_mapping_init(
mr[i].start >> PAGE_SHIFT,
mr[i].end >> PAGE_SHIFT,
mr[i].page_size_mask == (1<<PG_LEVEL_2M));
+ ret = end;
+#else /* CONFIG_X86_64 */
+ for (i = 0; i < nr_range; i++)
+ ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
+ mr[i].page_size_mask);
+#endif
- early_ioremap_page_table_range_init(pgd_base);
+#ifdef CONFIG_X86_32
+ early_ioremap_page_table_range_init();
load_cr3(swapper_pg_dir);
+#endif
+#ifdef CONFIG_X86_64
+ if (!after_bootmem)
+ mmu_cr4_features = read_cr4();
+#endif
__flush_tlb_all();
- if (!after_init_bootmem)
+ if (!after_bootmem && table_end > table_start)
reserve_early(table_start << PAGE_SHIFT,
table_end << PAGE_SHIFT, "PGTABLE");
- if (!after_init_bootmem)
+ if (!after_bootmem)
early_memtest(start, end);
- return end >> PAGE_SHIFT;
+ return ret >> PAGE_SHIFT;
}
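
Both paths now return the highest mapped pfn: 32-bit simply maps everything it was asked for (ret = end), while 64-bit propagates the last address returned by kernel_physical_mapping_init(). Callers of this era look roughly like the following (shape recalled from arch/x86/kernel/setup.c, so treat it as a sketch):

    /* map all of lowmem; remember how far the direct map reaches */
    max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn << PAGE_SHIFT);
    max_pfn_mapped = max_low_pfn_mapped;
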