/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif

extern char _stext[];

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end[];
extern char __init_begin, __init_end;

int after_bootmem;

static void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

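/*
 * Illustrative sketch only, not part of the original file: a typical
 * boot-time caller pins one physical page into a compile-time fixmap
 * slot. FIX_EXAMPLE is a hypothetical slot name; the real slots are the
 * enum fixed_addresses entries in asm/fixmap.h.
 */
#if 0
static void __init map_example_fixmap(unsigned long phys)
{
	/* Map the page containing 'phys' at the slot's fixed virtual address. */
	__set_fixmap(FIX_EXAMPLE, phys & PAGE_MASK, PAGE_KERNEL);
}
#endif
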
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

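/*
 * Added commentary: the two entries above are 2MB windows at 40MB and
 * 42MB in the boot-time virtual space, backed by spare pmds from head.S.
 * alloc_low_page() points a free window's pmd at the large page that
 * contains a newly allocated table page so it can be written before the
 * direct mapping exists; unmap_low_page() tears the window down again.
 */
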
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];

	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		/* index i is relative to the PGDIR-aligned base of this pud page */
		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PUD; i++, pud++)
				set_pud(pud, __pud(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}

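/*
 * Added commentary, a worked example: for end = 4GB, puds = 4 and
 * pmds = 2048, so the pud entries (32 bytes) round up to one page and
 * the pmd entries (2048 * 8 bytes = 16KB) round up to four pages;
 * tables = 20KB, carved out of e820 RAM between 0x8000 and the start
 * of the kernel text.
 */
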
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
		     table_start<<PAGE_SHIFT,
		     table_end<<PAGE_SHIFT);
}

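/*
 * Illustrative sketch only, not part of the original file: the boot path
 * is expected to map all discovered physical memory in one call, roughly
 * as below; end_pfn_map is assumed here to be the highest e820-mapped pfn.
 */
#if 0
	init_memory_mapping(0, end_pfn_map << PAGE_SHIFT);
#endif
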
extern struct x8664_pda cpu_pda[NR_CPUS];

/* Assumes all CPUs still execute in init_mm */
void zap_low_mappings(void)
{
	pgd_t *pgd = pgd_offset_k(0UL);

	pgd_clear(pgd);
	flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}
}

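/*
 * Added commentary, a worked example: on a flat 4GB node with
 * start_pfn = 0 and end_pfn = 0x100000, the cumulative sizes come out as
 * z[ZONE_DMA] = 0x1000 (16MB), z[ZONE_DMA32] = 0x100000 and
 * z[ZONE_NORMAL] = 0x100000; after the subtraction pass the zones hold
 * 0x1000, 0xff000 and 0 pages respectively, and each h[i] is the e820
 * hole size within that zone's pfn range.
 */
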
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
	"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

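/*
 * Illustrative sketch only, not part of the original file: the GART IOMMU
 * setup is the kind of caller this was written for, punching its aperture
 * out of the direct mapping so speculative CPU prefetches cannot touch it.
 * aper_base and aper_size are hypothetical names and must be 2MB aligned.
 */
#if 0
	clear_kernel_mapping((unsigned long)__va(aper_base), aper_size);
#endif
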
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	if (!iommu_aperture &&
	    (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
		swiotlb = 1;
	if (swiotlb)
		swiotlb_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

extern char __initdata_begin[], __initdata_end[];

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
}

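/*
 * Illustrative sketch only, not part of the original file: early platform
 * code would call this to keep a firmware region away from the bootmem
 * allocator; ebda_addr and ebda_size are hypothetical names for an EBDA
 * range read from the BIOS data area.
 */
#if 0
	reserve_bootmem_generic(ebda_addr, ebda_size);
#endif
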
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

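/*
 * Illustrative sketch only, not part of the original file: a reader of
 * arbitrary kernel addresses (in the spirit of /proc/kcore) would probe
 * with kern_addr_valid() before dereferencing. example_read_byte is a
 * hypothetical helper.
 */
#if 0
static int example_read_byte(unsigned long addr, char *val)
{
	if (!kern_addr_valid(addr))
		return -EFAULT;	/* no backing pfn; do not touch */
	*val = *(char *)addr;
	return 0;
}
#endif
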
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#ifdef CONFIG_CHECKING
	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#endif
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

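/*
 * Illustrative sketch only, not part of the original file: a fault or
 * profiling path that runs without a reliable current task would use the
 * _no_task variant on the faulting instruction pointer.
 */
#if 0
static int example_pc_in_vsyscall(struct pt_regs *regs)
{
	/* May false-positive (e.g. for an ia32 task); acceptable in IRQ context. */
	return in_gate_area_no_task(regs->rip);
}
#endif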