/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include "as-layout.h"
#include "kern_util.h"
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
/* allocated in paging_init and unchanged thereafter */
unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;

/* Used during early boot */
static unsigned long brk_end;
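/*
 * map_cb() asks the host to map the region between the current brk and
 * uml_reserved into the kernel's address space; mem_init() below runs it
 * via initial_thread_cb() once kmalloc is about to be enabled.
 */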
static void map_cb(void *unused)
{
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}
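/*
 * With CONFIG_HIGHMEM, the highmem page frames were reserved by the bootmem
 * allocator; setup_highmem() donates them to the page allocator by clearing
 * PG_reserved, resetting each page's refcount and freeing it (the usual
 * pattern for releasing boot-reserved pages).
 */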
#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        struct page *page;
        unsigned long highmem_pfn;
        int i;

        highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
                page = &mem_map[highmem_pfn + i];
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
        }
}
#endif
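/*
 * mem_init() runs once the VM is up: it zeroes empty_zero_page (allocated
 * in paging_init()), maps the area between the host brk and uml_reserved,
 * gives all remaining bootmem to the page allocator and fills in the global
 * memory accounting (totalram_pages, max_low_pfn, max_pfn, ...).
 */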
void __init mem_init(void)
{
        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
#endif
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));

#ifdef CONFIG_HIGHMEM
        setup_highmem(end_iomem, highmem);
#endif
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                   (unsigned long) __pa(pte)));
                if (pte != pte_offset_kernel(pmd, 0))
                        BUG();
        }
}
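/*
 * With three-level page tables a pud entry points at a separately allocated
 * pmd page; with two levels the pmd is folded into the pud, so there is
 * nothing to allocate and the body below compiles away.
 */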
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
        pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

        set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#endif
}
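/*
 * fixrange_init() walks [start, end) in PMD-sized steps and makes sure page
 * table pages exist at every level for that range.  Only the paging
 * structure is created here; the leaf ptes are filled in later, e.g. by
 * set_fixmap() or the kmap setup code.
 */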
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr = start;

        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;
        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}
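/*
 * With CONFIG_HIGHMEM, pages above the normal zone have no permanent kernel
 * mapping.  kmap_pte caches the pte slot behind the first kmap fixmap entry
 * so temporary mappings can be installed cheaply.  Roughly, and only as an
 * illustration of the usual kmap_atomic() pattern (not the exact generic
 * code):
 *
 *      idx   = type + KM_TYPE_NR * smp_processor_id();
 *      vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *      set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 */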
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
                                     (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
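/*
 * init_highmem() builds the page tables backing the permanent kmap (PKMAP)
 * window and caches the pte that maps its first page in pkmap_page_table,
 * which kmap()/kunmap() index directly.
 */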
static void __init init_highmem(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /* Permanent kmaps: */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}
#endif /* CONFIG_HIGHMEM */
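/*
 * fixaddr_user_init() handles the host's vsyscall/vDSO area: its contents
 * are copied into pages allocated from bootmem and mapped read-only at the
 * same fixed addresses, so UML processes see the page they would get on the
 * host.
 */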
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_t p;
        unsigned long v, vaddr = FIXADDR_USER_START;

        if (!size)
                return;

        fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        v = (unsigned long) alloc_bootmem_low_pages(size);
        memcpy((void *) v , (void *) FIXADDR_USER_START, size);
        p = __pa(v);
        for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
                      p += PAGE_SIZE) {
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val(*pte, p, PAGE_READONLY);
        }
#endif
}
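/*
 * paging_init() runs before mem_init(): it allocates the zero and bad pages
 * from bootmem, sizes the memory zones, hands them to free_area_init() and
 * pre-builds page tables for the fixmap (and, with CONFIG_HIGHMEM, kmap)
 * regions.
 */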
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for (i = 0; i < ARRAY_SIZE(zones_size); i++)
                zones_size[i] = 0;

        zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
                (uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}
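/*
 * arch_validate() checks that a freshly allocated page is really backed by
 * host memory: UML's "physical" memory lives in a host file which may be
 * sparse, so each page is touched through __do_copy_to_user() with the
 * thread's fault catcher armed.  If the host cannot supply the page and the
 * caller may not sleep (__GFP_WAIT clear), NULL is returned; otherwise a
 * replacement allocation is attempted.
 */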
struct page *arch_validate(struct page *page, gfp_t mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

 again:
        if (page == NULL)
                return page;
        if (PageHighMem(page))
                return page;

        addr = (unsigned long) page_address(page);
        for (i = 0; i < (1 << order); i++) {
                current->thread.fault_addr = (void *) addr;
                if (__do_copy_to_user((void __user *) addr, &zero,
                                      sizeof(zero),
                                      &current->thread.fault_addr,
                                      &current->thread.fault_catcher)) {
                        if (!(mask & __GFP_WAIT))
                                return NULL;
                        else break;
                }
                addr += PAGE_SIZE;
        }

        if (i == (1 << order))
                return page;
        page = alloc_pages(mask, order);
        goto again;
}
/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif
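/*
 * show_mem() is the arch-specific memory dump used by things like the
 * SysRq 'm' handler: it walks every page frame and prints totals for RAM,
 * highmem, reserved, shared and swap-cached pages.
 */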
void show_mem(void)
{
        int pfn, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int high_mem = 0;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n",
               nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while (pfn-- > 0) {
                page = pfn_to_page(pfn);
                total++;
                if (PageHighMem(page))
                        high_mem++;
                if (PageReserved(page))
                        reserved++;
                else if (PageSwapCache(page))
                        cached++;
                else if (page_count(page))
                        shared += page_count(page) - 1;
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", high_mem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);
}
/* Allocate and free page tables. */
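/*
 * pgd_alloc() clears the user part of a new pgd and copies the kernel part
 * from swapper_pg_dir, so every address space shares the kernel mappings
 * that were set up at boot.
 */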
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}
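/*
 * Both pte allocators rely on __GFP_ZERO, so a new page table page starts
 * with all entries clear and needs no further initialization before being
 * hooked into a pmd.
 */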
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        return alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
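/*
 * pmd pages exist only with CONFIG_3_LEVEL_PGTABLES (e.g. 64-bit UML).
 * pmd_alloc_one() does not pass __GFP_ZERO, so the new page is cleared
 * explicitly with memset() below.
 */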
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (pmd)
                memset(pmd, 0, PAGE_SIZE);
        return pmd;
}
#endif