x86: move some func calling from setup_arch to paging_init
arch/x86/mm/init_32.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

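/*
 * Note: 32-bit x86 uses either a two-level pagetable (pgd -> pte,
 * non-PAE) or a three-level one (pgd -> pmd -> pte, PAE).  The
 * helpers below hide that difference behind the folded pud/pmd
 * interfaces, so callers can always walk pgd -> pud -> pmd -> pte.
 */
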
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. In non-PAE mode the middle layer
 * is folded, so this simply returns the pgd entry.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table) {
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                }

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m = 0, pages_4k = 0;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;

                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         *
                         * Don't use a large page for the first 2/4MB of memory
                         * because there are often fixed size MTRRs in there
                         * and overlapping MTRRs into large pages can cause
                         * slowdowns.
                         */
                        if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                /* One large page: 2MB with PAE, 4MB without. */
                                pages_2m++;
                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                max_pfn_mapped = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        for (pte_ofs = 0;
                             pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
                        max_pfn_mapped = pfn;
                }
        }
        update_page_count(PG_LEVEL_2M, pages_2m);
        update_page_count(PG_LEVEL_4K, pages_4k);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

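/*
 * Highmem support: pages above max_low_pfn have no permanent kernel
 * mapping.  They are reached either through the atomic fixmap-based
 * kmaps (kmap_atomic()) or through the persistent pkmap area set up
 * below; with CONFIG_HIGHMEM off all of this compiles away.
 */
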
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()                    do { } while (0)
# define permanent_kmaps_init(pgd_base) do { } while (0)
# define set_highmem_pages_init()       do { } while (0)
#endif /* CONFIG_HIGHMEM */

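/*
 * Note: with CONFIG_HIGHMEM off, the stubs above turn the kmap setup
 * calls into no-ops, so pagetable_init() and paging_init() below need
 * no #ifdefs of their own.
 */
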
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                /*
                 * The ORed-in 1 is _PAGE_PRESENT: on PAE, point the
                 * entry at the zero page instead of clearing it.
                 */
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                /* EDX bit 20 of CPUID 0x80000001 is the NX feature flag. */
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

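/*
 * Note: MAXMEM_PFN below is derived from MAXMEM, the amount of RAM
 * that fits under the vmalloc area.  With the usual 3G/1G split and
 * the 128 MB __VMALLOC_RESERVE above, that works out to the familiar
 * ~896 MB lowmem limit; anything beyond it is only usable as highmem.
 */
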
/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* This may lower max_pfn for the highmem sizing below. */

        /* max_low_pfn starts out 0; early allocations go via early_res. */

        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB of highmem "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning: only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning: only 4GB will be used. "
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB would "
                                        "leave less than 64MB of lowmem, "
                                        "ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}

void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        remove_all_active_ranges();
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        e820_register_active_regions(0, 0, highend_pfn);
#else
        e820_register_active_regions(0, 0, max_low_pfn);
#endif

        free_area_init_nodes(max_zone_pfns);
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

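/*
 * Note: the boot-time (bootmem) allocator set up below serves early
 * allocations until free_all_bootmem() in mem_init() hands the
 * remaining pages over to the buddy allocator.
 */
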
void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                 bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
}

/*
 * The node 0 pgdat is initialized before all of these because
 * it's needed for bootmem.  node>0 pgdats have their virtual
 * space allocated before the pagetables are in place to access
 * them, so they can't be cleared then.
 *
 * This should all compile down to nothing when NUMA is off.
 */
static void __init remapped_pgdat_init(void)
{
        int nid;

        for_each_online_node(nid) {
                if (nid != 0)
                        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
        }
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
        pagetable_init();

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        post_reserve_initrd();

        remapped_pgdat_init();
        sparse_init();
        zone_sizes_init();

        paravirt_post_allocator_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                            > VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        cpa_init();
        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        /*
         * flag starts out as 1.  The write at label 1 targets the
         * read-only FIX_WP_TEST page: if WP is honoured it faults and
         * the exception table entry skips to label 2, leaving flag 1;
         * if the write silently succeeds, the xorl clears flag to 0.
         */
        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}