Introduce flags for reserve_bootmem()
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>          /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
        .name   = "PDC data (Page Zero)",
        .start  = 0,
        .end    = 0x9ff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
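
/* 3584U*1024U*1024U = 3584 MB = 3.5 GB.  The remaining 512 MB of a 32 bit
 * kernel's 4 GB address space is presumably what the comment in
 * setup_bootmem() below calls "enough kernel address space for other
 * purposes" (vmalloc, I/O mappings and friends).
 */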

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
        char *cp, *end;
        unsigned long limit;

        /* We need this before __setup() functions are called */

        limit = MAX_MEM;
        for (cp = boot_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        cp += 4;
                        limit = memparse(cp, &end);
                        if (end != cp)
                                break;
                        cp = end;
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }

        if (limit < mem_limit)
                mem_limit = limit;
}
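
/* Example: booting with "mem=512M" makes memparse() return 512*1024*1024
 * (the usual K/M/G suffixes are accepted) and caps mem_limit at 512 MB;
 * setup_bootmem() then truncates the physical memory ranges against it.
 */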

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
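/* MAX_GAP is 1 GB (0x40000000 bytes) expressed in page frames, e.g.
 * 0x40000 frames with 4 KB pages.  Ranges separated by more than this
 * are discarded below when CONFIG_DISCONTIGMEM is not enabled.
 */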

static void __init setup_bootmem(void)
{
        unsigned long bootmap_size;
        unsigned long mem_max;
        unsigned long bootmap_pages;
        unsigned long bootmap_start_pfn;
        unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
        int npmem_holes;
#endif
        int i, sysram_resource_count;

        disable_sr_hashing(); /* Turn off space register hashing */

        /*
         * Sort the ranges. Since the number of ranges is typically
         * small, and performance is not an issue here, just do
         * a simple insertion sort.
         */

        for (i = 1; i < npmem_ranges; i++) {
                int j;

                for (j = i; j > 0; j--) {
                        unsigned long tmp;

                        if (pmem_ranges[j-1].start_pfn <
                            pmem_ranges[j].start_pfn) {

                                break;
                        }
                        tmp = pmem_ranges[j-1].start_pfn;
                        pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
                        pmem_ranges[j].start_pfn = tmp;
                        tmp = pmem_ranges[j-1].pages;
                        pmem_ranges[j-1].pages = pmem_ranges[j].pages;
                        pmem_ranges[j].pages = tmp;
                }
        }

#ifndef CONFIG_DISCONTIGMEM
        /*
         * Throw out ranges that are too far apart (controlled by
         * MAX_GAP).
         */

        for (i = 1; i < npmem_ranges; i++) {
                if (pmem_ranges[i].start_pfn -
                        (pmem_ranges[i-1].start_pfn +
                         pmem_ranges[i-1].pages) > MAX_GAP) {
                        npmem_ranges = i;
                        printk("Large gap in memory detected (%ld pages). "
                               "Consider turning on CONFIG_DISCONTIGMEM\n",
                               pmem_ranges[i].start_pfn -
                               (pmem_ranges[i-1].start_pfn +
                                pmem_ranges[i-1].pages));
                        break;
                }
        }
#endif

        if (npmem_ranges > 1) {

                /* Print the memory ranges */

                printk(KERN_INFO "Memory Ranges:\n");

                for (i = 0; i < npmem_ranges; i++) {
                        unsigned long start;
                        unsigned long size;

                        size = (pmem_ranges[i].pages << PAGE_SHIFT);
                        start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
                        printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                                i, start, start + (size - 1), size >> 20);
                }
        }

        sysram_resource_count = npmem_ranges;
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                res->name = "System RAM";
                res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
                res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }

        /*
         * For 32 bit kernels we limit the amount of memory we can
         * support, in order to preserve enough kernel address space
         * for other purposes. For 64 bit kernels we don't normally
         * limit the memory, but this mechanism can be used to
         * artificially limit the amount of memory (and it is written
         * to work with multiple memory ranges).
         */

        mem_limit_func();       /* check for "mem=" argument */

        mem_max = 0;
        num_physpages = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long rsize;

                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
                if ((mem_max + rsize) > mem_limit) {
                        printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
                        if (mem_max == mem_limit)
                                npmem_ranges = i;
                        else {
                                pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
                                                       - (mem_max >> PAGE_SHIFT);
                                npmem_ranges = i + 1;
                                mem_max = mem_limit;
                        }
                        num_physpages += pmem_ranges[i].pages;
                        break;
                }
                num_physpages += pmem_ranges[i].pages;
                mem_max += rsize;
        }

        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
        /* Merge the ranges, keeping track of the holes */

        {
                unsigned long end_pfn;
                unsigned long hole_pages;

                npmem_holes = 0;
                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
                for (i = 1; i < npmem_ranges; i++) {

                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                        if (hole_pages) {
                                pmem_holes[npmem_holes].start_pfn = end_pfn;
                                pmem_holes[npmem_holes++].pages = hole_pages;
                                end_pfn += hole_pages;
                        }
                        end_pfn += pmem_ranges[i].pages;
                }

                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
                npmem_ranges = 1;
        }
#endif

        bootmap_pages = 0;
        for (i = 0; i < npmem_ranges; i++)
                bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
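
        /* bootmem_bootmap_pages() sizes the boot-time bitmap: one bit per
         * page frame, rounded up to whole pages.  E.g. a 1 GB range of
         * 4 KB pages has 262144 frames, so its bitmap needs 32 KB = 8 pages.
         */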

        bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
        for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
                memset(NODE_DATA(i), 0, sizeof(pg_data_t));
                NODE_DATA(i)->bdata = &bmem_data[i];
        }
        memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

        for (i = 0; i < npmem_ranges; i++)
                node_set_online(i);
#endif

        /*
         * Initialize and free the full range of memory in each range.
         * Note that the only writing these routines do is to the bootmap,
         * and we've made sure to locate the bootmap properly so that they
         * won't be writing over anything important.
         */

        bootmap_pfn = bootmap_start_pfn;
        max_pfn = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long start_pfn;
                unsigned long npages;

                start_pfn = pmem_ranges[i].start_pfn;
                npages = pmem_ranges[i].pages;

                bootmap_size = init_bootmem_node(NODE_DATA(i),
                                                bootmap_pfn,
                                                start_pfn,
                                                (start_pfn + npages) );
                free_bootmem_node(NODE_DATA(i),
                                  (start_pfn << PAGE_SHIFT),
                                  (npages << PAGE_SHIFT) );
                bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if ((start_pfn + npages) > max_pfn)
                        max_pfn = start_pfn + npages;
        }

        /* IOMMU is always used to access "high mem" on those boxes
         * that can support enough mem that a PCI device couldn't
         * directly DMA to any physical addresses.
         * ISA DMA support will need to revisit this.
         */
        max_low_pfn = max_pfn;

        if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
                printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
                BUG();
        }

        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

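        /* BOOTMEM_DEFAULT is the flag this patch threads through the
         * reserve_bootmem() interface; it keeps the old unconditional
         * behaviour.  The alternative, BOOTMEM_EXCLUSIVE, makes the call
         * fail if part of the range is already reserved (used by the
         * crashkernel code to detect collisions).  Nothing below needs
         * exclusivity, so every caller passes BOOTMEM_DEFAULT.
         */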
        reserve_bootmem_node(NODE_DATA(0), 0UL,
                        (unsigned long)(PAGE0->mem_free +
                                PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
                        (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
                        ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
                        BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

        /* reserve the holes */

        for (i = 0; i < npmem_holes; i++) {
                reserve_bootmem_node(NODE_DATA(0),
                                (pmem_holes[i].start_pfn << PAGE_SHIFT),
                                (pmem_holes[i].pages << PAGE_SHIFT),
                                BOOTMEM_DEFAULT);
        }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
                if (__pa(initrd_start) < mem_max) {
                        unsigned long initrd_reserve;

                        if (__pa(initrd_end) > mem_max) {
                                initrd_reserve = mem_max - __pa(initrd_start);
                        } else {
                                initrd_reserve = initrd_end - initrd_start;
                        }
                        initrd_below_start_ok = 1;
                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

                        reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
                                        initrd_reserve, BOOTMEM_DEFAULT);
                }
        }
#endif

        data_resource.start = virt_to_phys(&data_start);
        data_resource.end = virt_to_phys(_end) - 1;
        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(&data_start) - 1;

        /* We don't know which region the kernel will be in, so try
         * all of them.
         */
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
        request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
        unsigned long addr, init_begin, init_end;

        printk(KERN_INFO "Freeing unused kernel memory: ");

#ifdef CONFIG_DEBUG_KERNEL
        /* Attempt to catch anyone trying to execute code here
         * by filling the page with BRK insns.
         *
         * If we disable interrupts for all CPUs, then IPI stops working.
         * Kinda breaks the global cache flushing.
         */
        local_irq_disable();

        memset(__init_begin, 0x00,
                (unsigned long)__init_end - (unsigned long)__init_begin);
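
        /* On PA-RISC an all-zero instruction word decodes as "break 0,0",
         * so zero-filling really does fill the area with BRK insns as the
         * comment above intends.
         */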

        flush_data_cache();
        asm volatile("sync" : : );
        flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
        asm volatile("sync" : : );

        local_irq_enable();
#endif

        /* align __init_begin and __init_end to page size,
           ignoring linker script where we might have tried to save RAM */
        init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
        init_end   = PAGE_ALIGN((unsigned long)(__init_end));
        for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                num_physpages++;
                totalram_pages++;
        }

        /* set up a new LED state on systems shipped with an LED State panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

        printk("%luk freed\n", (init_end - init_begin) >> 10);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        /* rodata memory was already mapped with KERNEL_RO access rights by
           pagetable_init() and map_pages(). No need to do additional stuff here */
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
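/* SET_MAP_OFFSET(x) yields the smallest 32K-aligned address strictly
 * above x: e.g. SET_MAP_OFFSET(0x12345) = (0x12345 + 0x8000) & ~0x7fff
 * = 0x18000, and an already aligned 0x10000 also advances to 0x18000.
 * This is what creates the "hole" described above between mapping areas.
 */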

void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
        totalram_pages += free_all_bootmem();
#else
        {
                int i;

                for (i = 0; i < npmem_ranges; i++)
                        totalram_pages += free_all_bootmem_node(NODE_DATA(i));
        }
#endif

        codesize = (unsigned long)_etext - (unsigned long)_text;
        datasize = (unsigned long)_edata - (unsigned long)_etext;
        initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

        reservedpages = 0;
        {
                unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
                int i;

                for (i = 0; i < npmem_ranges; i++) {
                        for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
                                if (PageReserved(pfn_to_page(pfn)))
                                        reservedpages++;
                        }
                }
#else /* !CONFIG_DISCONTIGMEM */
                for (pfn = 0; pfn < max_pfn; pfn++) {
                        /*
                         * Only count reserved RAM pages
                         */
                        if (PageReserved(pfn_to_page(pfn)))
                                reservedpages++;
                }
#endif
        }

#ifdef CONFIG_PA11
        if (hppa_dma_ops == &pcxl_dma_ops) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
        } else {
                pcxl_dma_start = 0;
                vmalloc_start = SET_MAP_OFFSET(MAP_START);
        }
#else
        vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10
        );

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
               "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
               "      .init : 0x%p - 0x%p   (%4ld kB)\n"
               "      .data : 0x%p - 0x%p   (%4ld kB)\n"
               "      .text : 0x%p - 0x%p   (%4ld kB)\n",

               (void*)VMALLOC_START, (void*)VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               __va(0), high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               __init_begin, __init_end,
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

               _etext, _edata,
               ((unsigned long)_edata - (unsigned long)_etext) >> 10,

               _text, _etext,
               ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:     %6ldkB\n",
                                nr_swap_pages<<(PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!page_count(&mem_map[i]))
                        free++;
                else
                        shared += page_count(&mem_map[i]) - 1;
        }
#else
        for (i = 0; i < npmem_ranges; i++) {
                int j;

                for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
                        struct page *p;
                        unsigned long flags;

                        pgdat_resize_lock(NODE_DATA(i), &flags);
                        p = nid_page_nr(i, j) - node_start_pfn(i);

                        total++;
                        if (PageReserved(p))
                                reserved++;
                        else if (PageSwapCache(p))
                                cached++;
                        else if (!page_count(p))
                                free++;
                        else
                                shared += page_count(p) - 1;
                        pgdat_resize_unlock(NODE_DATA(i), &flags);
                }
        }
#endif
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
        {
                struct zonelist *zl;
                int i, j, k;

                for (i = 0; i < npmem_ranges; i++) {
                        for (j = 0; j < MAX_NR_ZONES; j++) {
                                zl = NODE_DATA(i)->node_zonelists + j;

                                printk("Zone list for zone %d on node %d: ", j, i);
                                for (k = 0; zl->zones[k] != NULL; k++)
                                        printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
                                printk("\n");
                        }
                }
        }
#endif
}


static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long tmp1;
        unsigned long tmp2;
        unsigned long address;
        unsigned long ro_start;
        unsigned long ro_end;
        unsigned long fv_addr;
        unsigned long gw_addr;
        extern const unsigned long fault_vector_20;
        extern void * const linux_gateway_page;

        ro_start = __pa((unsigned long)_text);
        ro_end   = __pa((unsigned long)&data_start);
        fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
        gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

        end_paddr = start_paddr + size;

        pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
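
        /* start_pmd/start_pte are the PMD and PTE table indices of
         * start_vaddr.  As a purely illustrative example, with 4 KB pages
         * and 1024 PTEs per table a start_vaddr of 0x00403000 gives
         * start_pte = (0x00403000 >> 12) & 1023 = 3.
         */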

        address = start_paddr;
        while (address < end_paddr) {
#if PTRS_PER_PMD == 1
                pmd = (pmd_t *)__pa(pg_dir);
#else
                pmd = (pmd_t *)pgd_address(*pg_dir);

                /*
                 * pmd is physical at this point
                 */

                if (!pmd) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
                        pmd = (pmd_t *) __pa(pmd);
                }

                pgd_populate(NULL, pg_dir, __va(pmd));
#endif
                pg_dir++;

                /* now change pmd to kernel virtual addresses */

                pmd = (pmd_t *)__va(pmd) + start_pmd;
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {

                        /*
                         * pg_table is physical at this point
                         */

                        pg_table = (pte_t *)pmd_address(*pmd);
                        if (!pg_table) {
                                pg_table = (pte_t *)
                                        alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
                                pg_table = (pte_t *) __pa(pg_table);
                        }

                        pmd_populate_kernel(NULL, pmd, __va(pg_table));

                        /* now change pg_table to kernel virtual addresses */

                        pg_table = (pte_t *) __va(pg_table) + start_pte;
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
                                pte_t pte;

                                /*
                                 * Map the fault vector writable so we can
                                 * write the HPMC checksum.
                                 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
                                if (address >= ro_start && address < ro_end
                                                        && address != fv_addr
                                                        && address != gw_addr)
                                    pte = __mk_pte(address, PAGE_KERNEL_RO);
                                else
#endif
                                    pte = __mk_pte(address, pgprot);

                                if (address >= end_paddr)
                                        pte_val(pte) = 0;

                                set_pte(pg_table, pte);

                                address += PAGE_SIZE;
                        }
                        start_pte = 0;

                        if (address >= end_paddr)
                            break;
                }
                start_pmd = 0;
        }
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
        int range;

        /* Map each physical memory range to its kernel vaddr */

        for (range = 0; range < npmem_ranges; range++) {
                unsigned long start_paddr;
                unsigned long end_paddr;
                unsigned long size;

                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
                end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
                size = pmem_ranges[range].pages << PAGE_SHIFT;

                map_pages((unsigned long)__va(start_paddr), start_paddr,
                        size, PAGE_KERNEL);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
                        initrd_end - initrd_start, PAGE_KERNEL);
        }
#endif

        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
        memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
        unsigned long linux_gateway_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const linux_gateway_page;

        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup Linux Gateway page.
         *
         * The Linux gateway page will reside in kernel space (on virtual
         * page 0), so it doesn't need to be aliased into user space.
         */

        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
                PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long address;
        unsigned long hpux_gw_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const hpux_gateway_page;

        hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup HP-UX Gateway page.
         *
         * The HP-UX gateway page resides in the user address space,
         * so it needs to be aliased into each process.
         */

        pg_dir = pgd_offset(mm,hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
        pmd = (pmd_t *)__pa(pg_dir);
#else
        pmd = (pmd_t *) pgd_address(*pg_dir);

        /*
         * pmd is physical at this point
         */

        if (!pmd) {
                pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
                pmd = (pmd_t *) __pa(pmd);
        }

        __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
        /* now change pmd to kernel virtual addresses */

        pmd = (pmd_t *)__va(pmd) + start_pmd;

        /*
         * pg_table is physical at this point
         */

        pg_table = (pte_t *) pmd_address(*pmd);
        if (!pg_table)
                pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

        __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

        /* now change pg_table to kernel virtual addresses */

        pg_table = (pte_t *) __va(pg_table) + start_pte;
        set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
        int i;

        setup_bootmem();
        pagetable_init();
        gateway_init();
        flush_cache_all_local(); /* start with known state */
        flush_tlb_all_local(NULL);

        for (i = 0; i < npmem_ranges; i++) {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, };

                zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
                /* Need to initialize the pfnnid_map before we can initialize
                   the zone */
                {
                    int j;
                    for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
                         j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
                         j++) {
                        pfnnid_map[j] = i;
                    }
                }
#endif

                free_area_init_node(i, NODE_DATA(i), zones_size,
                                pmem_ranges[i].start_pfn, NULL);
        }
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
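/* NR_SPACE_IDS is 2^18 = 262144 (2^15 = 32768 without PA20), and
 * SID_ARRAY_SIZE is the number of longs in a bitmap holding one bit per
 * space ID: e.g. 262144 / 64 = 4096 longs (32 KB) on a 64-bit kernel.
 * Recycling starts once half of all IDs (RECYCLE_THRESHOLD) are dirty.
 */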

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
        unsigned long index;

        spin_lock(&sid_lock);

        if (free_space_ids == 0) {
                if (dirty_space_ids != 0) {
                        spin_unlock(&sid_lock);
                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
                        spin_lock(&sid_lock);
                }
                BUG_ON(free_space_ids == 0);
        }

        free_space_ids--;

        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
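        /* The next line sets bit "index" by hand: word index >> SHIFT_PER_LONG,
         * bit index % BITS_PER_LONG.  E.g. on a 64-bit kernel index 70 is
         * space_id[1], bit 6.
         */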
        space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
        space_id_index = index;

        spin_unlock(&sid_lock);

        return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
        unsigned long index = spaceid >> SPACEID_SHIFT;
        unsigned long *dirty_space_offset;

        dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
        index &= (BITS_PER_LONG - 1);

        spin_lock(&sid_lock);

        BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

        *dirty_space_offset |= (1L << index);
        dirty_space_ids++;

        spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        *ndirtyptr = dirty_space_ids;
        if (dirty_space_ids != 0) {
            for (i = 0; i < SID_ARRAY_SIZE; i++) {
                dirty_array[i] = dirty_space_id[i];
                dirty_space_id[i] = 0;
            }
            dirty_space_ids = 0;
        }

        return;
}

static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (ndirty != 0) {
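                /* A dirty ID is by construction still set in space_id[], so
                 * XORing the dirty mask in clears exactly those bits, e.g.
                 * 0b1011 ^ 0b0010 = 0b1001 frees ID 1.
                 */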
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_array[i];
                }

                free_space_ids += ndirty;
                space_id_index = 0;
        }
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }

                free_space_ids += dirty_space_ids;
                dirty_space_ids = 0;
                space_id_index = 0;
        }
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
        int do_recycle;

        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
            BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
            get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
            recycle_inuse++;
            do_recycle++;
        }
        spin_unlock(&sid_lock);
        on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
        if (do_recycle) {
            spin_lock(&sid_lock);
            recycle_sids(recycle_ndirty,recycle_dirty_array);
            recycle_inuse = 0;
            spin_unlock(&sid_lock);
        }
}
#else
void flush_tlb_all(void)
{
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
        spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;
        printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                num_physpages++;
                totalram_pages++;
        }
}
#endif