[SPARC64]: Rewrite bootup sequence.
1 /*  $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
2  *  arch/sparc64/mm/init.c
3  *
4  *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5  *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6  */
7  
8 #include <linux/config.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
14 #include <linux/mm.h>
15 #include <linux/hugetlb.h>
16 #include <linux/slab.h>
17 #include <linux/initrd.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24
25 #include <asm/head.h>
26 #include <asm/system.h>
27 #include <asm/page.h>
28 #include <asm/pgalloc.h>
29 #include <asm/pgtable.h>
30 #include <asm/oplib.h>
31 #include <asm/iommu.h>
32 #include <asm/io.h>
33 #include <asm/uaccess.h>
34 #include <asm/mmu_context.h>
35 #include <asm/tlbflush.h>
36 #include <asm/dma.h>
37 #include <asm/starfire.h>
38 #include <asm/tlb.h>
39 #include <asm/spitfire.h>
40 #include <asm/sections.h>
41
42 extern void device_scan(void);
43
44 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
45
46 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
47
48 /* Ugly, but necessary... -DaveM */
49 unsigned long phys_base __read_mostly;
50 unsigned long kern_base __read_mostly;
51 unsigned long kern_size __read_mostly;
52 unsigned long pfn_base __read_mostly;
53
54 /* get_new_mmu_context() uses "cache + 1".  */
55 DEFINE_SPINLOCK(ctx_alloc_lock);
56 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
57 #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
58 unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
59
60 /* References to special section boundaries */
61 extern char  _start[], _end[];
62
63 /* Initial ramdisk setup */
64 extern unsigned long sparc_ramdisk_image64;
65 extern unsigned int sparc_ramdisk_image;
66 extern unsigned int sparc_ramdisk_size;
67
68 struct page *mem_map_zero __read_mostly;
69
70 int bigkernel = 0;
71
72 /* XXX Tune this... */
73 #define PGT_CACHE_LOW   25
74 #define PGT_CACHE_HIGH  50
75
76 void check_pgt_cache(void)
77 {
78         preempt_disable();
79         if (pgtable_cache_size > PGT_CACHE_HIGH) {
80                 do {
81                         if (pgd_quicklist)
82                                 free_pgd_slow(get_pgd_fast());
83                         if (pte_quicklist[0])
84                                 free_pte_slow(pte_alloc_one_fast(NULL, 0));
85                         if (pte_quicklist[1])
86                                 free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
87                 } while (pgtable_cache_size > PGT_CACHE_LOW);
88         }
89         preempt_enable();
90 }
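/* For example, with PGT_CACHE_LOW == 25 and PGT_CACHE_HIGH == 50 a
 * quicklist cache that has grown past 50 entries is drained here, one
 * pass freeing at most one cached pgd and one pte page per colour,
 * until pgtable_cache_size falls to 25 or below; a cache at or under
 * the high watermark is left untouched.
 */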
91
92 #ifdef CONFIG_DEBUG_DCFLUSH
93 atomic_t dcpage_flushes = ATOMIC_INIT(0);
94 #ifdef CONFIG_SMP
95 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
96 #endif
97 #endif
98
99 __inline__ void flush_dcache_page_impl(struct page *page)
100 {
101 #ifdef CONFIG_DEBUG_DCFLUSH
102         atomic_inc(&dcpage_flushes);
103 #endif
104
105 #ifdef DCACHE_ALIASING_POSSIBLE
106         __flush_dcache_page(page_address(page),
107                             ((tlb_type == spitfire) &&
108                              page_mapping(page) != NULL));
109 #else
110         if (page_mapping(page) != NULL &&
111             tlb_type == spitfire)
112                 __flush_icache_page(__pa(page_address(page)));
113 #endif
114 }
115
116 #define PG_dcache_dirty         PG_arch_1
117 #define PG_dcache_cpu_shift     24
118 #define PG_dcache_cpu_mask      (256 - 1)
119
120 #if NR_CPUS > 256
121 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
122 #endif
123
124 #define dcache_dirty_cpu(page) \
125         (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
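/* Bit layout used by the D-cache dirty tracking above: the owning cpu
 * number lives in page->flags bits 24..31 and the dirty state is the
 * PG_arch_1 bit.  For example, set_dcache_dirty() on cpu 5 atomically
 * rewrites page->flags to
 *
 *   (flags & ~(0xffUL << 24)) | (5UL << 24) | (1UL << PG_dcache_dirty)
 *
 * which is why NR_CPUS > 256 needs the fixup named in the #error.
 */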
126
127 static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
128 {
129         unsigned long mask = this_cpu;
130         unsigned long non_cpu_bits;
131
132         non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
133         mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
134
135         __asm__ __volatile__("1:\n\t"
136                              "ldx       [%2], %%g7\n\t"
137                              "and       %%g7, %1, %%g1\n\t"
138                              "or        %%g1, %0, %%g1\n\t"
139                              "casx      [%2], %%g7, %%g1\n\t"
140                              "cmp       %%g7, %%g1\n\t"
141                              "membar    #StoreLoad | #StoreStore\n\t"
142                              "bne,pn    %%xcc, 1b\n\t"
143                              " nop"
144                              : /* no outputs */
145                              : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
146                              : "g1", "g7");
147 }
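/* The sequence above is a standard casx retry loop: load page->flags
 * into %g7, build the desired value in %g1, then compare-and-swap.
 * casx leaves the old memory value in %g1, so the trailing
 * "cmp %g7, %g1; bne,pn ... 1b" retries whenever another cpu changed
 * page->flags between the ldx and the casx.
 */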
148
149 static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
150 {
151         unsigned long mask = (1UL << PG_dcache_dirty);
152
153         __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
154                              "1:\n\t"
155                              "ldx       [%2], %%g7\n\t"
156                              "srlx      %%g7, %4, %%g1\n\t"
157                              "and       %%g1, %3, %%g1\n\t"
158                              "cmp       %%g1, %0\n\t"
159                              "bne,pn    %%icc, 2f\n\t"
160                              " andn     %%g7, %1, %%g1\n\t"
161                              "casx      [%2], %%g7, %%g1\n\t"
162                              "cmp       %%g7, %%g1\n\t"
163                              "membar    #StoreLoad | #StoreStore\n\t"
164                              "bne,pn    %%xcc, 1b\n\t"
165                              " nop\n"
166                              "2:"
167                              : /* no outputs */
168                              : "r" (cpu), "r" (mask), "r" (&page->flags),
169                                "i" (PG_dcache_cpu_mask),
170                                "i" (PG_dcache_cpu_shift)
171                              : "g1", "g7");
172 }
173
174 extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
175
176 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
177 {
178         struct page *page;
179         unsigned long pfn;
180         unsigned long pg_flags;
181
182         pfn = pte_pfn(pte);
183         if (pfn_valid(pfn) &&
184             (page = pfn_to_page(pfn), page_mapping(page)) &&
185             ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
186                 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
187                            PG_dcache_cpu_mask);
188                 int this_cpu = get_cpu();
189
190                 /* This is just to optimize away some function calls
191                  * in the SMP case.
192                  */
193                 if (cpu == this_cpu)
194                         flush_dcache_page_impl(page);
195                 else
196                         smp_flush_dcache_page_impl(page, cpu);
197
198                 clear_dcache_dirty_cpu(page, cpu);
199
200                 put_cpu();
201         }
202
203         if (get_thread_fault_code())
204                 __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
205                                    address, pte, get_thread_fault_code());
206 }
207
208 void flush_dcache_page(struct page *page)
209 {
210         struct address_space *mapping;
211         int this_cpu;
212
213         /* Do not bother with the expensive D-cache flush if it
214          * is merely the zero page.  The 'bigcore' testcase in GDB
215          * causes this case to run millions of times.
216          */
217         if (page == ZERO_PAGE(0))
218                 return;
219
220         this_cpu = get_cpu();
221
222         mapping = page_mapping(page);
223         if (mapping && !mapping_mapped(mapping)) {
224                 int dirty = test_bit(PG_dcache_dirty, &page->flags);
225                 if (dirty) {
226                         int dirty_cpu = dcache_dirty_cpu(page);
227
228                         if (dirty_cpu == this_cpu)
229                                 goto out;
230                         smp_flush_dcache_page_impl(page, dirty_cpu);
231                 }
232                 set_dcache_dirty(page, this_cpu);
233         } else {
234                 /* We could delay the flush for the !page_mapping
235                  * case too.  But that case is for exec env/arg
236          * pages and those are 99% certain to get
237                  * faulted into the tlb (and thus flushed) anyways.
238                  */
239                 flush_dcache_page_impl(page);
240         }
241
242 out:
243         put_cpu();
244 }
245
246 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
247 {
248         /* Cheetah has coherent I-cache. */
249         if (tlb_type == spitfire) {
250                 unsigned long kaddr;
251
252                 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
253                         __flush_icache_page(__get_phys(kaddr));
254         }
255 }
256
257 unsigned long page_to_pfn(struct page *page)
258 {
259         return (unsigned long) ((page - mem_map) + pfn_base);
260 }
261
262 struct page *pfn_to_page(unsigned long pfn)
263 {
264         return (mem_map + (pfn - pfn_base));
265 }
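/* Both helpers above assume a single flat mem_map: pfn_base is the
 * lowest physical page frame number, so entry N of mem_map describes
 * pfn (pfn_base + N) and the conversions are plain offset arithmetic.
 */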
266
267 void show_mem(void)
268 {
269         printk("Mem-info:\n");
270         show_free_areas();
271         printk("Free swap:       %6ldkB\n",
272                nr_swap_pages << (PAGE_SHIFT-10));
273         printk("%ld pages of RAM\n", num_physpages);
274         printk("%d free pages\n", nr_free_pages());
275         printk("%d pages in page table cache\n",pgtable_cache_size);
276 }
277
278 void mmu_info(struct seq_file *m)
279 {
280         if (tlb_type == cheetah)
281                 seq_printf(m, "MMU Type\t: Cheetah\n");
282         else if (tlb_type == cheetah_plus)
283                 seq_printf(m, "MMU Type\t: Cheetah+\n");
284         else if (tlb_type == spitfire)
285                 seq_printf(m, "MMU Type\t: Spitfire\n");
286         else
287                 seq_printf(m, "MMU Type\t: ???\n");
288
289 #ifdef CONFIG_DEBUG_DCFLUSH
290         seq_printf(m, "DCPageFlushes\t: %d\n",
291                    atomic_read(&dcpage_flushes));
292 #ifdef CONFIG_SMP
293         seq_printf(m, "DCPageFlushesXC\t: %d\n",
294                    atomic_read(&dcpage_flushes_xcall));
295 #endif /* CONFIG_SMP */
296 #endif /* CONFIG_DEBUG_DCFLUSH */
297 }
298
299 struct linux_prom_translation {
300         unsigned long virt;
301         unsigned long size;
302         unsigned long data;
303 };
304 static struct linux_prom_translation prom_trans[512] __initdata;
305
306 extern unsigned long prom_boot_page;
307 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
308 extern int prom_get_mmu_ihandle(void);
309 extern void register_prom_callbacks(void);
310
311 /* Exported for SMP bootup purposes. */
312 unsigned long kern_locked_tte_data;
313
314 /* Exported for kernel TLB miss handling in ktlb.S */
315 unsigned long prom_pmd_phys __read_mostly;
316 unsigned int swapper_pgd_zero __read_mostly;
317
318 /* Allocate power-of-2 aligned chunks from the end of the
319  * kernel image.  Return physical address.
320  */
321 static inline unsigned long early_alloc_phys(unsigned long size)
322 {
323         unsigned long base;
324
325         BUILD_BUG_ON(size & (size - 1));
326
327         kern_size = (kern_size + (size - 1)) & ~(size - 1);
328         base = kern_base + kern_size;
329         kern_size += size;
330
331         return base;
332 }
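/* Example of the round-up above: with kern_size == 0x6123 and an 8K
 * (0x2000) request, kern_size is first aligned up to 0x8000, the chunk
 * is handed out at kern_base + 0x8000, and kern_size becomes 0xa000.
 * The BUILD_BUG_ON() rejects any size that is not a power of two,
 * since the mask trick only works for those.
 */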
333
334 static inline unsigned long load_phys32(unsigned long pa)
335 {
336         unsigned long val;
337
338         __asm__ __volatile__("lduwa     [%1] %2, %0"
339                              : "=&r" (val)
340                              : "r" (pa), "i" (ASI_PHYS_USE_EC));
341
342         return val;
343 }
344
345 static inline unsigned long load_phys64(unsigned long pa)
346 {
347         unsigned long val;
348
349         __asm__ __volatile__("ldxa      [%1] %2, %0"
350                              : "=&r" (val)
351                              : "r" (pa), "i" (ASI_PHYS_USE_EC));
352
353         return val;
354 }
355
356 static inline void store_phys32(unsigned long pa, unsigned long val)
357 {
358         __asm__ __volatile__("stwa      %0, [%1] %2"
359                              : /* no outputs */
360                              : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
361 }
362
363 static inline void store_phys64(unsigned long pa, unsigned long val)
364 {
365         __asm__ __volatile__("stxa      %0, [%1] %2"
366                              : /* no outputs */
367                              : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
368 }
369
370 #define BASE_PAGE_SIZE 8192
371
372 /*
373  * Translate PROM's mapping we capture at boot time into physical address.
374  * The second parameter is only set from prom_callback() invocations.
375  */
376 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
377 {
378         unsigned long pmd_phys = (prom_pmd_phys +
379                                   ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
380         unsigned long pte_phys;
381         pmd_t pmd_ent;
382         pte_t pte_ent;
383         unsigned long base;
384
385         pmd_val(pmd_ent) = load_phys32(pmd_phys);
386         if (pmd_none(pmd_ent)) {
387                 if (error)
388                         *error = 1;
389                 return 0;
390         }
391
392         pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
393         pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
394         pte_val(pte_ent) = load_phys64(pte_phys);
395         if (!pte_present(pte_ent)) {
396                 if (error)
397                         *error = 1;
398                 return 0;
399         }
400         if (error) {
401                 *error = 0;
402                 return pte_val(pte_ent);
403         }
404         base = pte_val(pte_ent) & _PAGE_PADDR;
405         return (base + (promva & (BASE_PAGE_SIZE - 1)));
406 }
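/* Illustration of the walk above, based on 8K pages:
 *
 *   pmd index  = (promva >> 23) & 0x7ff
 *   pte index  = (promva >> 13) & 0x3ff   (1024 8-byte ptes == 8K)
 *   offset     =  promva & (BASE_PAGE_SIZE - 1)
 *
 * Each 32-bit pmd entry holds the physical address of its pte page
 * shifted right by 11, hence the "<< 11UL" here when locating the pte
 * page and the matching ">> 11UL" when build_obp_range() installs a
 * new pmd entry below.
 */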
407
408 /* The obp translations are saved based on 8k pagesize, since obp can
409  * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
410  * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
411  * scheme (also, see rant in inherit_locked_prom_mappings()).
412  */
413 static void build_obp_range(unsigned long start, unsigned long end, unsigned long data)
414 {
415         unsigned long vaddr;
416
417         for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
418                 unsigned long val, pte_phys, pmd_phys;
419                 pmd_t pmd_ent;
420                 int i;
421
422                 pmd_phys = (prom_pmd_phys +
423                             (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
424                 pmd_val(pmd_ent) = load_phys32(pmd_phys);
425                 if (pmd_none(pmd_ent)) {
426                         pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
427
428                         for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
429                                 store_phys64(pte_phys+i*sizeof(pte_t),0);
430
431                         pmd_val(pmd_ent) = pte_phys >> 11UL;
432                         store_phys32(pmd_phys, pmd_val(pmd_ent));
433                 }
434
435                 pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
436                 pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
437
438                 val = data;
439
440                 /* Clear diag TTE bits. */
441                 if (tlb_type == spitfire)
442                         val &= ~0x0003fe0000000000UL;
443
444                 store_phys64(pte_phys, val | _PAGE_MODIFIED);
445
446                 data += BASE_PAGE_SIZE;
447         }
448 }
449
450 static inline int in_obp_range(unsigned long vaddr)
451 {
452         return (vaddr >= LOW_OBP_ADDRESS &&
453                 vaddr < HI_OBP_ADDRESS);
454 }
455
456 #define OBP_PMD_SIZE 2048
457 static void build_obp_pgtable(int prom_trans_ents)
458 {
459         unsigned long i;
460
461         prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
462         for (i = 0; i < OBP_PMD_SIZE; i += 4)
463                 store_phys32(prom_pmd_phys + i, 0);
464
465         for (i = 0; i < prom_trans_ents; i++) {
466                 unsigned long start, end;
467
468                 if (!in_obp_range(prom_trans[i].virt))
469                         continue;
470
471                 start = prom_trans[i].virt;
472                 end = start + prom_trans[i].size;
473                 if (end > HI_OBP_ADDRESS)
474                         end = HI_OBP_ADDRESS;
475
476                 build_obp_range(start, end, prom_trans[i].data);
477         }
478 }
479
480 /* Read OBP translations property into 'prom_trans[]'.
481  * Return the number of entries.
482  */
483 static int read_obp_translations(void)
484 {
485         int n, node;
486
487         node = prom_finddevice("/virtual-memory");
488         n = prom_getproplen(node, "translations");
489         if (unlikely(n == 0 || n == -1)) {
490                 prom_printf("prom_mappings: Couldn't get size.\n");
491                 prom_halt();
492         }
493         if (unlikely(n > sizeof(prom_trans))) {
494                 prom_printf("prom_mappings: Size %d is too big.\n", n);
495                 prom_halt();
496         }
497
498         if ((n = prom_getproperty(node, "translations",
499                                   (char *)&prom_trans[0],
500                                   sizeof(prom_trans))) == -1) {
501                 prom_printf("prom_mappings: Couldn't get property.\n");
502                 prom_halt();
503         }
504         n = n / sizeof(struct linux_prom_translation);
505         return n;
506 }
507
508 static void remap_kernel(void)
509 {
510         unsigned long phys_page, tte_vaddr, tte_data;
511         int tlb_ent = sparc64_highest_locked_tlbent();
512
513         tte_vaddr = (unsigned long) KERNBASE;
514         phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
515         tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
516                                  _PAGE_CP | _PAGE_CV | _PAGE_P |
517                                  _PAGE_L | _PAGE_W));
518
519         kern_locked_tte_data = tte_data;
520
521         /* Now lock us into the TLBs via OBP. */
522         prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
523         prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
524         if (bigkernel) {
525                 prom_dtlb_load(tlb_ent - 1,
526                                tte_data + 0x400000, 
527                                tte_vaddr + 0x400000);
528                 prom_itlb_load(tlb_ent - 1,
529                                tte_data + 0x400000, 
530                                tte_vaddr + 0x400000);
531         }
532 }
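/* The locked translation built above covers the kernel with one (or,
 * for a bigkernel, two) 4MB TTEs: the physical base is rounded down to
 * a 4MB boundary and the entry is marked valid, 4MB sized, cacheable
 * (CP/CV), privileged, locked and writable.  The same tte_data value
 * is exported through kern_locked_tte_data so that secondary cpus can
 * install an identical locked entry during SMP bootup.
 */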
533
534 static void inherit_prom_mappings(void)
535 {
536         int n;
537
538         n = read_obp_translations();
539         build_obp_pgtable(n);
540
541         /* Now fixup OBP's idea about where we really are mapped. */
542         prom_printf("Remapping the kernel... ");
543         remap_kernel();
544
545         prom_printf("done.\n");
546
547         register_prom_callbacks();
548 }
549
550 /* The OBP specifications for sun4u mark 0xfffffffc00000000 and
551  * upwards as reserved for use by the firmware (I wonder if this
552  * will be the same on Cheetah...).  We use this virtual address
553  * range for the VPTE table mappings of the nucleus so we need
554  * to zap them when we enter the PROM.  -DaveM
555  */
556 static void __flush_nucleus_vptes(void)
557 {
558         unsigned long prom_reserved_base = 0xfffffffc00000000UL;
559         int i;
560
561         /* Only DTLB must be checked for VPTE entries. */
562         if (tlb_type == spitfire) {
563                 for (i = 0; i < 63; i++) {
564                         unsigned long tag;
565
566                         /* Spitfire Errata #32 workaround */
567                         /* NOTE: Always runs on spitfire, so no cheetah+
568                          *       page size encodings.
569                          */
570                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
571                                              "flush     %%g6"
572                                              : /* No outputs */
573                                              : "r" (0),
574                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
575
576                         tag = spitfire_get_dtlb_tag(i);
577                         if (((tag & ~(PAGE_MASK)) == 0) &&
578                             ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
579                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
580                                                      "membar #Sync"
581                                                      : /* no outputs */
582                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
583                                 spitfire_put_dtlb_data(i, 0x0UL);
584                         }
585                 }
586         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
587                 for (i = 0; i < 512; i++) {
588                         unsigned long tag = cheetah_get_dtlb_tag(i, 2);
589
590                         if ((tag & ~PAGE_MASK) == 0 &&
591                             (tag & PAGE_MASK) >= prom_reserved_base) {
592                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
593                                                      "membar #Sync"
594                                                      : /* no outputs */
595                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
596                                 cheetah_put_dtlb_data(i, 0x0UL, 2);
597                         }
598
599                         if (tlb_type != cheetah_plus)
600                                 continue;
601
602                         tag = cheetah_get_dtlb_tag(i, 3);
603
604                         if ((tag & ~PAGE_MASK) == 0 &&
605                             (tag & PAGE_MASK) >= prom_reserved_base) {
606                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
607                                                      "membar #Sync"
608                                                      : /* no outputs */
609                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
610                                 cheetah_put_dtlb_data(i, 0x0UL, 3);
611                         }
612                 }
613         } else {
614                 /* Implement me :-) */
615                 BUG();
616         }
617 }
618
619 static int prom_ditlb_set;
620 struct prom_tlb_entry {
621         int             tlb_ent;
622         unsigned long   tlb_tag;
623         unsigned long   tlb_data;
624 };
625 struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
626
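/* prom_world(1) runs when control is about to pass to the firmware:
 * it flushes the nucleus VPTE entries out of the reserved range and
 * re-installs the locked PROM TLB entries saved by
 * inherit_locked_prom_mappings().  prom_world(0) clears them again on
 * the way back into the kernel and also restores the caller's address
 * limit via set_fs().
 */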
627 void prom_world(int enter)
628 {
629         unsigned long pstate;
630         int i;
631
632         if (!enter)
633                 set_fs((mm_segment_t) { get_thread_current_ds() });
634
635         if (!prom_ditlb_set)
636                 return;
637
638         /* Make sure the following runs atomically. */
639         __asm__ __volatile__("flushw\n\t"
640                              "rdpr      %%pstate, %0\n\t"
641                              "wrpr      %0, %1, %%pstate"
642                              : "=r" (pstate)
643                              : "i" (PSTATE_IE));
644
645         if (enter) {
646                 /* Kick out nucleus VPTEs. */
647                 __flush_nucleus_vptes();
648
649                 /* Install PROM world. */
650                 for (i = 0; i < 16; i++) {
651                         if (prom_dtlb[i].tlb_ent != -1) {
652                                 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
653                                                      "membar #Sync"
654                                         : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
655                                         "i" (ASI_DMMU));
656                                 if (tlb_type == spitfire)
657                                         spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
658                                                                prom_dtlb[i].tlb_data);
659                                 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
660                                         cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
661                                                                prom_dtlb[i].tlb_data);
662                         }
663                         if (prom_itlb[i].tlb_ent != -1) {
664                                 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
665                                                      "membar #Sync"
666                                                      : : "r" (prom_itlb[i].tlb_tag),
667                                                      "r" (TLB_TAG_ACCESS),
668                                                      "i" (ASI_IMMU));
669                                 if (tlb_type == spitfire)
670                                         spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
671                                                                prom_itlb[i].tlb_data);
672                                 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
673                                         cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
674                                                                prom_itlb[i].tlb_data);
675                         }
676                 }
677         } else {
678                 for (i = 0; i < 16; i++) {
679                         if (prom_dtlb[i].tlb_ent != -1) {
680                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
681                                                      "membar #Sync"
682                                         : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
683                                 if (tlb_type == spitfire)
684                                         spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
685                                 else
686                                         cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
687                         }
688                         if (prom_itlb[i].tlb_ent != -1) {
689                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
690                                                      "membar #Sync"
691                                                      : : "r" (TLB_TAG_ACCESS),
692                                                      "i" (ASI_IMMU));
693                                 if (tlb_type == spitfire)
694                                         spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
695                                 else
696                                         cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
697                         }
698                 }
699         }
700         __asm__ __volatile__("wrpr      %0, 0, %%pstate"
701                              : : "r" (pstate));
702 }
703
704 void inherit_locked_prom_mappings(int save_p)
705 {
706         int i;
707         int dtlb_seen = 0;
708         int itlb_seen = 0;
709
710         /* Fucking losing PROM has more mappings in the TLB, but
711          * it (conveniently) fails to mention any of these in the
712          * translations property.  The only ones that matter are
713          * the locked PROM tlb entries, so we impose the following
714          * irrecoverable rule on the PROM, it is allowed 8 locked
715          * entries in the ITLB and 8 in the DTLB.
716          *
717          * Supposedly the upper 16GB of the address space is
718          * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
719          * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
720          * used between the client program and the firmware on sun5
721          * systems to coordinate mmu mappings is also COMPLETELY
722          * UNDOCUMENTED!!!!!! Thanks S(t)un!
723          */
724         if (save_p) {
725                 for (i = 0; i < 16; i++) {
726                         prom_itlb[i].tlb_ent = -1;
727                         prom_dtlb[i].tlb_ent = -1;
728                 }
729         }
730         if (tlb_type == spitfire) {
731                 int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
732                 for (i = 0; i < high; i++) {
733                         unsigned long data;
734
735                         /* Spitfire Errata #32 workaround */
736                         /* NOTE: Always runs on spitfire, so no cheetah+
737                          *       page size encodings.
738                          */
739                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
740                                              "flush     %%g6"
741                                              : /* No outputs */
742                                              : "r" (0),
743                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
744
745                         data = spitfire_get_dtlb_data(i);
746                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
747                                 unsigned long tag;
748
749                                 /* Spitfire Errata #32 workaround */
750                                 /* NOTE: Always runs on spitfire, so no
751                                  *       cheetah+ page size encodings.
752                                  */
753                                 __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
754                                                      "flush     %%g6"
755                                                      : /* No outputs */
756                                                      : "r" (0),
757                                                      "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
758
759                                 tag = spitfire_get_dtlb_tag(i);
760                                 if (save_p) {
761                                         prom_dtlb[dtlb_seen].tlb_ent = i;
762                                         prom_dtlb[dtlb_seen].tlb_tag = tag;
763                                         prom_dtlb[dtlb_seen].tlb_data = data;
764                                 }
765                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
766                                                      "membar #Sync"
767                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
768                                 spitfire_put_dtlb_data(i, 0x0UL);
769
770                                 dtlb_seen++;
771                                 if (dtlb_seen > 15)
772                                         break;
773                         }
774                 }
775
776                 for (i = 0; i < high; i++) {
777                         unsigned long data;
778
779                         /* Spitfire Errata #32 workaround */
780                         /* NOTE: Always runs on spitfire, so no
781                          *       cheetah+ page size encodings.
782                          */
783                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
784                                              "flush     %%g6"
785                                              : /* No outputs */
786                                              : "r" (0),
787                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
788
789                         data = spitfire_get_itlb_data(i);
790                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
791                                 unsigned long tag;
792
793                                 /* Spitfire Errata #32 workaround */
794                                 /* NOTE: Always runs on spitfire, so no
795                                  *       cheetah+ page size encodings.
796                                  */
797                                 __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
798                                                      "flush     %%g6"
799                                                      : /* No outputs */
800                                                      : "r" (0),
801                                                      "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
802
803                                 tag = spitfire_get_itlb_tag(i);
804                                 if (save_p) {
805                                         prom_itlb[itlb_seen].tlb_ent = i;
806                                         prom_itlb[itlb_seen].tlb_tag = tag;
807                                         prom_itlb[itlb_seen].tlb_data = data;
808                                 }
809                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
810                                                      "membar #Sync"
811                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
812                                 spitfire_put_itlb_data(i, 0x0UL);
813
814                                 itlb_seen++;
815                                 if (itlb_seen > 15)
816                                         break;
817                         }
818                 }
819         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
820                 int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
821
822                 for (i = 0; i < high; i++) {
823                         unsigned long data;
824
825                         data = cheetah_get_ldtlb_data(i);
826                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
827                                 unsigned long tag;
828
829                                 tag = cheetah_get_ldtlb_tag(i);
830                                 if (save_p) {
831                                         prom_dtlb[dtlb_seen].tlb_ent = i;
832                                         prom_dtlb[dtlb_seen].tlb_tag = tag;
833                                         prom_dtlb[dtlb_seen].tlb_data = data;
834                                 }
835                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
836                                                      "membar #Sync"
837                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
838                                 cheetah_put_ldtlb_data(i, 0x0UL);
839
840                                 dtlb_seen++;
841                                 if (dtlb_seen > 15)
842                                         break;
843                         }
844                 }
845
846                 for (i = 0; i < high; i++) {
847                         unsigned long data;
848
849                         data = cheetah_get_litlb_data(i);
850                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
851                                 unsigned long tag;
852
853                                 tag = cheetah_get_litlb_tag(i);
854                                 if (save_p) {
855                                         prom_itlb[itlb_seen].tlb_ent = i;
856                                         prom_itlb[itlb_seen].tlb_tag = tag;
857                                         prom_itlb[itlb_seen].tlb_data = data;
858                                 }
859                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
860                                                      "membar #Sync"
861                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
862                                 cheetah_put_litlb_data(i, 0x0UL);
863
864                                 itlb_seen++;
865                                 if (itlb_seen > 15)
866                                         break;
867                         }
868                 }
869         } else {
870                 /* Implement me :-) */
871                 BUG();
872         }
873         if (save_p)
874                 prom_ditlb_set = 1;
875 }
876
877 /* Give PROM back his world, done during reboots... */
878 void prom_reload_locked(void)
879 {
880         int i;
881
882         for (i = 0; i < 16; i++) {
883                 if (prom_dtlb[i].tlb_ent != -1) {
884                         __asm__ __volatile__("stxa %0, [%1] %2\n\t"
885                                              "membar #Sync"
886                                 : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
887                                 "i" (ASI_DMMU));
888                         if (tlb_type == spitfire)
889                                 spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
890                                                        prom_dtlb[i].tlb_data);
891                         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
892                                 cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
893                                                       prom_dtlb[i].tlb_data);
894                 }
895
896                 if (prom_itlb[i].tlb_ent != -1) {
897                         __asm__ __volatile__("stxa %0, [%1] %2\n\t"
898                                              "membar #Sync"
899                                              : : "r" (prom_itlb[i].tlb_tag),
900                                              "r" (TLB_TAG_ACCESS),
901                                              "i" (ASI_IMMU));
902                         if (tlb_type == spitfire)
903                                 spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
904                                                        prom_itlb[i].tlb_data);
905                         else
906                                 cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
907                                                        prom_itlb[i].tlb_data);
908                 }
909         }
910 }
911
912 #ifdef DCACHE_ALIASING_POSSIBLE
913 void __flush_dcache_range(unsigned long start, unsigned long end)
914 {
915         unsigned long va;
916
917         if (tlb_type == spitfire) {
918                 int n = 0;
919
920                 for (va = start; va < end; va += 32) {
921                         spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
922                         if (++n >= 512)
923                                 break;
924                 }
925         } else {
926                 start = __pa(start);
927                 end = __pa(end);
928                 for (va = start; va < end; va += 32)
929                         __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
930                                              "membar #Sync"
931                                              : /* no outputs */
932                                              : "r" (va),
933                                                "i" (ASI_DCACHE_INVALIDATE));
934         }
935 }
936 #endif /* DCACHE_ALIASING_POSSIBLE */
937
938 /* If not locked, zap it. */
939 void __flush_tlb_all(void)
940 {
941         unsigned long pstate;
942         int i;
943
944         __asm__ __volatile__("flushw\n\t"
945                              "rdpr      %%pstate, %0\n\t"
946                              "wrpr      %0, %1, %%pstate"
947                              : "=r" (pstate)
948                              : "i" (PSTATE_IE));
949         if (tlb_type == spitfire) {
950                 for (i = 0; i < 64; i++) {
951                         /* Spitfire Errata #32 workaround */
952                         /* NOTE: Always runs on spitfire, so no
953                          *       cheetah+ page size encodings.
954                          */
955                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
956                                              "flush     %%g6"
957                                              : /* No outputs */
958                                              : "r" (0),
959                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
960
961                         if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
962                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
963                                                      "membar #Sync"
964                                                      : /* no outputs */
965                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
966                                 spitfire_put_dtlb_data(i, 0x0UL);
967                         }
968
969                         /* Spitfire Errata #32 workaround */
970                         /* NOTE: Always runs on spitfire, so no
971                          *       cheetah+ page size encodings.
972                          */
973                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
974                                              "flush     %%g6"
975                                              : /* No outputs */
976                                              : "r" (0),
977                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
978
979                         if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
980                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
981                                                      "membar #Sync"
982                                                      : /* no outputs */
983                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
984                                 spitfire_put_itlb_data(i, 0x0UL);
985                         }
986                 }
987         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
988                 cheetah_flush_dtlb_all();
989                 cheetah_flush_itlb_all();
990         }
991         __asm__ __volatile__("wrpr      %0, 0, %%pstate"
992                              : : "r" (pstate));
993 }
994
995 /* Caller does TLB context flushing on local CPU if necessary.
996  * The caller also ensures that CTX_VALID(mm->context) is false.
997  *
998  * We must be careful about boundary cases so that we never
999  * let the user have CTX 0 (nucleus) or we ever use a CTX
1000  * version of zero (and thus NO_CONTEXT would not be caught
1001  * by version mis-match tests in mmu_context.h).
1002  */
1003 void get_new_mmu_context(struct mm_struct *mm)
1004 {
1005         unsigned long ctx, new_ctx;
1006         unsigned long orig_pgsz_bits;
1007         
1008
1009         spin_lock(&ctx_alloc_lock);
1010         orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
1011         ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
1012         new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
1013         if (new_ctx >= (1 << CTX_NR_BITS)) {
1014                 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
1015                 if (new_ctx >= ctx) {
1016                         int i;
1017                         new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
1018                                 CTX_FIRST_VERSION;
1019                         if (new_ctx == 1)
1020                                 new_ctx = CTX_FIRST_VERSION;
1021
1022                         /* Don't call memset, for 16 entries that's just
1023                          * plain silly...
1024                          */
1025                         mmu_context_bmap[0] = 3;
1026                         mmu_context_bmap[1] = 0;
1027                         mmu_context_bmap[2] = 0;
1028                         mmu_context_bmap[3] = 0;
1029                         for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
1030                                 mmu_context_bmap[i + 0] = 0;
1031                                 mmu_context_bmap[i + 1] = 0;
1032                                 mmu_context_bmap[i + 2] = 0;
1033                                 mmu_context_bmap[i + 3] = 0;
1034                         }
1035                         goto out;
1036                 }
1037         }
1038         mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
1039         new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
1040 out:
1041         tlb_context_cache = new_ctx;
1042         mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
1043         spin_unlock(&ctx_alloc_lock);
1044 }
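/* Sketch of the layout managed above: the low CTX_NR_BITS of a context
 * value are the context number and the bits above them are a version.
 * mmu_context_bmap tracks which numbers of the current version are in
 * use; when they run out the version field is advanced, the bitmap is
 * cleared apart from its first two bits (context 0 is the nucleus and
 * is never handed to a user mm), and any mm holding a stale version
 * fails the CTX_VALID() check and comes back through this allocator.
 */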
1045
1046 #ifndef CONFIG_SMP
1047 struct pgtable_cache_struct pgt_quicklists;
1048 #endif
1049
1050 /* OK, we have to color these pages. The page tables are accessed
1051  * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
1052  * code, as well as by PAGE_OFFSET range direct-mapped addresses by 
1053  * other parts of the kernel. By coloring, we make sure that the tlbmiss 
1054  * fast handlers do not get data from old/garbage dcache lines that 
1055  * correspond to an old/stale virtual address (user/kernel) that 
1056  * previously mapped the pagetable page while accessing vpte range 
1057  * addresses. The idea is that if the vpte color and PAGE_OFFSET range 
1058  * color is the same, then when the kernel initializes the pagetable 
1059  * using the later address range, accesses with the first address
1060  * range will see the newly initialized data rather than the garbage.
1061  */
1062 #ifdef DCACHE_ALIASING_POSSIBLE
1063 #define DC_ALIAS_SHIFT  1
1064 #else
1065 #define DC_ALIAS_SHIFT  0
1066 #endif
1067 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
1068 {
1069         struct page *page;
1070         unsigned long color;
1071
1072         {
1073                 pte_t *ptep = pte_alloc_one_fast(mm, address);
1074
1075                 if (ptep)
1076                         return ptep;
1077         }
1078
1079         color = VPTE_COLOR(address);
1080         page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
1081         if (page) {
1082                 unsigned long *to_free;
1083                 unsigned long paddr;
1084                 pte_t *pte;
1085
1086 #ifdef DCACHE_ALIASING_POSSIBLE
1087                 set_page_count(page, 1);
1088                 ClearPageCompound(page);
1089
1090                 set_page_count((page + 1), 1);
1091                 ClearPageCompound(page + 1);
1092 #endif
1093                 paddr = (unsigned long) page_address(page);
1094                 memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
1095
1096                 if (!color) {
1097                         pte = (pte_t *) paddr;
1098                         to_free = (unsigned long *) (paddr + PAGE_SIZE);
1099                 } else {
1100                         pte = (pte_t *) (paddr + PAGE_SIZE);
1101                         to_free = (unsigned long *) paddr;
1102                 }
1103
1104 #ifdef DCACHE_ALIASING_POSSIBLE
1105                 /* Now free the other one up, adjust cache size. */
1106                 preempt_disable();
1107                 *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
1108                 pte_quicklist[color ^ 0x1] = to_free;
1109                 pgtable_cache_size++;
1110                 preempt_enable();
1111 #endif
1112
1113                 return pte;
1114         }
1115         return NULL;
1116 }
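/* The colouring above works on an order-DC_ALIAS_SHIFT allocation:
 * when D-cache aliasing is possible two adjacent pages are obtained,
 * the one whose colour matches VPTE_COLOR(address) is returned as the
 * new pte page, and its differently-coloured twin is pushed onto the
 * opposite pte_quicklist so that it can satisfy a later allocation of
 * the other colour instead of being wasted.
 */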
1117
1118 void sparc_ultra_dump_itlb(void)
1119 {
1120         int slot;
1121
1122         if (tlb_type == spitfire) {
1123                 printk ("Contents of itlb: ");
1124                 for (slot = 0; slot < 14; slot++) printk ("    ");
1125                 printk ("%2x:%016lx,%016lx\n",
1126                         0,
1127                         spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
1128                 for (slot = 1; slot < 64; slot+=3) {
1129                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
1130                                 slot,
1131                                 spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
1132                                 slot+1,
1133                                 spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
1134                                 slot+2,
1135                                 spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
1136                 }
1137         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1138                 printk ("Contents of itlb0:\n");
1139                 for (slot = 0; slot < 16; slot+=2) {
1140                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1141                                 slot,
1142                                 cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
1143                                 slot+1,
1144                                 cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
1145                 }
1146                 printk ("Contents of itlb2:\n");
1147                 for (slot = 0; slot < 128; slot+=2) {
1148                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1149                                 slot,
1150                                 cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
1151                                 slot+1,
1152                                 cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
1153                 }
1154         }
1155 }
1156
1157 void sparc_ultra_dump_dtlb(void)
1158 {
1159         int slot;
1160
1161         if (tlb_type == spitfire) {
1162                 printk ("Contents of dtlb: ");
1163                 for (slot = 0; slot < 14; slot++) printk ("    ");
1164                 printk ("%2x:%016lx,%016lx\n", 0,
1165                         spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
1166                 for (slot = 1; slot < 64; slot+=3) {
1167                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
1168                                 slot,
1169                                 spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
1170                                 slot+1,
1171                                 spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
1172                                 slot+2,
1173                                 spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
1174                 }
1175         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1176                 printk ("Contents of dtlb0:\n");
1177                 for (slot = 0; slot < 16; slot+=2) {
1178                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1179                                 slot,
1180                                 cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
1181                                 slot+1,
1182                                 cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
1183                 }
1184                 printk ("Contents of dtlb2:\n");
1185                 for (slot = 0; slot < 512; slot+=2) {
1186                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1187                                 slot,
1188                                 cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
1189                                 slot+1,
1190                                 cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
1191                 }
1192                 if (tlb_type == cheetah_plus) {
1193                         printk ("Contents of dtlb3:\n");
1194                         for (slot = 0; slot < 512; slot+=2) {
1195                                 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1196                                         slot,
1197                                         cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
1198                                         slot+1,
1199                                         cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
1200                         }
1201                 }
1202         }
1203 }
1204
1205 extern unsigned long cmdline_memory_size;
1206
1207 unsigned long __init bootmem_init(unsigned long *pages_avail)
1208 {
1209         unsigned long bootmap_size, start_pfn, end_pfn;
1210         unsigned long end_of_phys_memory = 0UL;
1211         unsigned long bootmap_pfn, bytes_avail, size;
1212         int i;
1213
1214 #ifdef CONFIG_DEBUG_BOOTMEM
1215         prom_printf("bootmem_init: Scan sp_banks, ");
1216 #endif
1217
1218         bytes_avail = 0UL;
1219         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1220                 end_of_phys_memory = sp_banks[i].base_addr +
1221                         sp_banks[i].num_bytes;
1222                 bytes_avail += sp_banks[i].num_bytes;
1223                 if (cmdline_memory_size) {
1224                         if (bytes_avail > cmdline_memory_size) {
1225                                 unsigned long slack = bytes_avail - cmdline_memory_size;
1226
1227                                 bytes_avail -= slack;
1228                                 end_of_phys_memory -= slack;
1229
1230                                 sp_banks[i].num_bytes -= slack;
1231                                 if (sp_banks[i].num_bytes == 0) {
1232                                         sp_banks[i].base_addr = 0xdeadbeef;
1233                                 } else {
1234                                         sp_banks[i+1].num_bytes = 0;
1235                                         sp_banks[i+1].base_addr = 0xdeadbeef;
1236                                 }
1237                                 break;
1238                         }
1239                 }
1240         }
1241
1242         *pages_avail = bytes_avail >> PAGE_SHIFT;
1243
1244         /* Start with page aligned address of last symbol in kernel
1245          * image.  The kernel is hard mapped below PAGE_OFFSET in a
1246          * 4MB locked TLB translation.
1247          */
1248         start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
1249
1250         bootmap_pfn = start_pfn;
1251
1252         end_pfn = end_of_phys_memory >> PAGE_SHIFT;
1253
1254 #ifdef CONFIG_BLK_DEV_INITRD
1255         /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
1256         if (sparc_ramdisk_image || sparc_ramdisk_image64) {
1257                 unsigned long ramdisk_image = sparc_ramdisk_image ?
1258                         sparc_ramdisk_image : sparc_ramdisk_image64;
1259                 if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
1260                         ramdisk_image -= KERNBASE;
1261                 initrd_start = ramdisk_image + phys_base;
1262                 initrd_end = initrd_start + sparc_ramdisk_size;
1263                 if (initrd_end > end_of_phys_memory) {
1264                         printk(KERN_CRIT "initrd extends beyond end of memory "
1265                                          "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
1266                                initrd_end, end_of_phys_memory);
1267                         initrd_start = 0;
1268                 }
1269                 if (initrd_start) {
1270                         if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
1271                             initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
1272                                 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
1273                 }
1274         }
1275 #endif  
1276         /* Initialize the boot-time allocator. */
1277         max_pfn = max_low_pfn = end_pfn;
1278         min_low_pfn = pfn_base;
1279
1280 #ifdef CONFIG_DEBUG_BOOTMEM
1281         prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
1282                     min_low_pfn, bootmap_pfn, max_low_pfn);
1283 #endif
1284         bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
1285
1286         /* Now register the available physical memory with the
1287          * allocator.
1288          */
1289         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1290 #ifdef CONFIG_DEBUG_BOOTMEM
1291                 prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
1292                             i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
1293 #endif
1294                 free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
1295         }
1296
1297 #ifdef CONFIG_BLK_DEV_INITRD
1298         if (initrd_start) {
1299                 size = initrd_end - initrd_start;
1300
1301                 /* Reserve the initrd image area. */
1302 #ifdef CONFIG_DEBUG_BOOTMEM
1303                 prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
1304                         initrd_start, size);
1305 #endif
1306                 reserve_bootmem(initrd_start, size);
1307                 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1308
1309                 initrd_start += PAGE_OFFSET;
1310                 initrd_end += PAGE_OFFSET;
1311         }
1312 #endif
1313         /* Reserve the kernel text/data/bss. */
1314 #ifdef CONFIG_DEBUG_BOOTMEM
1315         prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
1316 #endif
1317         reserve_bootmem(kern_base, kern_size);
1318         *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
1319
1320         /* Reserve the bootmem map.   We do not account for it
1321          * in pages_avail because we will release that memory
1322          * in free_all_bootmem.
1323          */
1324         size = bootmap_size;
1325 #ifdef CONFIG_DEBUG_BOOTMEM
1326         prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
1327                     (bootmap_pfn << PAGE_SHIFT), size);
1328 #endif
1329         reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
1330         *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1331
1332         return end_pfn;
1333 }
1334
1335 /* paging_init() sets up the page tables */
1336
1337 extern void cheetah_ecache_flush_init(void);
1338
1339 static unsigned long last_valid_pfn;
1340
1341 void __init paging_init(void)
1342 {
1343         extern pmd_t swapper_pmd_dir[1024];
1344         unsigned long end_pfn, pages_avail, shift;
1345         unsigned long real_end;
1346
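        /* Context 0 is the kernel (nucleus) context; mark it allocated so
         * get_new_mmu_context() never hands it out to a user process.
         */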
1347         set_bit(0, mmu_context_bmap);
1348
1349         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1350
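        /* The kernel image is mapped with locked 4MB TLB entries.  A kernel
         * larger than 4MB needs a second locked entry (bigkernel); anything
         * past 8MB cannot be covered and is fatal.
         */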
1351         real_end = (unsigned long)_end;
1352         if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
1353                 bigkernel = 1;
1354         if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
1355                 prom_printf("paging_init: Kernel > 8MB, too large.\n");
1356                 prom_halt();
1357         }
1358
1359         /* Set kernel pgd to upper alias so physical page computations
1360          * work.
1361          */
1362         init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1363         
1364         memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
1365
1366         /* Now can init the kernel/bad page tables. */
1367         pud_set(pud_offset(&swapper_pg_dir[0], 0),
1368                 swapper_pmd_dir + (shift / sizeof(pgd_t)));
1369         
1370         swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1371         
1372         /* Inherit non-locked OBP mappings. */
1373         inherit_prom_mappings();
1374         
1375         /* Ok, we can use our TLB miss and window trap handlers safely.
1376          * We need a quick peek here to see whether we are on StarFire
1377          * or not, so that setup_tba() can set up the IRQ globals
1378          * correctly (it needs the hard SMP processor id).
1379          */
1380         {
1381                 extern void setup_tba(int);
1382                 setup_tba(this_is_starfire);
1383         }
1384
1385         inherit_locked_prom_mappings(1);
1386
1387         __flush_tlb_all();
1388
1389         /* Setup bootmem... */
1390         pages_avail = 0;
1391         last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1392
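        /* All of physical memory lives in a single ZONE_DMA on sparc64.
         * Pages that bootmem_init() did not hand to the allocator show up
         * as holes.
         */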
1393         {
1394                 unsigned long zones_size[MAX_NR_ZONES];
1395                 unsigned long zholes_size[MAX_NR_ZONES];
1396                 unsigned long npages;
1397                 int znum;
1398
1399                 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1400                         zones_size[znum] = zholes_size[znum] = 0;
1401
1402                 npages = end_pfn - pfn_base;
1403                 zones_size[ZONE_DMA] = npages;
1404                 zholes_size[ZONE_DMA] = npages - pages_avail;
1405
1406                 free_area_init_node(0, &contig_page_data, zones_size,
1407                                     phys_base >> PAGE_SHIFT, zholes_size);
1408         }
1409
1410         device_scan();
1411 }
1412
1413 /* Ok, it seems that the prom can allocate some more memory chunks
1414  * as a side effect of some prom calls we perform during the
1415  * boot sequence.  My most likely theory is that it is from the
1416  * prom_set_traptable() call, and OBP is allocating a scratchpad
1417  * for saving client program register state etc.
1418  */
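/* Sort a PROM memory list into ascending order of start address: pick the
 * lowest remaining entry and shift the intervening entries up to make room
 * for it.
 */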
1419 static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
1420 {
1421         int swapi = 0;
1422         int i, mitr;
1423         unsigned long tmpaddr, tmpsize;
1424         unsigned long lowest;
1425
1426         for (i = 0; thislist[i].theres_more != 0; i++) {
1427                 lowest = thislist[i].start_adr;
1428                 for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
1429                         if (thislist[mitr].start_adr < lowest) {
1430                                 lowest = thislist[mitr].start_adr;
1431                                 swapi = mitr;
1432                         }
1433                 if (lowest == thislist[i].start_adr)
1434                         continue;
1435                 tmpaddr = thislist[swapi].start_adr;
1436                 tmpsize = thislist[swapi].num_bytes;
1437                 for (mitr = swapi; mitr > i; mitr--) {
1438                         thislist[mitr].start_adr = thislist[mitr-1].start_adr;
1439                         thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
1440                 }
1441                 thislist[i].start_adr = tmpaddr;
1442                 thislist[i].num_bytes = tmpsize;
1443         }
1444 }
1445
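/* Re-read the "available" property of the /memory PROM node and rebuild
 * sp_banks[] from it, so that any memory the PROM has consumed since boot
 * drops out of the list.
 */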
1446 void __init rescan_sp_banks(void)
1447 {
1448         struct linux_prom64_registers memlist[64];
1449         struct linux_mlist_p1275 avail[64], *mlist;
1450         unsigned long bytes, base_paddr;
1451         int num_regs, node = prom_finddevice("/memory");
1452         int i;
1453
1454         num_regs = prom_getproperty(node, "available",
1455                                     (char *) memlist, sizeof(memlist));
1456         num_regs = (num_regs / sizeof(struct linux_prom64_registers));
1457         for (i = 0; i < num_regs; i++) {
1458                 avail[i].start_adr = memlist[i].phys_addr;
1459                 avail[i].num_bytes = memlist[i].reg_size;
1460                 avail[i].theres_more = &avail[i + 1];
1461         }
1462         avail[i - 1].theres_more = NULL;
1463         sort_memlist(avail);
1464
1465         mlist = &avail[0];
1466         i = 0;
1467         bytes = mlist->num_bytes;
1468         base_paddr = mlist->start_adr;
1469   
1470         sp_banks[0].base_addr = base_paddr;
1471         sp_banks[0].num_bytes = bytes;
1472
1473         while (mlist->theres_more != NULL){
1474                 i++;
1475                 mlist = mlist->theres_more;
1476                 bytes = mlist->num_bytes;
1477                 if (i >= SPARC_PHYS_BANKS-1) {
1478                         printk("The machine has more banks than "
1479                                 "this kernel can support\n"
1480                                 "Increase the SPARC_PHYS_BANKS "
1481                                 "setting (currently %d)\n",
1482                                 SPARC_PHYS_BANKS);
1483                         i = SPARC_PHYS_BANKS - 2; /* leave room for the terminator below */
1484                         break;
1485                 }
1486     
1487                 sp_banks[i].base_addr = mlist->start_adr;
1488                 sp_banks[i].num_bytes = mlist->num_bytes;
1489         }
1490
1491         i++;
1492         sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
1493         sp_banks[i].num_bytes = 0;
1494
1495         for (i = 0; sp_banks[i].num_bytes != 0; i++)
1496                 sp_banks[i].num_bytes &= PAGE_MASK;
1497 }
1498
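/* Compare the boot-time sp_banks[] contents against a fresh rescan.  Pages
 * still present are marked in sparc64_valid_addr_bitmap (one bit per 4MB
 * chunk); pages the PROM has taken away are reserved in bootmem so we never
 * hand them out.
 */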
1499 static void __init taint_real_pages(void)
1500 {
1501         struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
1502         int i;
1503
1504         for (i = 0; i < SPARC_PHYS_BANKS; i++) {
1505                 saved_sp_banks[i].base_addr =
1506                         sp_banks[i].base_addr;
1507                 saved_sp_banks[i].num_bytes =
1508                         sp_banks[i].num_bytes;
1509         }
1510
1511         rescan_sp_banks();
1512
1513         /* Find changes discovered in the sp_bank rescan and
1514          * reserve the lost portions in the bootmem maps.
1515          */
1516         for (i = 0; saved_sp_banks[i].num_bytes; i++) {
1517                 unsigned long old_start, old_end;
1518
1519                 old_start = saved_sp_banks[i].base_addr;
1520                 old_end = old_start +
1521                         saved_sp_banks[i].num_bytes;
1522                 while (old_start < old_end) {
1523                         int n;
1524
1525                         for (n = 0; sp_banks[n].num_bytes; n++) {
1526                                 unsigned long new_start, new_end;
1527
1528                                 new_start = sp_banks[n].base_addr;
1529                                 new_end = new_start + sp_banks[n].num_bytes;
1530
1531                                 if (new_start <= old_start &&
1532                                     new_end >= (old_start + PAGE_SIZE)) {
1533                                         set_bit (old_start >> 22,
1534                                                  sparc64_valid_addr_bitmap);
1535                                         goto do_next_page;
1536                                 }
1537                         }
1538                         reserve_bootmem(old_start, PAGE_SIZE);
1539
1540                 do_next_page:
1541                         old_start += PAGE_SIZE;
1542                 }
1543         }
1544 }
1545
1546 void __init mem_init(void)
1547 {
1548         unsigned long codepages, datapages, initpages;
1549         unsigned long addr, last;
1550         int i;
1551
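        /* sparc64_valid_addr_bitmap has one bit per 4MB of physical address
         * space.  last_valid_pfn >> (22 - PAGE_SHIFT) is the number of 4MB
         * chunks, the further >> 6 converts bits to 64-bit words, and
         * "i << 3" below is the byte count handed to alloc_bootmem().
         */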
1552         i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
1553         i += 1;
1554         sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
1555         if (sparc64_valid_addr_bitmap == NULL) {
1556                 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1557                 prom_halt();
1558         }
1559         memset(sparc64_valid_addr_bitmap, 0, i << 3);
1560
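        /* The kernel image itself is always resident: mark the 4MB chunks
         * covering it as valid.
         */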
1561         addr = PAGE_OFFSET + kern_base;
1562         last = PAGE_ALIGN(kern_size) + addr;
1563         while (addr < last) {
1564                 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
1565                 addr += PAGE_SIZE;
1566         }
1567
1568         taint_real_pages();
1569
1570         max_mapnr = last_valid_pfn - pfn_base;
1571         high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1572
1573 #ifdef CONFIG_DEBUG_BOOTMEM
1574         prom_printf("mem_init: Calling free_all_bootmem().\n");
1575 #endif
1576         totalram_pages = num_physpages = free_all_bootmem() - 1;
1577
1578         /*
1579          * Set up the zero page, mark it reserved, so that page count
1580          * is not manipulated when freeing the page from user ptes.
1581          */
1582         mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
1583         if (mem_map_zero == NULL) {
1584                 prom_printf("paging_init: Cannot alloc zero page.\n");
1585                 prom_halt();
1586         }
1587         SetPageReserved(mem_map_zero);
1588
1589         codepages = (((unsigned long) _etext) - ((unsigned long) _start));
1590         codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
1591         datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
1592         datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
1593         initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
1594         initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
1595
1596         printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
1597                nr_free_pages() << (PAGE_SHIFT-10),
1598                codepages << (PAGE_SHIFT-10),
1599                datapages << (PAGE_SHIFT-10), 
1600                initpages << (PAGE_SHIFT-10), 
1601                PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
1602
1603         if (tlb_type == cheetah || tlb_type == cheetah_plus)
1604                 cheetah_ecache_flush_init();
1605 }
1606
1607 void free_initmem (void)
1608 {
1609         unsigned long addr, initend;
1610
1611         /*
1612          * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
1613          */
1614         addr = PAGE_ALIGN((unsigned long)(__init_begin));
1615         initend = (unsigned long)(__init_end) & PAGE_MASK;
1616         for (; addr < initend; addr += PAGE_SIZE) {
1617                 unsigned long page;
1618                 struct page *p;
1619
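                /* "addr" is in the KERNBASE alias of the kernel image;
                 * translate it to the equivalent address in the PAGE_OFFSET
                 * linear mapping so virt_to_page() finds the right struct
                 * page.  The page is poisoned with 0xcc before being
                 * returned to the page allocator.
                 */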
1620                 page = (addr +
1621                         ((unsigned long) __va(kern_base)) -
1622                         ((unsigned long) KERNBASE));
1623                 memset((void *)addr, 0xcc, PAGE_SIZE);
1624                 p = virt_to_page(page);
1625
1626                 ClearPageReserved(p);
1627                 set_page_count(p, 1);
1628                 __free_page(p);
1629                 num_physpages++;
1630                 totalram_pages++;
1631         }
1632 }
1633
1634 #ifdef CONFIG_BLK_DEV_INITRD
1635 void free_initrd_mem(unsigned long start, unsigned long end)
1636 {
1637         if (start < end)
1638                 printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1639         for (; start < end; start += PAGE_SIZE) {
1640                 struct page *p = virt_to_page(start);
1641
1642                 ClearPageReserved(p);
1643                 set_page_count(p, 1);
1644                 __free_page(p);
1645                 num_physpages++;
1646                 totalram_pages++;
1647         }
1648 }
1649 #endif