[PATCH] powerpc: Kill _machine and hard-coded platform numbers
arch/powerpc/mm/hash_utils_64.c
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

hpte_t *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif

/* These are the page-size arrays to be used when none is
 * provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = 0,
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};

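/*
 * htab_bolt_mapping: create bolted (i.e. permanent, never evicted) hash
 * PTEs covering the virtual range [vstart, vend), mapped to the physical
 * range starting at pstart, with the protection bits in 'mode' and the
 * page size index 'psize'.  Stops and reports failure on the first
 * insertion error.
 */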
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long mode, int psize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        unsigned long tmp_mode;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long vpn, hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr);
                unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
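
                /* The address space is carved into 256MB segments
                 * (SID_SHIFT == 28): the high bits of 'vaddr' select a
                 * segment, get_kernel_vsid() turns that into a virtual
                 * segment id, and 'va' is the VSID glued to the 28-bit
                 * offset within the segment.  For example, with 4K pages
                 * (shift == 12) each loop iteration advances by 4KB and
                 * 'vpn' below is simply the virtual page number va >> 12.
                 */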

                vpn = va >> shift;
                tmp_mode = mode;

                /* Make non-kernel text non-executable */
                if (!in_kernel_text(vaddr))
                        tmp_mode = mode | HPTE_R_N;

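                /* hpt_hash() folds the VA into a hash bucket index;
                 * masking with htab_hash_mask picks a PTE group (PTEG),
                 * and each group holds HPTES_PER_GROUP (8) slots, hence
                 * the multiply to reach the group's first slot.
                 */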
                hash = hpt_hash(va, shift);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                /* The code below can be cleaned up once ppc_md.probe()
                 * can set up the hash callbacks; then we can just use the
                 * normal insert callback here.
                 */
#ifdef CONFIG_PPC_ISERIES
                if (machine_is(iseries))
                        ret = iSeries_hpte_insert(hpteg, va,
                                                  paddr,
                                                  tmp_mode,
                                                  HPTE_V_BOLTED,
                                                  psize);
                else
#endif
#ifdef CONFIG_PPC_PSERIES
                if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
                        ret = pSeries_lpar_hpte_insert(hpteg, va,
                                                       paddr,
                                                       tmp_mode,
                                                       HPTE_V_BOLTED,
                                                       psize);
                else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
                        ret = native_hpte_insert(hpteg, va,
                                                 paddr,
                                                 tmp_mode, HPTE_V_BOLTED,
                                                 psize);
#endif
                if (ret < 0)
                        break;
        }
        return ret < 0 ? ret : 0;
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

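        /* Layout of "ibm,segment-page-sizes", as inferred from the parsing
         * below: a list of entries, each being { base page shift, SLB
         * encoding, N, then N pairs of { actual page shift, HPTE penc } }.
         * We keep the encoding whose actual page size matches the segment
         * base page size.
         */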
        prop = (u32 *)of_get_flat_dt_prop(node,
                                          "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                DBG("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
                while (size > 0) {
                        unsigned int shift = prop[0];
                        unsigned int slbenc = prop[1];
                        unsigned int lpnum = prop[2];
                        unsigned int lpenc = 0;
                        struct mmu_psize_def *def;
                        int idx = -1;

                        size -= 3; prop += 3;
                        while (size > 0 && lpnum) {
                                if (prop[0] == shift)
                                        lpenc = prop[1];
                                prop += 2; size -= 2;
                                lpnum--;
                        }
                        switch (shift) {
                        case 0xc:
                                idx = MMU_PAGE_4K;
                                break;
                        case 0x10:
                                idx = MMU_PAGE_64K;
                                break;
                        case 0x14:
                                idx = MMU_PAGE_1M;
                                break;
                        case 0x18:
                                idx = MMU_PAGE_16M;
                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
                                break;
                        case 0x22:
                                idx = MMU_PAGE_16G;
                                break;
                        }
                        if (idx < 0)
                                continue;
                        def = &mmu_psize_defs[idx];
                        def->shift = shift;
                        if (shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (shift - 23)) - 1;
                        def->sllp = slbenc;
                        def->penc = lpenc;
                        /* We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
                            "tlbiel=%d, penc=%d\n",
                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
                            def->penc);
                }
                return 1;
        }
        return 0;
}

static void __init htab_init_page_sizes(void)
{
        int rc;

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree; fall back to the known size list
         * for 16M-capable CPUs (GP & GR)
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
 found:
        /*
         * Pick a size for the linear mapping. Currently, we only support
         * 16M, 1M and 4K which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;

        /*
         * Pick a size for the ordinary pages. Default is 4K; we support
         * 64K if cache-inhibited large pages are supported by the
         * processor
         */
#ifdef CONFIG_PPC_64K_PAGES
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
                mmu_virtual_psize = MMU_PAGE_64K;
#endif

        printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
         * on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_huge_psize = MMU_PAGE_16M;
        /* With 4k/4level pagetables, we can't (for now) cope with a
         * huge page size < PMD_SIZE */
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_huge_psize = MMU_PAGE_1M;

        /* Calculate HPAGE_SHIFT and sanity check it */
        if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
            mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
                HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
        else
                HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = prop[1];
                return 1;
        }
        return 0;
}

static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count;

        /* If the hash size isn't already provided by the platform, we try
         * to retrieve it from the device-tree. If it's not there either,
         * we calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = lmb_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
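
        /* Each HPTE is 16 bytes and a PTE group holds 8 of them, so a
         * PTEG is 128 bytes (hence the << 7 below).  Worked example for
         * 1GB of RAM: 2^30 >> 13 = 2^17 PTEGs (one per pair of 4K pages),
         * giving a 2^17 * 2^7 = 16MB hash table.  The max() enforces the
         * architectural minimum of 2^11 PTEGs (a 256KB table).
         */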

        return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
                mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long mode_rw;
        unsigned long base = 0, size = 0;
        int i;

        extern unsigned long tce_alloc_start, tce_alloc_end;

        DBG(" -> htab_initialize()\n");

        /* Initialize page sizes */
        htab_init_page_sizes();

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.
                 */
                table = lmb_alloc(htab_size_bytes, htab_size_bytes);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;
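
                /* SDR1 holds the table's real address in its upper bits
                 * and HTABSIZE in its low bits, where the architecture
                 * defines HTABSIZE = log2(number of PTEGs) - 11; e.g. a
                 * 16MB table has 2^17 PTEGs, so HTABSIZE is 6.  The
                 * lmb_alloc() above aligned the table on its own size,
                 * so the two fields cannot overlap.
                 */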

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */

        /* Create the bolted linear mapping in the hash table */
        for (i = 0; i < lmb.memory.cnt; i++) {
                base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;

                DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
                 * in such a way that it will not cross two lmb regions and
                 * will fit within a single 16MB page.
                 * The DART space is assumed to be a full 16MB region even if
                 * we only use 2MB of that space. We will use more of it later
                 * for AGP GART. We have to use a full 16MB large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                        __pa(base), mode_rw,
                                                        mmu_linear_psize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_table_end,
                                                        base + size,
                                                        __pa(dart_table_end),
                                                        mode_rw,
                                                        mmu_linear_psize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                        mode_rw, mmu_linear_psize));
        }

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), mode_rw,
                                         mmu_linear_psize));
        }

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

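/* Secondary CPUs share the hash table set up by the boot CPU; all they
 * need at bring-up is to load the same SDR1 value.  Under a hypervisor
 * (LPAR) the table is owned by the hypervisor and there is nothing to do.
 */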
void htab_initialize_secondary(void)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
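                /* 0x400 is the instruction storage (ISI) exception vector:
                 * the fault came from an instruction fetch, so make the
                 * icache coherent now and remember that in PG_arch_1.
                 * For data faults, leave the page non-executable instead
                 * and defer the flush until it is actually executed from.
                 */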
                if (trap == 0x400) {
                        __flush_dcache_icache(page_address(page));
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        cpumask_t tmp;
        int rc, user_region = 0, local = 0;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
                ea, access, trap);

        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
                DBG_LOW(" out of pgtable range !\n");
                return 1;
        }

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
                vsid = get_vsid(mm->context.id, ea);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea);
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        /* Check CPU locality */
        tmp = cpumask_of_cpu(smp_processor_id());
        if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;

        /* Handle hugepage regions */
        if (unlikely(in_hugepage_area(mm->context, ea))) {
                DBG_LOW(" -> huge page !\n");
                return hash_huge_page(mm, access, ea, vsid, local, trap);
        }

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte(pgdir, ea);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                return 1;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX, but this pre-check is a fast path)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

        /* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
        rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
        if (mmu_virtual_psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
        cpumask_t mask;
        unsigned long flags;
        int local = 0;

        /* We don't want huge pages prefaulted for now
         */
        if (unlikely(in_hugepage_area(mm->context, ea)))
                return;

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx)\n", mm, mm->pgd, ea, access, trap);

        /* Get PTE, VSID, access mask */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;
        vsid = get_vsid(mm->context.id, ea);

        /* Hash it in */
        local_irq_save(flags);
        mask = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(mm->cpu_vm_mask, mask))
                local = 1;
#ifndef CONFIG_PPC_64K_PAGES
        __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
        if (mmu_virtual_psize == MMU_PAGE_64K)
                __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
                __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
        local_irq_restore(flags);
}

void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(va=%016lx)\n", va);
        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                hash = hpt_hash(va, shift);
                hidx = __rpte_to_hidx(pte, index);
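                /* An HPTE lives either in its primary group or, if that
                 * was full at insertion time, in the secondary group,
                 * whose index is the one's complement of the primary
                 * hash; the hidx bits saved in the Linux PTE record which
                 * one, plus the slot within the group.
                 */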
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
                ppc_md.hpte_invalidate(slot, va, psize, local);
        } pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vaddr[i], batch->pte[i],
                                        batch->psize, local);
        }
}

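/* Patch a call site with a direct "bl" to 'func'.  The I-form branch
 * instruction is opcode 18 with the LK bit set (0x48000001) and a signed
 * 26-bit, word-aligned displacement, hence the 0x03fffffc mask; the
 * target must therefore lie within +/-32MB of the call site.  'func' is
 * a ppc64 ELF ABI function descriptor, so the entry address is read from
 * its first doubleword.
 */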
static inline void make_bl(unsigned int *insn_addr, void *func)
{
        unsigned long funcp = *((unsigned long *)func);
        int offset = funcp - (unsigned long)insn_addr;

        *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

/*
 * low_hash_fault is called when the low-level hash code fails to
 * insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                siginfo_t info;

                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return;
        }
        bad_page_fault(regs, address, SIGBUS);
}

void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */

        make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}