diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 7b7fe2d..056d23a 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
+#include <asm/ppc-opcode.h>
 
 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -38,7 +39,7 @@
 
 static DEFINE_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long va, unsigned int psize)
+static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -48,18 +49,27 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
-		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
+		va |= ssize << 8;
+		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
+					       %2)
+			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+			     : "memory");
 		break;
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
-		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
+		va |= ssize << 8;
+		va |= 1; /* L */
+		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
+					       %2)
+			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+			     : "memory");
 		break;
 	}
 }
 
-static inline void __tlbiel(unsigned long va, unsigned int psize)
+static inline void __tlbiel(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -69,6 +79,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -76,6 +87,8 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
+		va |= 1; /* L */
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -83,7 +96,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	}
 }
 
-static inline void tlbie(unsigned long va, int psize, int local)
+static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
 	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
@@ -94,17 +107,17 @@ static inline void tlbie(unsigned long va, int psize, int local)
 		spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
-		__tlbiel(va, psize);
+		__tlbiel(va, psize, ssize);
 		asm volatile("ptesync": : :"memory");
 	} else {
-		__tlbie(va, psize);
+		__tlbie(va, psize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
 		spin_unlock(&native_tlbie_lock);
 }
 
-static inline void native_lock_hpte(hpte_t *hptep)
+static inline void native_lock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
@@ -116,7 +129,7 @@ static inline void native_lock_hpte(hpte_t *hptep)
 	}
 }
 
-static inline void native_unlock_hpte(hpte_t *hptep)
+static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
@@ -126,9 +139,9 @@ static inline void native_unlock_hpte(hpte_t *hptep)
 }
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			unsigned long pa, unsigned long rflags,
-			unsigned long vflags, int psize)
+			unsigned long vflags, int psize, int ssize)
 {
-	hpte_t *hptep = htab_address + hpte_group;
+	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
 	int i;
 
@@ -153,7 +166,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
@@ -163,7 +176,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	hptep->r = hpte_r;
 	/* Guarantee the second dword is visible before the valid bit */
-	__asm__ __volatile__ ("eieio" : : : "memory");
+	eieio();
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
 	 */
@@ -177,7 +190,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 	int i;
 	int slot_offset;
 	unsigned long hpte_v;
@@ -215,13 +228,14 @@ static long native_hpte_remove(unsigned long hpte_group)
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				 unsigned long va, int psize, int local)
+				 unsigned long va, int psize, int ssize,
+				 int local)
 {
-	hpte_t *hptep = htab_address + slot;
+	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
 	DBG_LOW("	update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
 		va, want_v & HPTE_V_AVPN, slot, newpp);
@@ -233,50 +247,42 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
 		DBG_LOW(" -> miss\n");
-		native_unlock_hpte(hptep);
 		ret = -1;
 	} else {
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
 			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
-		native_unlock_hpte(hptep);
 	}
+	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	return ret;
 }
 
-static long native_hpte_find(unsigned long va, int psize)
+static long native_hpte_find(unsigned long va, int psize, int ssize)
 {
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 	unsigned long hash;
-	unsigned long i, j;
+	unsigned long i;
 	long slot;
 	unsigned long want_v, hpte_v;
 
-	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
-	want_v = hpte_encode_v(va, psize);
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
-	for (j = 0; j < 2; j++) {
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		for (i = 0; i < HPTES_PER_GROUP; i++) {
-			hptep = htab_address + slot;
-			hpte_v = hptep->v;
+	/* Bolted mappings are only ever in the primary group */
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	for (i = 0; i < HPTES_PER_GROUP; i++) {
+		hptep = htab_address + slot;
+		hpte_v = hptep->v;
 
-			if (HPTE_V_COMPARE(hpte_v, want_v)
-			    && (hpte_v & HPTE_V_VALID)
-			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
-				/* HPTE matches */
-				if (j)
-					slot = -slot;
-				return slot;
-			}
-			++slot;
-		}
-		hash = ~hash;
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+			/* HPTE matches */
+			return slot;
+		++slot;
 	}
 
 	return -1;
@@ -290,16 +296,16 @@ static long native_hpte_find(unsigned long va, int psize)
  * No need to lock here because we should be the only user.
  */
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
-				       int psize)
+				       int psize, int ssize)
 {
 	unsigned long vsid, va;
 	long slot;
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 
-	vsid = get_kernel_vsid(ea);
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	vsid = get_kernel_vsid(ea, ssize);
+	va = hpt_va(ea, vsid, ssize);
 
-	slot = native_hpte_find(va, psize);
+	slot = native_hpte_find(va, psize, ssize);
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
@@ -309,13 +315,13 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, 0);
+	tlbie(va, psize, ssize, 0);
 }
 
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
-				   int psize, int local)
+				   int psize, int ssize, int local)
 {
-	hpte_t *hptep = htab_address + slot;
+	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v;
 	unsigned long want_v;
 	unsigned long flags;
@@ -324,7 +330,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 
 	DBG_LOW("	invalidate(va=%016lx, hash: %x)\n", va, slot);
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
 
@@ -336,7 +342,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	local_irq_restore(flags);
 }
@@ -345,13 +351,13 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_BITS		8
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
-static void hpte_decode(hpte_t *hpte, unsigned long slot,
-			int *psize, unsigned long *va)
+static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
+			int *psize, int *ssize, unsigned long *va)
 {
 	unsigned long hpte_r = hpte->r;
 	unsigned long hpte_v = hpte->v;
 	unsigned long avpn;
-	int i, size, shift, penc, avpnm_bits;
+	int i, size, shift, penc;
 
 	if (!(hpte_v & HPTE_V_LARGE))
 		size = MMU_PAGE_4K;
@@ -376,35 +382,33 @@ static void hpte_decode(hpte_t *hpte, unsigned long slot,
 		}
 	}
 
-	/*
-	 * FIXME, the code below works for 16M, 64K, and 4K pages as these
-	 * fall under the p<=23 rules for calculating the virtual address.
-	 * In the case of 16M pages, an extra bit is stolen from the AVPN
-	 * field to achieve the requisite 24 bits.
-	 *
-	 * Does not work for 16G pages or 1 TB segments.
-	 */
+	/* This works for all page sizes, and for 256M and 1T segments */
 	shift = mmu_psize_defs[size].shift;
-	if (mmu_psize_defs[size].avpnm)
-		avpnm_bits = __ilog2_u64(mmu_psize_defs[size].avpnm) + 1;
-	else
-		avpnm_bits = 0;
-	if (shift - avpnm_bits <= 23) {
-		avpn = HPTE_V_AVPN_VAL(hpte_v) << 23;
+	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
 
-		if (shift < 23) {
-			unsigned long vpi, pteg;
+	if (shift < 23) {
+		unsigned long vpi, vsid, pteg;
 
-			pteg = slot / HPTES_PER_GROUP;
-			if (hpte_v & HPTE_V_SECONDARY)
-				pteg = ~pteg;
+		pteg = slot / HPTES_PER_GROUP;
+		if (hpte_v & HPTE_V_SECONDARY)
+			pteg = ~pteg;
+		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
+		case MMU_SEGSIZE_256M:
 			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
-			avpn |= (vpi << mmu_psize_defs[size].shift);
+			break;
+		case MMU_SEGSIZE_1T:
+			vsid = avpn >> 40;
+			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
+			break;
+		default:
+			avpn = vpi = size = 0;
 		}
+		avpn |= (vpi << mmu_psize_defs[size].shift);
 	}
 
 	*va = avpn;
 	*psize = size;
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 }
 
 /*
@@ -418,10 +422,10 @@ static void hpte_decode(hpte_t *hpte, unsigned long slot,
 static void native_hpte_clear(void)
 {
 	unsigned long slot, slots, flags;
-	hpte_t *hptep = htab_address;
+	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v, va;
 	unsigned long pteg_count;
-	int psize;
+	int psize, ssize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -447,9 +451,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
-			hpte_decode(hptep, slot, &psize, &va);
+			hpte_decode(hptep, slot, &psize, &ssize, &va);
 			hptep->v = 0;
-			__tlbie(va, psize);
+			__tlbie(va, psize, ssize);
 		}
 	}
 
@@ -465,13 +469,14 @@ static void native_hpte_clear(void)
 static void native_flush_hash_range(unsigned long number, int local)
 {
 	unsigned long va, hash, index, hidx, shift, slot;
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 	unsigned long hpte_v;
 	unsigned long want_v;
 	unsigned long flags;
 	real_pte_t pte;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long psize = batch->psize;
+	int ssize = batch->ssize;
 	int i;
 
 	local_irq_save(flags);
@@ -481,14 +486,14 @@ static void native_flush_hash_range(unsigned long number, int local)
 		pte = batch->pte[i];
 
 		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-			hash = hpt_hash(va, shift);
+			hash = hpt_hash(va, shift, ssize);
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
-			want_v = hpte_encode_v(va, psize);
+			want_v = hpte_encode_v(va, psize, ssize);
 			native_lock_hpte(hptep);
 			hpte_v = hptep->v;
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -508,7 +513,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
 						    shift) {
-				__tlbiel(va, psize);
+				__tlbiel(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("ptesync":::"memory");
@@ -525,7 +530,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
 						    shift) {
-				__tlbie(va, psize);
+				__tlbie(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
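
For readers tracing the new tlbie operand layout, here is a minimal stand-alone C sketch (not part of the patch) of the VA packing that the patched __tlbie() performs in its non-4K (large page) case: clear the in-page offset bits below the page-size shift, OR in the page-size encoding (penc) at bit 12, the segment size at bit 8, and the L bit at bit 0. The helper pack_tlbie_rb and the example values are hypothetical; the MMU_SEGSIZE_* values follow the kernel's definitions (0 = 256M, 1 = 1T).

#include <stdio.h>

/* Kernel values: MMU_SEGSIZE_256M = 0, MMU_SEGSIZE_1T = 1 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/* Hypothetical mirror of the large-page VA packing in __tlbie() above:
 * offset bits cleared, penc at bit 12, segment size at bit 8, L at bit 0. */
static unsigned long pack_tlbie_rb(unsigned long va, unsigned int shift,
				   unsigned long penc, int ssize)
{
	va &= ~((1ul << shift) - 1);	 /* drop the in-page offset bits */
	va |= penc << 12;		 /* page-size encoding for this psize */
	va |= (unsigned long)ssize << 8; /* segment size field */
	va |= 1;			 /* L = 1: large page */
	return va;
}

int main(void)
{
	/* Example: a 16M page (shift = 24, penc = 0) in a 1T segment */
	unsigned long rb = pack_tlbie_rb(0x3fe5678abc000ul, 24, 0,
					 MMU_SEGSIZE_1T);
	printf("tlbie RB operand: 0x%016lx\n", rb);
	return 0;
}

With MMU_SEGSIZE_1T this sets bit 8, which is how an invalidation in a 1T segment is distinguished from the 256M case in the hunks above; the same ssize << 8 term also appears in the patched __tlbiel() paths.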