/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif
#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
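/* Drop one shadow HPTE: clear its slot in the host hash table, flush the
 * stale translation from the TLB and release the backing host page. */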
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u32 *pteg;

        dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
                    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

        pteg = (u32*)pte->slot;

        pteg[0] = 0;
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");

        pte->host_va = 0;

        if (pte->pte.may_write)
                kvm_release_pfn_dirty(pte->pfn);
        else
                kvm_release_pfn_clean(pte->pfn);
}
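/* Flush all cached shadow PTEs whose guest effective address matches
 * guest_ea under ea_mask; a mask of 0 flushes the entire cache. */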
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
        int i;

        dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
                    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
        BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

        guest_ea &= ea_mask;
        for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
                struct hpte_cache *pte;

                pte = &vcpu->arch.hpte_cache[i];
                if (!pte->host_va)
                        continue;
                if ((pte->pte.eaddr & ea_mask) == guest_ea)
                        invalidate_pte(vcpu, pte);
        }

        /* Doing a complete flush -> start from scratch */
        if (!ea_mask)
                vcpu->arch.hpte_cache_offset = 0;
}
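/* Same as kvmppc_mmu_pte_flush(), but match on the guest virtual page
 * number instead of the effective address. */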
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
        int i;

        dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
                    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
        BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

        guest_vp &= vp_mask;
        for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
                struct hpte_cache *pte;

                pte = &vcpu->arch.hpte_cache[i];
                if (!pte->host_va)
                        continue;
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);
        }
}
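/* Flush all cached shadow PTEs whose guest real address falls into the
 * range [pa_start, pa_end). */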
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        int i;

        dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
                    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
        BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

        for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
                struct hpte_cache *pte;

                pte = &vcpu->arch.hpte_cache[i];
                if (!pte->host_va)
                        continue;
                if ((pte->pte.raddr >= pa_start) &&
                    (pte->pte.raddr < pa_end))
                        invalidate_pte(vcpu, pte);
        }
}
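/* Look up a cached guest PTE by effective address, translated to a
 * virtual page number through the guest MMU's ea_to_vp(). */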
struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
{
        u64 guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
        int i;

        for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
                struct hpte_cache *pte = &vcpu->arch.hpte_cache[i];
                if (pte->pte.vpage == guest_vp)
                        return &pte->pte;
        }
        return NULL;
}
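/* Hand out the next free slot in the shadow PTE cache; when the cache is
 * full, flush everything and start over. */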
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return vcpu->arch.hpte_cache_offset++;
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
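/* Look up the host VSID for a guest VSID, checking the hashed slot and
 * its mirrored backup slot (see create_sid_map() below). */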
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (vcpu->arch.msr & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}
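/* Hash and _SDR1 are the host hash table base and the host's SDR1 copy,
 * maintained by the 32-bit hash MMU code in arch/powerpc/mm. A PTE group
 * is selected by mirroring the hardware hash function:
 *
 *   hash = (vsid ^ page_index) << 6    (inverted for the secondary group)
 *   pteg = Hash | (hash & htabmask)    (htabmask derived from SDR1)
 */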
extern struct hash_pte *Hash;
extern unsigned long _SDR1;

static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash, htabmask;
        ulong pteg = (ulong)Hash;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;
        htabmask = ((_SDR1 & 0x1FF) << 16) | 0xFFC0;
        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %p | hash: %x | htabmask: %x | pteg: %lx\n",
                    Hash, hash, htabmask, pteg);

        return (u32*)pteg;
}
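/* Map a guest page into the host hash table. pteg0/pteg1 are built in the
 * 32-bit hashed PTE format: word 0 carries the valid bit, VSID, hash
 * function selector and abbreviated page index, word 1 carries the real
 * page number plus the R, C, memory-coherence and protection bits. */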
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
        ulong hpaddr;
        u64 va;
        u64 vsid;
        struct kvmppc_sid_map *map;
        u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        int rr = 0;
        bool primary = false;
        bool evict = false;
        int hpte_id;
        struct hpte_cache *pte;

        /* Get host physical address for gpa */
        hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        if (kvm_is_error_hva(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
                                 orig_pte->eaddr);
                return -EINVAL;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }

        vsid = map->host_vsid;
        va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);

next_pteg:
        /* After 8 slots, try the other hash; from then on we may evict. */
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        /* Invalidate the slot before installing the new entry. */
        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        /* Now tell our Shadow PTE code about the new page */
        hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
        pte = &vcpu->arch.hpte_cache[hpte_id];

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, va,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_va = va;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        return 0;
}
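/* Allocate a host VSID for a guest VSID that has no mapping yet. When the
 * VSID space reserved at init time is used up, throw away all mappings
 * and segments and start over. */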
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (vcpu->arch.msr & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */
        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
                vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_next;

        /* Would have to be 111 to be completely aligned with the rest of
           Linux, but that is just way too little space! */
        vcpu_book3s->vsid_next += 1;

        map->guest_vsid = gvsid;

        return map;
}
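/* Load the shadow segment register for the segment containing eaddr with
 * the host VSID that backs the current guest VSID. */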
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                return -ENOENT;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

        return 0;
}
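/* Invalidate all shadow segment registers, forcing them to be remapped on
 * the next access. */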
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_pte_flush(vcpu, 0, 0);
        __destroy_context(to_book3s(vcpu)->context_id);
}
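/* At vcpu creation we reserve one host MMU context; CTX_TO_VSID() mirrors
 * the host's context-to-VSID mapping so the guest gets its own slice of
 * the 24-bit VSID space. */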
/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = __init_new_context();
        if (err < 0)
                return -1;
        vcpu3s->context_id = err;

        vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
        vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);

#if 0 /* XXX still doesn't guarantee uniqueness */
        /* We could collide with the Linux vsid space because the vsid
         * wraps around at 24 bits. We're safe if we do our own space
         * though, so let's always set the highest bit. */

        vcpu3s->vsid_max |= 0x00800000;
        vcpu3s->vsid_first |= 0x00800000;
#endif

        BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);

        vcpu3s->vsid_next = vcpu3s->vsid_first;

        return 0;
}