KVM: Add instruction emulation statistics
drivers/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "kvm.h"
22 #include "x86.h"
23
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29
30 #include <asm/page.h>
31 #include <asm/cmpxchg.h>
32
33 #undef MMU_DEBUG
34
35 #undef AUDIT
36
37 #ifdef AUDIT
38 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
39 #else
40 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
41 #endif
42
43 #ifdef MMU_DEBUG
44
45 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
46 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
47
48 #else
49
50 #define pgprintk(x...) do { } while (0)
51 #define rmap_printk(x...) do { } while (0)
52
53 #endif
54
55 #if defined(MMU_DEBUG) || defined(AUDIT)
56 static int dbg = 1;
57 #endif
58
59 #ifndef MMU_DEBUG
60 #define ASSERT(x) do { } while (0)
61 #else
62 #define ASSERT(x)                                                       \
63         if (!(x)) {                                                     \
64                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
65                        __FILE__, __LINE__, #x);                         \
66         }
67 #endif
68
69 #define PT64_PT_BITS 9
70 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
71 #define PT32_PT_BITS 10
72 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
73
74 #define PT_WRITABLE_SHIFT 1
75
76 #define PT_PRESENT_MASK (1ULL << 0)
77 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
78 #define PT_USER_MASK (1ULL << 2)
79 #define PT_PWT_MASK (1ULL << 3)
80 #define PT_PCD_MASK (1ULL << 4)
81 #define PT_ACCESSED_MASK (1ULL << 5)
82 #define PT_DIRTY_MASK (1ULL << 6)
83 #define PT_PAGE_SIZE_MASK (1ULL << 7)
84 #define PT_PAT_MASK (1ULL << 7)
85 #define PT_GLOBAL_MASK (1ULL << 8)
86 #define PT64_NX_MASK (1ULL << 63)
87
88 #define PT_PAT_SHIFT 7
89 #define PT_DIR_PAT_SHIFT 12
90 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
91
92 #define PT32_DIR_PSE36_SIZE 4
93 #define PT32_DIR_PSE36_SHIFT 13
94 #define PT32_DIR_PSE36_MASK \
95         (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
96
97
98 #define PT_FIRST_AVAIL_BITS_SHIFT 9
99 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
100
101 #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
102
103 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
104
105 #define PT64_LEVEL_BITS 9
106
107 #define PT64_LEVEL_SHIFT(level) \
108                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
109
110 #define PT64_LEVEL_MASK(level) \
111                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
112
113 #define PT64_INDEX(address, level)\
114         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
115
116
117 #define PT32_LEVEL_BITS 10
118
119 #define PT32_LEVEL_SHIFT(level) \
120                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
121
122 #define PT32_LEVEL_MASK(level) \
123                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
124
125 #define PT32_INDEX(address, level)\
126         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
127
128
129 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
130 #define PT64_DIR_BASE_ADDR_MASK \
131         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
132
133 #define PT32_BASE_ADDR_MASK PAGE_MASK
134 #define PT32_DIR_BASE_ADDR_MASK \
135         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
136
137
138 #define PFERR_PRESENT_MASK (1U << 0)
139 #define PFERR_WRITE_MASK (1U << 1)
140 #define PFERR_USER_MASK (1U << 2)
141 #define PFERR_FETCH_MASK (1U << 4)
142
143 #define PT64_ROOT_LEVEL 4
144 #define PT32_ROOT_LEVEL 2
145 #define PT32E_ROOT_LEVEL 3
146
147 #define PT_DIRECTORY_LEVEL 2
148 #define PT_PAGE_TABLE_LEVEL 1
149
150 #define RMAP_EXT 4
151
152 struct kvm_rmap_desc {
153         u64 *shadow_ptes[RMAP_EXT];
154         struct kvm_rmap_desc *more;
155 };
156
157 static struct kmem_cache *pte_chain_cache;
158 static struct kmem_cache *rmap_desc_cache;
159 static struct kmem_cache *mmu_page_header_cache;
160
161 static u64 __read_mostly shadow_trap_nonpresent_pte;
162 static u64 __read_mostly shadow_notrap_nonpresent_pte;
163
164 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
165 {
166         shadow_trap_nonpresent_pte = trap_pte;
167         shadow_notrap_nonpresent_pte = notrap_pte;
168 }
169 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
170
171 static int is_write_protection(struct kvm_vcpu *vcpu)
172 {
173         return vcpu->cr0 & X86_CR0_WP;
174 }
175
176 static int is_cpuid_PSE36(void)
177 {
178         return 1;
179 }
180
181 static int is_nx(struct kvm_vcpu *vcpu)
182 {
183         return vcpu->shadow_efer & EFER_NX;
184 }
185
186 static int is_present_pte(unsigned long pte)
187 {
188         return pte & PT_PRESENT_MASK;
189 }
190
191 static int is_shadow_present_pte(u64 pte)
192 {
193         pte &= ~PT_SHADOW_IO_MARK;
194         return pte != shadow_trap_nonpresent_pte
195                 && pte != shadow_notrap_nonpresent_pte;
196 }
197
198 static int is_writeble_pte(unsigned long pte)
199 {
200         return pte & PT_WRITABLE_MASK;
201 }
202
203 static int is_dirty_pte(unsigned long pte)
204 {
205         return pte & PT_DIRTY_MASK;
206 }
207
208 static int is_io_pte(unsigned long pte)
209 {
210         return pte & PT_SHADOW_IO_MARK;
211 }
212
213 static int is_rmap_pte(u64 pte)
214 {
215         return pte != shadow_trap_nonpresent_pte
216                 && pte != shadow_notrap_nonpresent_pte;
217 }
218
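/*
 * Editor's note (a sketch of the rationale, not from the original source):
 * set_64bit() gives an atomic 64-bit store even on 32-bit hosts, so a
 * shadow pte is never seen half-written, e.g. by the hardware page walker
 * of another VCPU running on the same shadow tables.
 */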
219 static void set_shadow_pte(u64 *sptep, u64 spte)
220 {
221 #ifdef CONFIG_X86_64
222         set_64bit((unsigned long *)sptep, spte);
223 #else
224         set_64bit((unsigned long long *)sptep, spte);
225 #endif
226 }
227
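/*
 * Editor's note: the mmu_topup_* and mmu_free_* helpers below pre-fill
 * small per-vcpu object caches with GFP_KERNEL allocations before the MMU
 * starts touching shadow page tables, so the update paths themselves never
 * need to allocate (and therefore never fail or sleep) mid-operation.
 */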
228 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
229                                   struct kmem_cache *base_cache, int min)
230 {
231         void *obj;
232
233         if (cache->nobjs >= min)
234                 return 0;
235         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
236                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
237                 if (!obj)
238                         return -ENOMEM;
239                 cache->objects[cache->nobjs++] = obj;
240         }
241         return 0;
242 }
243
244 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
245 {
246         while (mc->nobjs)
247                 kfree(mc->objects[--mc->nobjs]);
248 }
249
250 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
251                                        int min)
252 {
253         struct page *page;
254
255         if (cache->nobjs >= min)
256                 return 0;
257         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
258                 page = alloc_page(GFP_KERNEL);
259                 if (!page)
260                         return -ENOMEM;
261                 set_page_private(page, 0);
262                 cache->objects[cache->nobjs++] = page_address(page);
263         }
264         return 0;
265 }
266
267 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
268 {
269         while (mc->nobjs)
270                 free_page((unsigned long)mc->objects[--mc->nobjs]);
271 }
272
273 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
274 {
275         int r;
276
277         kvm_mmu_free_some_pages(vcpu);
278         r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
279                                    pte_chain_cache, 4);
280         if (r)
281                 goto out;
282         r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
283                                    rmap_desc_cache, 1);
284         if (r)
285                 goto out;
286         r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
287         if (r)
288                 goto out;
289         r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
290                                    mmu_page_header_cache, 4);
291 out:
292         return r;
293 }
294
295 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
296 {
297         mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
298         mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
299         mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
300         mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
301 }
302
303 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
304                                     size_t size)
305 {
306         void *p;
307
308         BUG_ON(!mc->nobjs);
309         p = mc->objects[--mc->nobjs];
310         memset(p, 0, size);
311         return p;
312 }
313
314 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
315 {
316         return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
317                                       sizeof(struct kvm_pte_chain));
318 }
319
320 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
321 {
322         kfree(pc);
323 }
324
325 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
326 {
327         return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
328                                       sizeof(struct kvm_rmap_desc));
329 }
330
331 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
332 {
333         kfree(rd);
334 }
335
336 /*
337  * Take gfn and return the reverse mapping to it.
338  * Note: gfn must be unaliased before this function is called.
339  */
340
341 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
342 {
343         struct kvm_memory_slot *slot;
344
345         slot = gfn_to_memslot(kvm, gfn);
346         return &slot->rmap[gfn - slot->base_gfn];
347 }
348
349 /*
350  * Reverse mapping data structures:
351  *
352  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
353  * that points to page_address(page).
354  *
355  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
356  * containing more mappings.
357  */
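/*
 * For illustration: the first spte mapping a gfn is stored directly in
 * *rmapp.  When a second spte maps the same gfn, rmap_add() below moves
 * both into a freshly allocated kvm_rmap_desc and stores the descriptor
 * pointer in *rmapp with bit zero set as a tag.
 */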
358 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
359 {
360         struct kvm_mmu_page *page;
361         struct kvm_rmap_desc *desc;
362         unsigned long *rmapp;
363         int i;
364
365         if (!is_rmap_pte(*spte))
366                 return;
367         gfn = unalias_gfn(vcpu->kvm, gfn);
368         page = page_header(__pa(spte));
369         page->gfns[spte - page->spt] = gfn;
370         rmapp = gfn_to_rmap(vcpu->kvm, gfn);
371         if (!*rmapp) {
372                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
373                 *rmapp = (unsigned long)spte;
374         } else if (!(*rmapp & 1)) {
375                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
376                 desc = mmu_alloc_rmap_desc(vcpu);
377                 desc->shadow_ptes[0] = (u64 *)*rmapp;
378                 desc->shadow_ptes[1] = spte;
379                 *rmapp = (unsigned long)desc | 1;
380         } else {
381                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
382                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
383                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
384                         desc = desc->more;
385                 if (desc->shadow_ptes[RMAP_EXT-1]) {
386                         desc->more = mmu_alloc_rmap_desc(vcpu);
387                         desc = desc->more;
388                 }
389                 for (i = 0; desc->shadow_ptes[i]; ++i)
390                         ;
391                 desc->shadow_ptes[i] = spte;
392         }
393 }
394
395 static void rmap_desc_remove_entry(unsigned long *rmapp,
396                                    struct kvm_rmap_desc *desc,
397                                    int i,
398                                    struct kvm_rmap_desc *prev_desc)
399 {
400         int j;
401
402         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
403                 ;
404         desc->shadow_ptes[i] = desc->shadow_ptes[j];
405         desc->shadow_ptes[j] = NULL;
406         if (j != 0)
407                 return;
408         if (!prev_desc && !desc->more)
409                 *rmapp = (unsigned long)desc->shadow_ptes[0];
410         else
411                 if (prev_desc)
412                         prev_desc->more = desc->more;
413                 else
414                         *rmapp = (unsigned long)desc->more | 1;
415         mmu_free_rmap_desc(desc);
416 }
417
418 static void rmap_remove(struct kvm *kvm, u64 *spte)
419 {
420         struct kvm_rmap_desc *desc;
421         struct kvm_rmap_desc *prev_desc;
422         struct kvm_mmu_page *page;
423         unsigned long *rmapp;
424         int i;
425
426         if (!is_rmap_pte(*spte))
427                 return;
428         page = page_header(__pa(spte));
429         kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
430                          PAGE_SHIFT));
431         rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
432         if (!*rmapp) {
433                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
434                 BUG();
435         } else if (!(*rmapp & 1)) {
436                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
437                 if ((u64 *)*rmapp != spte) {
438                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
439                                spte, *spte);
440                         BUG();
441                 }
442                 *rmapp = 0;
443         } else {
444                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
445                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
446                 prev_desc = NULL;
447                 while (desc) {
448                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
449                                 if (desc->shadow_ptes[i] == spte) {
450                                         rmap_desc_remove_entry(rmapp,
451                                                                desc, i,
452                                                                prev_desc);
453                                         return;
454                                 }
455                         prev_desc = desc;
456                         desc = desc->more;
457                 }
458                 BUG();
459         }
460 }
461
462 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
463 {
464         struct kvm_rmap_desc *desc;
465         struct kvm_rmap_desc *prev_desc;
466         u64 *prev_spte;
467         int i;
468
469         if (!*rmapp)
470                 return NULL;
471         else if (!(*rmapp & 1)) {
472                 if (!spte)
473                         return (u64 *)*rmapp;
474                 return NULL;
475         }
476         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
477         prev_desc = NULL;
478         prev_spte = NULL;
479         while (desc) {
480                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
481                         if (prev_spte == spte)
482                                 return desc->shadow_ptes[i];
483                         prev_spte = desc->shadow_ptes[i];
484                 }
485                 desc = desc->more;
486         }
487         return NULL;
488 }
489
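/*
 * Editor's note: rmap_write_protect() clears the writable bit in every
 * spte that maps gfn, so future guest writes to that page trap into the
 * MMU (used before the page is shadowed as a page table).
 */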
490 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
491 {
492         unsigned long *rmapp;
493         u64 *spte;
494
495         gfn = unalias_gfn(kvm, gfn);
496         rmapp = gfn_to_rmap(kvm, gfn);
497
498         spte = rmap_next(kvm, rmapp, NULL);
499         while (spte) {
500                 BUG_ON(!spte);
501                 BUG_ON(!(*spte & PT_PRESENT_MASK));
502                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
503                 if (is_writeble_pte(*spte))
504                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
505                 kvm_flush_remote_tlbs(kvm);
506                 spte = rmap_next(kvm, rmapp, spte);
507         }
508 }
509
510 #ifdef MMU_DEBUG
511 static int is_empty_shadow_page(u64 *spt)
512 {
513         u64 *pos;
514         u64 *end;
515
516         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
517                 if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
518                         printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
519                                pos, *pos);
520                         return 0;
521                 }
522         return 1;
523 }
524 #endif
525
526 static void kvm_mmu_free_page(struct kvm *kvm,
527                               struct kvm_mmu_page *page_head)
528 {
529         ASSERT(is_empty_shadow_page(page_head->spt));
530         list_del(&page_head->link);
531         __free_page(virt_to_page(page_head->spt));
532         __free_page(virt_to_page(page_head->gfns));
533         kfree(page_head);
534         ++kvm->n_free_mmu_pages;
535 }
536
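/*
 * Editor's note: a trivial hash; callers take the returned gfn modulo
 * KVM_NUM_MMU_PAGES to pick a bucket in kvm->mmu_page_hash.
 */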
537 static unsigned kvm_page_table_hashfn(gfn_t gfn)
538 {
539         return gfn;
540 }
541
542 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
543                                                u64 *parent_pte)
544 {
545         struct kvm_mmu_page *page;
546
547         if (!vcpu->kvm->n_free_mmu_pages)
548                 return NULL;
549
550         page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
551                                       sizeof *page);
552         page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
553         page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
554         set_page_private(virt_to_page(page->spt), (unsigned long)page);
555         list_add(&page->link, &vcpu->kvm->active_mmu_pages);
556         ASSERT(is_empty_shadow_page(page->spt));
557         page->slot_bitmap = 0;
558         page->multimapped = 0;
559         page->parent_pte = parent_pte;
560         --vcpu->kvm->n_free_mmu_pages;
561         return page;
562 }
563
564 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
565                                     struct kvm_mmu_page *page, u64 *parent_pte)
566 {
567         struct kvm_pte_chain *pte_chain;
568         struct hlist_node *node;
569         int i;
570
571         if (!parent_pte)
572                 return;
573         if (!page->multimapped) {
574                 u64 *old = page->parent_pte;
575
576                 if (!old) {
577                         page->parent_pte = parent_pte;
578                         return;
579                 }
580                 page->multimapped = 1;
581                 pte_chain = mmu_alloc_pte_chain(vcpu);
582                 INIT_HLIST_HEAD(&page->parent_ptes);
583                 hlist_add_head(&pte_chain->link, &page->parent_ptes);
584                 pte_chain->parent_ptes[0] = old;
585         }
586         hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
587                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
588                         continue;
589                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
590                         if (!pte_chain->parent_ptes[i]) {
591                                 pte_chain->parent_ptes[i] = parent_pte;
592                                 return;
593                         }
594         }
595         pte_chain = mmu_alloc_pte_chain(vcpu);
596         BUG_ON(!pte_chain);
597         hlist_add_head(&pte_chain->link, &page->parent_ptes);
598         pte_chain->parent_ptes[0] = parent_pte;
599 }
600
601 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
602                                        u64 *parent_pte)
603 {
604         struct kvm_pte_chain *pte_chain;
605         struct hlist_node *node;
606         int i;
607
608         if (!page->multimapped) {
609                 BUG_ON(page->parent_pte != parent_pte);
610                 page->parent_pte = NULL;
611                 return;
612         }
613         hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
614                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
615                         if (!pte_chain->parent_ptes[i])
616                                 break;
617                         if (pte_chain->parent_ptes[i] != parent_pte)
618                                 continue;
619                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
620                                 && pte_chain->parent_ptes[i + 1]) {
621                                 pte_chain->parent_ptes[i]
622                                         = pte_chain->parent_ptes[i + 1];
623                                 ++i;
624                         }
625                         pte_chain->parent_ptes[i] = NULL;
626                         if (i == 0) {
627                                 hlist_del(&pte_chain->link);
628                                 mmu_free_pte_chain(pte_chain);
629                                 if (hlist_empty(&page->parent_ptes)) {
630                                         page->multimapped = 0;
631                                         page->parent_pte = NULL;
632                                 }
633                         }
634                         return;
635                 }
636         BUG();
637 }
638
639 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
640                                                 gfn_t gfn)
641 {
642         unsigned index;
643         struct hlist_head *bucket;
644         struct kvm_mmu_page *page;
645         struct hlist_node *node;
646
647         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
648         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
649         bucket = &kvm->mmu_page_hash[index];
650         hlist_for_each_entry(page, node, bucket, hash_link)
651                 if (page->gfn == gfn && !page->role.metaphysical) {
652                         pgprintk("%s: found role %x\n",
653                                  __FUNCTION__, page->role.word);
654                         return page;
655                 }
656         return NULL;
657 }
658
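/*
 * Editor's note: kvm_mmu_get_page() finds or creates the shadow page for
 * (gfn, role).  When a 32-bit guest page table is shadowed by PAE shadow
 * pages, one guest page holds more entries than one shadow page can, so
 * role.quadrant records which part of the guest page this shadow page
 * maps; role.metaphysical marks shadow pages that do not shadow a
 * guest-visible page table at all (e.g. the pseudo pages built by
 * nonpaging_map()).
 */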
659 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
660                                              gfn_t gfn,
661                                              gva_t gaddr,
662                                              unsigned level,
663                                              int metaphysical,
664                                              unsigned hugepage_access,
665                                              u64 *parent_pte)
666 {
667         union kvm_mmu_page_role role;
668         unsigned index;
669         unsigned quadrant;
670         struct hlist_head *bucket;
671         struct kvm_mmu_page *page;
672         struct hlist_node *node;
673
674         role.word = 0;
675         role.glevels = vcpu->mmu.root_level;
676         role.level = level;
677         role.metaphysical = metaphysical;
678         role.hugepage_access = hugepage_access;
679         if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
680                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
681                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
682                 role.quadrant = quadrant;
683         }
684         pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
685                  gfn, role.word);
686         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
687         bucket = &vcpu->kvm->mmu_page_hash[index];
688         hlist_for_each_entry(page, node, bucket, hash_link)
689                 if (page->gfn == gfn && page->role.word == role.word) {
690                         mmu_page_add_parent_pte(vcpu, page, parent_pte);
691                         pgprintk("%s: found\n", __FUNCTION__);
692                         return page;
693                 }
694         page = kvm_mmu_alloc_page(vcpu, parent_pte);
695         if (!page)
696                 return page;
697         pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
698         page->gfn = gfn;
699         page->role = role;
700         hlist_add_head(&page->hash_link, bucket);
701         vcpu->mmu.prefetch_page(vcpu, page);
702         if (!metaphysical)
703                 rmap_write_protect(vcpu->kvm, gfn);
704         return page;
705 }
706
707 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
708                                          struct kvm_mmu_page *page)
709 {
710         unsigned i;
711         u64 *pt;
712         u64 ent;
713
714         pt = page->spt;
715
716         if (page->role.level == PT_PAGE_TABLE_LEVEL) {
717                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
718                         if (is_shadow_present_pte(pt[i]))
719                                 rmap_remove(kvm, &pt[i]);
720                         pt[i] = shadow_trap_nonpresent_pte;
721                 }
722                 kvm_flush_remote_tlbs(kvm);
723                 return;
724         }
725
726         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
727                 ent = pt[i];
728
729                 pt[i] = shadow_trap_nonpresent_pte;
730                 if (!is_shadow_present_pte(ent))
731                         continue;
732                 ent &= PT64_BASE_ADDR_MASK;
733                 mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
734         }
735         kvm_flush_remote_tlbs(kvm);
736 }
737
738 static void kvm_mmu_put_page(struct kvm_mmu_page *page,
739                              u64 *parent_pte)
740 {
741         mmu_page_remove_parent_pte(page, parent_pte);
742 }
743
744 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
745 {
746         int i;
747
748         for (i = 0; i < KVM_MAX_VCPUS; ++i)
749                 if (kvm->vcpus[i])
750                         kvm->vcpus[i]->last_pte_updated = NULL;
751 }
752
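/*
 * Editor's note: tear down a shadow page: detach it from every parent
 * pte, unlink its children, and free it unless it is still referenced as
 * a root, in which case it stays on the active list until the root is
 * dropped.
 */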
753 static void kvm_mmu_zap_page(struct kvm *kvm,
754                              struct kvm_mmu_page *page)
755 {
756         u64 *parent_pte;
757
758         while (page->multimapped || page->parent_pte) {
759                 if (!page->multimapped)
760                         parent_pte = page->parent_pte;
761                 else {
762                         struct kvm_pte_chain *chain;
763
764                         chain = container_of(page->parent_ptes.first,
765                                              struct kvm_pte_chain, link);
766                         parent_pte = chain->parent_ptes[0];
767                 }
768                 BUG_ON(!parent_pte);
769                 kvm_mmu_put_page(page, parent_pte);
770                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
771         }
772         kvm_mmu_page_unlink_children(kvm, page);
773         if (!page->root_count) {
774                 hlist_del(&page->hash_link);
775                 kvm_mmu_free_page(kvm, page);
776         } else
777                 list_move(&page->link, &kvm->active_mmu_pages);
778         kvm_mmu_reset_last_pte_updated(kvm);
779 }
780
781 /*
782  * Change the number of mmu pages allocated to the VM.
783  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
784  */
785 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
786 {
787         /*
788          * If we set the number of mmu pages to be smaller than the
789          * number of active pages, we must free some mmu pages before we
790          * change the value.
791          */
792
793         if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
794             kvm_nr_mmu_pages) {
795                 int n_used_mmu_pages = kvm->n_alloc_mmu_pages
796                                        - kvm->n_free_mmu_pages;
797
798                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
799                         struct kvm_mmu_page *page;
800
801                         page = container_of(kvm->active_mmu_pages.prev,
802                                             struct kvm_mmu_page, link);
803                         kvm_mmu_zap_page(kvm, page);
804                         n_used_mmu_pages--;
805                 }
806                 kvm->n_free_mmu_pages = 0;
807         } else
809                 kvm->n_free_mmu_pages += kvm_nr_mmu_pages
810                                          - kvm->n_alloc_mmu_pages;
811
812         kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
813 }
814
815 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
816 {
817         unsigned index;
818         struct hlist_head *bucket;
819         struct kvm_mmu_page *page;
820         struct hlist_node *node, *n;
821         int r;
822
823         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
824         r = 0;
825         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
826         bucket = &kvm->mmu_page_hash[index];
827         hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
828                 if (page->gfn == gfn && !page->role.metaphysical) {
829                         pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
830                                  page->role.word);
831                         kvm_mmu_zap_page(kvm, page);
832                         r = 1;
833                 }
834         return r;
835 }
836
837 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
838 {
839         struct kvm_mmu_page *page;
840
841         while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
842                 pgprintk("%s: zap %lx %x\n",
843                          __FUNCTION__, gfn, page->role.word);
844                 kvm_mmu_zap_page(kvm, page);
845         }
846 }
847
848 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
849 {
850         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
851         struct kvm_mmu_page *page_head = page_header(__pa(pte));
852
853         __set_bit(slot, &page_head->slot_bitmap);
854 }
855
856 hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
857 {
858         struct page *page;
859         hpa_t hpa;
860
861         ASSERT((gpa & HPA_ERR_MASK) == 0);
862         page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
863         hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
864         if (is_error_page(page))
865                 return hpa | HPA_ERR_MASK;
866         return hpa;
867 }
868
869 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
870 {
871         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
872
873         if (gpa == UNMAPPED_GVA)
874                 return UNMAPPED_GVA;
875         return gpa_to_hpa(vcpu->kvm, gpa);
876 }
877
878 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
879 {
880         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
881
882         if (gpa == UNMAPPED_GVA)
883                 return NULL;
884         return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
885 }
886
887 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
888 {
889 }
890
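/*
 * Editor's note: build the shadow mapping for a guest running without
 * paging: walk the shadow table from the root, allocating intermediate
 * levels as needed, and install a writable 4K pte for the faulting
 * address at level 1.
 */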
891 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
892 {
893         int level = PT32E_ROOT_LEVEL;
894         hpa_t table_addr = vcpu->mmu.root_hpa;
895
896         for (; ; level--) {
897                 u32 index = PT64_INDEX(v, level);
898                 u64 *table;
899                 u64 pte;
900
901                 ASSERT(VALID_PAGE(table_addr));
902                 table = __va(table_addr);
903
904                 if (level == 1) {
905                         int was_rmapped;
906
907                         pte = table[index];
908                         was_rmapped = is_rmap_pte(pte);
909                         if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
910                                 return 0;
911                         mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
912                         page_header_update_slot(vcpu->kvm, table, v);
913                         table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
914                                                                 PT_USER_MASK;
915                         if (!was_rmapped)
916                                 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
917                         else
918                                 kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
919                         return 0;
920                 }
921
922                 if (table[index] == shadow_trap_nonpresent_pte) {
923                         struct kvm_mmu_page *new_table;
924                         gfn_t pseudo_gfn;
925
926                         pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
927                                 >> PAGE_SHIFT;
928                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
929                                                      v, level - 1,
930                                                      1, 3, &table[index]);
931                         if (!new_table) {
932                                 pgprintk("nonpaging_map: ENOMEM\n");
933                                 kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
934                                 return -ENOMEM;
935                         }
936
937                         table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
938                                 | PT_WRITABLE_MASK | PT_USER_MASK;
939                 }
940                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
941         }
942 }
943
944 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
945                                     struct kvm_mmu_page *sp)
946 {
947         int i;
948
949         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
950                 sp->spt[i] = shadow_trap_nonpresent_pte;
951 }
952
953 static void mmu_free_roots(struct kvm_vcpu *vcpu)
954 {
955         int i;
956         struct kvm_mmu_page *page;
957
958         if (!VALID_PAGE(vcpu->mmu.root_hpa))
959                 return;
960 #ifdef CONFIG_X86_64
961         if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
962                 hpa_t root = vcpu->mmu.root_hpa;
963
964                 page = page_header(root);
965                 --page->root_count;
966                 vcpu->mmu.root_hpa = INVALID_PAGE;
967                 return;
968         }
969 #endif
970         for (i = 0; i < 4; ++i) {
971                 hpa_t root = vcpu->mmu.pae_root[i];
972
973                 if (root) {
974                         root &= PT64_BASE_ADDR_MASK;
975                         page = page_header(root);
976                         --page->root_count;
977                 }
978                 vcpu->mmu.pae_root[i] = INVALID_PAGE;
979         }
980         vcpu->mmu.root_hpa = INVALID_PAGE;
981 }
982
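/*
 * Editor's note: allocate shadow roots for the current mode: a single
 * level-4 root in long mode, otherwise four PAE roots (one per gigabyte
 * of guest address space) collected in vcpu->mmu.pae_root.
 */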
983 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
984 {
985         int i;
986         gfn_t root_gfn;
987         struct kvm_mmu_page *page;
988
989         root_gfn = vcpu->cr3 >> PAGE_SHIFT;
990
991 #ifdef CONFIG_X86_64
992         if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
993                 hpa_t root = vcpu->mmu.root_hpa;
994
995                 ASSERT(!VALID_PAGE(root));
996                 page = kvm_mmu_get_page(vcpu, root_gfn, 0,
997                                         PT64_ROOT_LEVEL, 0, 0, NULL);
998                 root = __pa(page->spt);
999                 ++page->root_count;
1000                 vcpu->mmu.root_hpa = root;
1001                 return;
1002         }
1003 #endif
1004         for (i = 0; i < 4; ++i) {
1005                 hpa_t root = vcpu->mmu.pae_root[i];
1006
1007                 ASSERT(!VALID_PAGE(root));
1008                 if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
1009                         if (!is_present_pte(vcpu->pdptrs[i])) {
1010                                 vcpu->mmu.pae_root[i] = 0;
1011                                 continue;
1012                         }
1013                         root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
1014                 } else if (vcpu->mmu.root_level == 0)
1015                         root_gfn = 0;
1016                 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1017                                         PT32_ROOT_LEVEL, !is_paging(vcpu),
1018                                         0, NULL);
1019                 root = __pa(page->spt);
1020                 ++page->root_count;
1021                 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
1022         }
1023         vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
1024 }
1025
1026 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1027 {
1028         return vaddr;
1029 }
1030
1031 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1032                                u32 error_code)
1033 {
1034         gpa_t addr = gva;
1035         hpa_t paddr;
1036         int r;
1037
1038         r = mmu_topup_memory_caches(vcpu);
1039         if (r)
1040                 return r;
1041
1042         ASSERT(vcpu);
1043         ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
1044
1045
1046         paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
1047
1048         if (is_error_hpa(paddr)) {
1049                 kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
1050                                  >> PAGE_SHIFT));
1051                 return 1;
1052         }
1053
1054         return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
1055 }
1056
1057 static void nonpaging_free(struct kvm_vcpu *vcpu)
1058 {
1059         mmu_free_roots(vcpu);
1060 }
1061
1062 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1063 {
1064         struct kvm_mmu *context = &vcpu->mmu;
1065
1066         context->new_cr3 = nonpaging_new_cr3;
1067         context->page_fault = nonpaging_page_fault;
1068         context->gva_to_gpa = nonpaging_gva_to_gpa;
1069         context->free = nonpaging_free;
1070         context->prefetch_page = nonpaging_prefetch_page;
1071         context->root_level = 0;
1072         context->shadow_root_level = PT32E_ROOT_LEVEL;
1073         context->root_hpa = INVALID_PAGE;
1074         return 0;
1075 }
1076
1077 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1078 {
1079         ++vcpu->stat.tlb_flush;
1080         kvm_x86_ops->tlb_flush(vcpu);
1081 }
1082
1083 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1084 {
1085         pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
1086         mmu_free_roots(vcpu);
1087 }
1088
1089 static void inject_page_fault(struct kvm_vcpu *vcpu,
1090                               u64 addr,
1091                               u32 err_code)
1092 {
1093         kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
1094 }
1095
1096 static void paging_free(struct kvm_vcpu *vcpu)
1097 {
1098         nonpaging_free(vcpu);
1099 }
1100
1101 #define PTTYPE 64
1102 #include "paging_tmpl.h"
1103 #undef PTTYPE
1104
1105 #define PTTYPE 32
1106 #include "paging_tmpl.h"
1107 #undef PTTYPE
1108
1109 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1110 {
1111         struct kvm_mmu *context = &vcpu->mmu;
1112
1113         ASSERT(is_pae(vcpu));
1114         context->new_cr3 = paging_new_cr3;
1115         context->page_fault = paging64_page_fault;
1116         context->gva_to_gpa = paging64_gva_to_gpa;
1117         context->prefetch_page = paging64_prefetch_page;
1118         context->free = paging_free;
1119         context->root_level = level;
1120         context->shadow_root_level = level;
1121         context->root_hpa = INVALID_PAGE;
1122         return 0;
1123 }
1124
1125 static int paging64_init_context(struct kvm_vcpu *vcpu)
1126 {
1127         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1128 }
1129
1130 static int paging32_init_context(struct kvm_vcpu *vcpu)
1131 {
1132         struct kvm_mmu *context = &vcpu->mmu;
1133
1134         context->new_cr3 = paging_new_cr3;
1135         context->page_fault = paging32_page_fault;
1136         context->gva_to_gpa = paging32_gva_to_gpa;
1137         context->free = paging_free;
1138         context->prefetch_page = paging32_prefetch_page;
1139         context->root_level = PT32_ROOT_LEVEL;
1140         context->shadow_root_level = PT32E_ROOT_LEVEL;
1141         context->root_hpa = INVALID_PAGE;
1142         return 0;
1143 }
1144
1145 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1146 {
1147         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1148 }
1149
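/*
 * Editor's note: pick the MMU context matching the guest's current paging
 * mode: nonpaging, legacy 32-bit, PAE, or long mode.
 */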
1150 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1151 {
1152         ASSERT(vcpu);
1153         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1154
1155         if (!is_paging(vcpu))
1156                 return nonpaging_init_context(vcpu);
1157         else if (is_long_mode(vcpu))
1158                 return paging64_init_context(vcpu);
1159         else if (is_pae(vcpu))
1160                 return paging32E_init_context(vcpu);
1161         else
1162                 return paging32_init_context(vcpu);
1163 }
1164
1165 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1166 {
1167         ASSERT(vcpu);
1168         if (VALID_PAGE(vcpu->mmu.root_hpa)) {
1169                 vcpu->mmu.free(vcpu);
1170                 vcpu->mmu.root_hpa = INVALID_PAGE;
1171         }
1172 }
1173
1174 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1175 {
1176         destroy_kvm_mmu(vcpu);
1177         return init_kvm_mmu(vcpu);
1178 }
1179 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1180
1181 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1182 {
1183         int r;
1184
1185         mutex_lock(&vcpu->kvm->lock);
1186         r = mmu_topup_memory_caches(vcpu);
1187         if (r)
1188                 goto out;
1189         mmu_alloc_roots(vcpu);
1190         kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1191         kvm_mmu_flush_tlb(vcpu);
1192 out:
1193         mutex_unlock(&vcpu->kvm->lock);
1194         return r;
1195 }
1196 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1197
1198 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1199 {
1200         mmu_free_roots(vcpu);
1201 }
1202
1203 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1204                                   struct kvm_mmu_page *page,
1205                                   u64 *spte)
1206 {
1207         u64 pte;
1208         struct kvm_mmu_page *child;
1209
1210         pte = *spte;
1211         if (is_shadow_present_pte(pte)) {
1212                 if (page->role.level == PT_PAGE_TABLE_LEVEL)
1213                         rmap_remove(vcpu->kvm, spte);
1214                 else {
1215                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1216                         mmu_page_remove_parent_pte(child, spte);
1217                 }
1218         }
1219         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1220         kvm_flush_remote_tlbs(vcpu->kvm);
1221 }
1222
1223 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1224                                   struct kvm_mmu_page *page,
1225                                   u64 *spte,
1226                                   const void *new, int bytes,
1227                                   int offset_in_pte)
1228 {
1229         if (page->role.level != PT_PAGE_TABLE_LEVEL)
1230                 return;
1231
1232         if (page->role.glevels == PT32_ROOT_LEVEL)
1233                 paging32_update_pte(vcpu, page, spte, new, bytes,
1234                                     offset_in_pte);
1235         else
1236                 paging64_update_pte(vcpu, page, spte, new, bytes,
1237                                     offset_in_pte);
1238 }
1239
1240 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1241 {
1242         u64 *spte = vcpu->last_pte_updated;
1243
1244         return !!(spte && (*spte & PT_ACCESSED_MASK));
1245 }
1246
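/*
 * Editor's note: called when the guest writes to a gpa that is shadowed
 * as a page table: either update the affected sptes in place or, for
 * misaligned or flooded writes, unshadow the page entirely.
 */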
1247 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1248                        const u8 *new, int bytes)
1249 {
1250         gfn_t gfn = gpa >> PAGE_SHIFT;
1251         struct kvm_mmu_page *page;
1252         struct hlist_node *node, *n;
1253         struct hlist_head *bucket;
1254         unsigned index;
1255         u64 *spte;
1256         unsigned offset = offset_in_page(gpa);
1257         unsigned pte_size;
1258         unsigned page_offset;
1259         unsigned misaligned;
1260         unsigned quadrant;
1261         int level;
1262         int flooded = 0;
1263         int npte;
1264
1265         pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1266         kvm_mmu_audit(vcpu, "pre pte write");
1267         if (gfn == vcpu->last_pt_write_gfn
1268             && !last_updated_pte_accessed(vcpu)) {
1269                 ++vcpu->last_pt_write_count;
1270                 if (vcpu->last_pt_write_count >= 3)
1271                         flooded = 1;
1272         } else {
1273                 vcpu->last_pt_write_gfn = gfn;
1274                 vcpu->last_pt_write_count = 1;
1275                 vcpu->last_pte_updated = NULL;
1276         }
1277         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
1278         bucket = &vcpu->kvm->mmu_page_hash[index];
1279         hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
1280                 if (page->gfn != gfn || page->role.metaphysical)
1281                         continue;
1282                 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1283                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1284                 misaligned |= bytes < 4;
1285                 if (misaligned || flooded) {
1286                         /*
1287                          * Misaligned accesses are too much trouble to fix
1288                          * up; also, they usually indicate a page is not used
1289                          * as a page table.
1290                          *
1291                          * If we're seeing too many writes to a page,
1292                          * it may no longer be a page table, or we may be
1293                          * forking, in which case it is better to unmap the
1294                          * page.
1295                          */
1296                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1297                                  gpa, bytes, page->role.word);
1298                         kvm_mmu_zap_page(vcpu->kvm, page);
1299                         continue;
1300                 }
1301                 page_offset = offset;
1302                 level = page->role.level;
1303                 npte = 1;
1304                 if (page->role.glevels == PT32_ROOT_LEVEL) {
1305                         page_offset <<= 1;      /* 32->64 */
1306                         /*
1307                          * A 32-bit pde maps 4MB while the shadow pdes map
1308                          * only 2MB.  So we need to double the offset again
1309                          * and zap two pdes instead of one.
1310                          */
1311                         if (level == PT32_ROOT_LEVEL) {
1312                                 page_offset &= ~7; /* kill rounding error */
1313                                 page_offset <<= 1;
1314                                 npte = 2;
1315                         }
1316                         quadrant = page_offset >> PAGE_SHIFT;
1317                         page_offset &= ~PAGE_MASK;
1318                         if (quadrant != page->role.quadrant)
1319                                 continue;
1320                 }
1321                 spte = &page->spt[page_offset / sizeof(*spte)];
1322                 while (npte--) {
1323                         mmu_pte_write_zap_pte(vcpu, page, spte);
1324                         mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
1325                                               page_offset & (pte_size - 1));
1326                         ++spte;
1327                 }
1328         }
1329         kvm_mmu_audit(vcpu, "post pte write");
1330 }
1331
1332 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1333 {
1334         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
1335
1336         return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1337 }
1338
1339 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1340 {
1341         while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
1342                 struct kvm_mmu_page *page;
1343
1344                 page = container_of(vcpu->kvm->active_mmu_pages.prev,
1345                                     struct kvm_mmu_page, link);
1346                 kvm_mmu_zap_page(vcpu->kvm, page);
1347         }
1348 }
1349
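/*
 * Editor's note: top-level shadow fault handler: let the per-mode
 * page_fault handler try to fix the fault; if it cannot (e.g. MMIO, or a
 * write to a write-protected shadowed page table), fall back to emulating
 * the faulting instruction.
 */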
1350 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1351 {
1352         int r;
1353         enum emulation_result er;
1354
1355         mutex_lock(&vcpu->kvm->lock);
1356         r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
1357         if (r < 0)
1358                 goto out;
1359
1360         if (!r) {
1361                 r = 1;
1362                 goto out;
1363         }
1364
1365         r = mmu_topup_memory_caches(vcpu);
1366         if (r)
1367                 goto out;
1368
1369         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1370         mutex_unlock(&vcpu->kvm->lock);
1371
1372         switch (er) {
1373         case EMULATE_DONE:
1374                 return 1;
1375         case EMULATE_DO_MMIO:
1376                 ++vcpu->stat.mmio_exits;
1377                 return 0;
1378         case EMULATE_FAIL:
1379                 kvm_report_emulation_failure(vcpu, "pagetable");
1380                 return 1;
1381         default:
1382                 BUG();
1383         }
1384 out:
1385         mutex_unlock(&vcpu->kvm->lock);
1386         return r;
1387 }
1388 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1389
1390 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1391 {
1392         struct kvm_mmu_page *page;
1393
1394         while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
1395                 page = container_of(vcpu->kvm->active_mmu_pages.next,
1396                                     struct kvm_mmu_page, link);
1397                 kvm_mmu_zap_page(vcpu->kvm, page);
1398         }
1399         free_page((unsigned long)vcpu->mmu.pae_root);
1400 }
1401
1402 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1403 {
1404         struct page *page;
1405         int i;
1406
1407         ASSERT(vcpu);
1408
1409         if (vcpu->kvm->n_requested_mmu_pages)
1410                 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
1411         else
1412                 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
1413         /*
1414          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1415          * Therefore we need to allocate shadow page tables in the first
1416          * 4GB of memory, which happens to fit the DMA32 zone.
1417          */
1418         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1419         if (!page)
1420                 goto error_1;
1421         vcpu->mmu.pae_root = page_address(page);
1422         for (i = 0; i < 4; ++i)
1423                 vcpu->mmu.pae_root[i] = INVALID_PAGE;
1424
1425         return 0;
1426
1427 error_1:
1428         free_mmu_pages(vcpu);
1429         return -ENOMEM;
1430 }
1431
1432 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1433 {
1434         ASSERT(vcpu);
1435         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1436
1437         return alloc_mmu_pages(vcpu);
1438 }
1439
1440 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1441 {
1442         ASSERT(vcpu);
1443         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1444
1445         return init_kvm_mmu(vcpu);
1446 }
1447
1448 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1449 {
1450         ASSERT(vcpu);
1451
1452         destroy_kvm_mmu(vcpu);
1453         free_mmu_pages(vcpu);
1454         mmu_free_memory_caches(vcpu);
1455 }
1456
1457 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1458 {
1459         struct kvm_mmu_page *page;
1460
1461         list_for_each_entry(page, &kvm->active_mmu_pages, link) {
1462                 int i;
1463                 u64 *pt;
1464
1465                 if (!test_bit(slot, &page->slot_bitmap))
1466                         continue;
1467
1468                 pt = page->spt;
1469                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1470                         /* avoid RMW */
1471                         if (pt[i] & PT_WRITABLE_MASK)
1472                                 pt[i] &= ~PT_WRITABLE_MASK;
1473         }
1474 }
1475
1476 void kvm_mmu_zap_all(struct kvm *kvm)
1477 {
1478         struct kvm_mmu_page *page, *node;
1479
1480         list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
1481                 kvm_mmu_zap_page(kvm, page);
1482
1483         kvm_flush_remote_tlbs(kvm);
1484 }
1485
1486 void kvm_mmu_module_exit(void)
1487 {
1488         if (pte_chain_cache)
1489                 kmem_cache_destroy(pte_chain_cache);
1490         if (rmap_desc_cache)
1491                 kmem_cache_destroy(rmap_desc_cache);
1492         if (mmu_page_header_cache)
1493                 kmem_cache_destroy(mmu_page_header_cache);
1494 }
1495
1496 int kvm_mmu_module_init(void)
1497 {
1498         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1499                                             sizeof(struct kvm_pte_chain),
1500                                             0, 0, NULL);
1501         if (!pte_chain_cache)
1502                 goto nomem;
1503         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1504                                             sizeof(struct kvm_rmap_desc),
1505                                             0, 0, NULL);
1506         if (!rmap_desc_cache)
1507                 goto nomem;
1508
1509         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1510                                                   sizeof(struct kvm_mmu_page),
1511                                                   0, 0, NULL);
1512         if (!mmu_page_header_cache)
1513                 goto nomem;
1514
1515         return 0;
1516
1517 nomem:
1518         kvm_mmu_module_exit();
1519         return -ENOMEM;
1520 }
1521
1522 #ifdef AUDIT
1523
1524 static const char *audit_msg;
1525
1526 static gva_t canonicalize(gva_t gva)
1527 {
1528 #ifdef CONFIG_X86_64
1529         gva = (long long)(gva << 16) >> 16;
1530 #endif
1531         return gva;
1532 }
1533
1534 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1535                                 gva_t va, int level)
1536 {
1537         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
1538         int i;
1539         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
1540
1541         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
1542                 u64 ent = pt[i];
1543
1544                 if (ent == shadow_trap_nonpresent_pte)
1545                         continue;
1546
1547                 va = canonicalize(va);
1548                 if (level > 1) {
1549                         if (ent == shadow_notrap_nonpresent_pte)
1550                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
1551                                        " in nonleaf level: levels %d gva %lx"
1552                                        " level %d pte %llx\n", audit_msg,
1553                                        vcpu->mmu.root_level, va, level, ent);
1554
1555                         audit_mappings_page(vcpu, ent, va, level - 1);
1556                 } else {
1557                         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
1558                         hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
1559                         struct page *page;
1560
1561                         if (is_shadow_present_pte(ent)
1562                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
1563                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
1564                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
1565                                        audit_msg, vcpu->mmu.root_level,
1566                                        va, gpa, hpa, ent,
1567                                        is_shadow_present_pte(ent));
1568                         else if (ent == shadow_notrap_nonpresent_pte
1569                                  && !is_error_hpa(hpa))
1570                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
1571                                        " valid guest gva %lx\n", audit_msg, va);
1572                         page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
1573                                            >> PAGE_SHIFT);
1574                         kvm_release_page(page);
1575
1576                 }
1577         }
1578 }
1579
1580 static void audit_mappings(struct kvm_vcpu *vcpu)
1581 {
1582         unsigned i;
1583
1584         if (vcpu->mmu.root_level == 4)
1585                 audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
1586         else
1587                 for (i = 0; i < 4; ++i)
1588                         if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
1589                                 audit_mappings_page(vcpu,
1590                                                     vcpu->mmu.pae_root[i],
1591                                                     i << 30,
1592                                                     2);
1593 }
1594
1595 static int count_rmaps(struct kvm_vcpu *vcpu)
1596 {
1597         int nmaps = 0;
1598         int i, j, k;
1599
1600         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1601                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
1602                 struct kvm_rmap_desc *d;
1603
1604                 for (j = 0; j < m->npages; ++j) {
1605                         unsigned long *rmapp = &m->rmap[j];
1606
1607                         if (!*rmapp)
1608                                 continue;
1609                         if (!(*rmapp & 1)) {
1610                                 ++nmaps;
1611                                 continue;
1612                         }
1613                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
1614                         while (d) {
1615                                 for (k = 0; k < RMAP_EXT; ++k)
1616                                         if (d->shadow_ptes[k])
1617                                                 ++nmaps;
1618                                         else
1619                                                 break;
1620                                 d = d->more;
1621                         }
1622                 }
1623         }
1624         return nmaps;
1625 }
1626
1627 static int count_writable_mappings(struct kvm_vcpu *vcpu)
1628 {
1629         int nmaps = 0;
1630         struct kvm_mmu_page *page;
1631         int i;
1632
1633         list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1634                 u64 *pt = page->spt;
1635
1636                 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1637                         continue;
1638
1639                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1640                         u64 ent = pt[i];
1641
1642                         if (!(ent & PT_PRESENT_MASK))
1643                                 continue;
1644                         if (!(ent & PT_WRITABLE_MASK))
1645                                 continue;
1646                         ++nmaps;
1647                 }
1648         }
1649         return nmaps;
1650 }
1651
1652 static void audit_rmap(struct kvm_vcpu *vcpu)
1653 {
1654         int n_rmap = count_rmaps(vcpu);
1655         int n_actual = count_writable_mappings(vcpu);
1656
1657         if (n_rmap != n_actual)
1658                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1659                        __FUNCTION__, audit_msg, n_rmap, n_actual);
1660 }
1661
1662 static void audit_write_protection(struct kvm_vcpu *vcpu)
1663 {
1664         struct kvm_mmu_page *page;
1665         struct kvm_memory_slot *slot;
1666         unsigned long *rmapp;
1667         gfn_t gfn;
1668
1669         list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1670                 if (page->role.metaphysical)
1671                         continue;
1672
1673                 slot = gfn_to_memslot(vcpu->kvm, page->gfn);
1674                 gfn = unalias_gfn(vcpu->kvm, page->gfn);
1675                 rmapp = &slot->rmap[gfn - slot->base_gfn];
1676                 if (*rmapp)
1677                         printk(KERN_ERR "%s: (%s) shadow page has writable"
1678                                " mappings: gfn %lx role %x\n",
1679                                __FUNCTION__, audit_msg, page->gfn,
1680                                page->role.word);
1681         }
1682 }
1683
1684 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1685 {
1686         int olddbg = dbg;
1687
1688         dbg = 0;
1689         audit_msg = msg;
1690         audit_rmap(vcpu);
1691         audit_write_protection(vcpu);
1692         audit_mappings(vcpu);
1693         dbg = olddbg;
1694 }
1695
1696 #endif