KVM: MMU: large page support
arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "mmu.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31
32 #include <asm/page.h>
33 #include <asm/cmpxchg.h>
34 #include <asm/io.h>
35
36 /*
37  * Setting this variable to true enables Two-Dimensional Paging (TDP),
38  * where the hardware walks 2 page tables:
39  * 1. the guest-virtual to guest-physical
40  * 2. while doing 1., it also walks guest-physical to host-physical
41  * If the hardware supports that, we don't need to do shadow paging.
42  */
43 static bool tdp_enabled = false;
44
45 #undef MMU_DEBUG
46
47 #undef AUDIT
48
49 #ifdef AUDIT
50 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
51 #else
52 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
53 #endif
54
55 #ifdef MMU_DEBUG
56
57 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
58 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
59
60 #else
61
62 #define pgprintk(x...) do { } while (0)
63 #define rmap_printk(x...) do { } while (0)
64
65 #endif
66
67 #if defined(MMU_DEBUG) || defined(AUDIT)
68 static int dbg = 1;
69 #endif
70
71 #ifndef MMU_DEBUG
72 #define ASSERT(x) do { } while (0)
73 #else
74 #define ASSERT(x)                                                       \
75         if (!(x)) {                                                     \
76                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
77                        __FILE__, __LINE__, #x);                         \
78         }
79 #endif
80
81 #define PT64_PT_BITS 9
82 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
83 #define PT32_PT_BITS 10
84 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
85
86 #define PT_WRITABLE_SHIFT 1
87
88 #define PT_PRESENT_MASK (1ULL << 0)
89 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
90 #define PT_USER_MASK (1ULL << 2)
91 #define PT_PWT_MASK (1ULL << 3)
92 #define PT_PCD_MASK (1ULL << 4)
93 #define PT_ACCESSED_MASK (1ULL << 5)
94 #define PT_DIRTY_MASK (1ULL << 6)
95 #define PT_PAGE_SIZE_MASK (1ULL << 7)
96 #define PT_PAT_MASK (1ULL << 7)
97 #define PT_GLOBAL_MASK (1ULL << 8)
98 #define PT64_NX_SHIFT 63
99 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
100
101 #define PT_PAT_SHIFT 7
102 #define PT_DIR_PAT_SHIFT 12
103 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
104
105 #define PT32_DIR_PSE36_SIZE 4
106 #define PT32_DIR_PSE36_SHIFT 13
107 #define PT32_DIR_PSE36_MASK \
108         (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
109
110
111 #define PT_FIRST_AVAIL_BITS_SHIFT 9
112 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
113
114 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
115
116 #define PT64_LEVEL_BITS 9
117
118 #define PT64_LEVEL_SHIFT(level) \
119                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
120
121 #define PT64_LEVEL_MASK(level) \
122                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
123
124 #define PT64_INDEX(address, level)\
125         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
126
127
128 #define PT32_LEVEL_BITS 10
129
130 #define PT32_LEVEL_SHIFT(level) \
131                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
132
133 #define PT32_LEVEL_MASK(level) \
134                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
135
136 #define PT32_INDEX(address, level)\
137         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
138
139
140 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
141 #define PT64_DIR_BASE_ADDR_MASK \
142         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
143
144 #define PT32_BASE_ADDR_MASK PAGE_MASK
145 #define PT32_DIR_BASE_ADDR_MASK \
146         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
147
148 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
149                         | PT64_NX_MASK)
150
151 #define PFERR_PRESENT_MASK (1U << 0)
152 #define PFERR_WRITE_MASK (1U << 1)
153 #define PFERR_USER_MASK (1U << 2)
154 #define PFERR_FETCH_MASK (1U << 4)
155
156 #define PT64_ROOT_LEVEL 4
157 #define PT32_ROOT_LEVEL 2
158 #define PT32E_ROOT_LEVEL 3
159
160 #define PT_DIRECTORY_LEVEL 2
161 #define PT_PAGE_TABLE_LEVEL 1
162
163 #define RMAP_EXT 4
164
165 #define ACC_EXEC_MASK    1
166 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
167 #define ACC_USER_MASK    PT_USER_MASK
168 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
169
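As an aside on the PT64_LEVEL_SHIFT/PT64_INDEX macros above: they slice an address into 9-bit table indices, one per paging level. The following stand-alone sketch (editor's illustration, not part of mmu.c; it assumes 4 KB pages, i.e. PAGE_SHIFT = 12, and uses EX_-prefixed copies of the macros) prints the four indices a 64-bit address decomposes into:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12                /* 4 KB pages assumed */
#define EX_PT64_LEVEL_BITS 9            /* 512 entries per table */

#define EX_PT64_LEVEL_SHIFT(level) \
        (EX_PAGE_SHIFT + ((level) - 1) * EX_PT64_LEVEL_BITS)
#define EX_PT64_INDEX(address, level) \
        (((address) >> EX_PT64_LEVEL_SHIFT(level)) & ((1 << EX_PT64_LEVEL_BITS) - 1))

int main(void)
{
        uint64_t addr = 0x00007f1234567000ULL;
        int level;

        /* Level 4 is the PML4, level 1 the page table proper. */
        for (level = 4; level >= 1; level--)
                printf("level %d index = %llu\n", level,
                       (unsigned long long)EX_PT64_INDEX(addr, level));
        return 0;
}
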
170 struct kvm_rmap_desc {
171         u64 *shadow_ptes[RMAP_EXT];
172         struct kvm_rmap_desc *more;
173 };
174
175 static struct kmem_cache *pte_chain_cache;
176 static struct kmem_cache *rmap_desc_cache;
177 static struct kmem_cache *mmu_page_header_cache;
178
179 static u64 __read_mostly shadow_trap_nonpresent_pte;
180 static u64 __read_mostly shadow_notrap_nonpresent_pte;
181
182 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
183 {
184         shadow_trap_nonpresent_pte = trap_pte;
185         shadow_notrap_nonpresent_pte = notrap_pte;
186 }
187 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
188
189 static int is_write_protection(struct kvm_vcpu *vcpu)
190 {
191         return vcpu->arch.cr0 & X86_CR0_WP;
192 }
193
194 static int is_cpuid_PSE36(void)
195 {
196         return 1;
197 }
198
199 static int is_nx(struct kvm_vcpu *vcpu)
200 {
201         return vcpu->arch.shadow_efer & EFER_NX;
202 }
203
204 static int is_present_pte(unsigned long pte)
205 {
206         return pte & PT_PRESENT_MASK;
207 }
208
209 static int is_shadow_present_pte(u64 pte)
210 {
211         return pte != shadow_trap_nonpresent_pte
212                 && pte != shadow_notrap_nonpresent_pte;
213 }
214
215 static int is_large_pte(u64 pte)
216 {
217         return pte & PT_PAGE_SIZE_MASK;
218 }
219
220 static int is_writeble_pte(unsigned long pte)
221 {
222         return pte & PT_WRITABLE_MASK;
223 }
224
225 static int is_dirty_pte(unsigned long pte)
226 {
227         return pte & PT_DIRTY_MASK;
228 }
229
230 static int is_rmap_pte(u64 pte)
231 {
232         return is_shadow_present_pte(pte);
233 }
234
235 static gfn_t pse36_gfn_delta(u32 gpte)
236 {
237         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
238
239         return (gpte & PT32_DIR_PSE36_MASK) << shift;
240 }
241
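pse36_gfn_delta() recovers the extra physical-address bits that a PSE-36 capable CPU stores in bits 13 and up of a 4 MB page-directory entry (4 bits here, per PT32_DIR_PSE36_SIZE), returning them as a gfn offset. A stand-alone sketch of the same arithmetic, assuming 4 KB pages; the EX_ names and the sample PDE are the editor's, not KVM's:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PT32_DIR_PSE36_SIZE 4
#define EX_PT32_DIR_PSE36_SHIFT 13
#define EX_PT32_DIR_PSE36_MASK \
        (((1ULL << EX_PT32_DIR_PSE36_SIZE) - 1) << EX_PT32_DIR_PSE36_SHIFT)

/* Same computation as pse36_gfn_delta() above. */
static uint64_t ex_pse36_gfn_delta(uint32_t gpte)
{
        int shift = 32 - EX_PT32_DIR_PSE36_SHIFT - EX_PAGE_SHIFT;

        return (gpte & EX_PT32_DIR_PSE36_MASK) << shift;
}

int main(void)
{
        /* Sample 4 MB PDE: present, writable, PS, PSE-36 bits encoding 0x5. */
        uint32_t gpte = (0x5u << EX_PT32_DIR_PSE36_SHIFT) | 0x83;
        uint64_t delta = ex_pse36_gfn_delta(gpte);

        printf("gfn delta = %#llx (physical address contribution %#llx)\n",
               (unsigned long long)delta,
               (unsigned long long)(delta << EX_PAGE_SHIFT));
        return 0;
}
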
242 static void set_shadow_pte(u64 *sptep, u64 spte)
243 {
244 #ifdef CONFIG_X86_64
245         set_64bit((unsigned long *)sptep, spte);
246 #else
247         set_64bit((unsigned long long *)sptep, spte);
248 #endif
249 }
250
251 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
252                                   struct kmem_cache *base_cache, int min)
253 {
254         void *obj;
255
256         if (cache->nobjs >= min)
257                 return 0;
258         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
259                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
260                 if (!obj)
261                         return -ENOMEM;
262                 cache->objects[cache->nobjs++] = obj;
263         }
264         return 0;
265 }
266
267 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
268 {
269         while (mc->nobjs)
270                 kfree(mc->objects[--mc->nobjs]);
271 }
272
273 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
274                                        int min)
275 {
276         struct page *page;
277
278         if (cache->nobjs >= min)
279                 return 0;
280         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
281                 page = alloc_page(GFP_KERNEL);
282                 if (!page)
283                         return -ENOMEM;
284                 set_page_private(page, 0);
285                 cache->objects[cache->nobjs++] = page_address(page);
286         }
287         return 0;
288 }
289
290 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
291 {
292         while (mc->nobjs)
293                 free_page((unsigned long)mc->objects[--mc->nobjs]);
294 }
295
296 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
297 {
298         int r;
299
300         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
301                                    pte_chain_cache, 4);
302         if (r)
303                 goto out;
304         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
305                                    rmap_desc_cache, 1);
306         if (r)
307                 goto out;
308         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
309         if (r)
310                 goto out;
311         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
312                                    mmu_page_header_cache, 4);
313 out:
314         return r;
315 }
316
317 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
318 {
319         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
320         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
321         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
322         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
323 }
324
325 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
326                                     size_t size)
327 {
328         void *p;
329
330         BUG_ON(!mc->nobjs);
331         p = mc->objects[--mc->nobjs];
332         memset(p, 0, size);
333         return p;
334 }
335
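The memory-cache helpers above exist so that each vcpu can pre-fill small object caches with GFP_KERNEL allocations while it is still allowed to sleep, and later MMU code running under mmu_lock can pull objects out with mmu_memory_cache_alloc() without touching the allocator. A minimal user-space sketch of that top-up-then-consume pattern (editor's example; the ex_ names, sizes and calloc/free stand-ins are assumptions, not KVM API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EX_CACHE_SIZE 8

struct ex_memory_cache {
        int nobjs;
        void *objects[EX_CACHE_SIZE];
};

/* Fill the cache up front, while allocating is still allowed to sleep/fail. */
static int ex_topup(struct ex_memory_cache *mc, size_t obj_size, int min)
{
        if (mc->nobjs >= min)
                return 0;
        while (mc->nobjs < EX_CACHE_SIZE) {
                void *obj = calloc(1, obj_size);

                if (!obj)
                        return -1;
                mc->objects[mc->nobjs++] = obj;
        }
        return 0;
}

/* Later, on the "locked" path: guaranteed to succeed, no allocator call. */
static void *ex_alloc(struct ex_memory_cache *mc, size_t size)
{
        void *p = mc->objects[--mc->nobjs];

        memset(p, 0, size);
        return p;
}

static void ex_free_all(struct ex_memory_cache *mc)
{
        while (mc->nobjs)
                free(mc->objects[--mc->nobjs]);
}

int main(void)
{
        struct ex_memory_cache mc = { 0 };
        void *obj;

        if (ex_topup(&mc, 64, 4))
                return 1;
        obj = ex_alloc(&mc, 64);
        printf("%d objects still cached after taking one (%p)\n", mc.nobjs, obj);
        free(obj);
        ex_free_all(&mc);
        return 0;
}
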
336 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
337 {
338         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
339                                       sizeof(struct kvm_pte_chain));
340 }
341
342 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
343 {
344         kfree(pc);
345 }
346
347 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
348 {
349         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
350                                       sizeof(struct kvm_rmap_desc));
351 }
352
353 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
354 {
355         kfree(rd);
356 }
357
358 /*
359  * Return the pointer to the largepage write count for a given
360  * gfn, handling slots that are not large page aligned.
361  */
362 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
363 {
364         unsigned long idx;
365
366         idx = (gfn / KVM_PAGES_PER_HPAGE) -
367               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
368         return &slot->lpage_info[idx].write_count;
369 }
370
371 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
372 {
373         int *write_count;
374
375         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
376         *write_count += 1;
377         WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
378 }
379
380 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
381 {
382         int *write_count;
383
384         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
385         *write_count -= 1;
386         WARN_ON(*write_count < 0);
387 }
388
389 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
390 {
391         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
392         int *largepage_idx;
393
394         if (slot) {
395                 largepage_idx = slot_largepage_idx(gfn, slot);
396                 return *largepage_idx;
397         }
398
399         return 1;
400 }
401
402 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
403 {
404         struct vm_area_struct *vma;
405         unsigned long addr;
406
407         addr = gfn_to_hva(kvm, gfn);
408         if (kvm_is_error_hva(addr))
409                 return 0;
410
411         vma = find_vma(current->mm, addr);
412         if (vma && is_vm_hugetlb_page(vma))
413                 return 1;
414
415         return 0;
416 }
417
418 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
419 {
420         struct kvm_memory_slot *slot;
421
422         if (has_wrprotected_page(vcpu->kvm, large_gfn))
423                 return 0;
424
425         if (!host_largepage_backed(vcpu->kvm, large_gfn))
426                 return 0;
427
428         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
429         if (slot && slot->dirty_bitmap)
430                 return 0;
431
432         return 1;
433 }
434
435 /*
436  * Take gfn and return the reverse mapping to it.
437  * Note: gfn must be unaliased before this function gets called
438  */
439
440 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
441 {
442         struct kvm_memory_slot *slot;
443         unsigned long idx;
444
445         slot = gfn_to_memslot(kvm, gfn);
446         if (!lpage)
447                 return &slot->rmap[gfn - slot->base_gfn];
448
449         idx = (gfn / KVM_PAGES_PER_HPAGE) -
450               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
451
452         return &slot->lpage_info[idx].rmap_pde;
453 }
454
455 /*
456  * Reverse mapping data structures:
457  *
458  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
459  * that points to page_address(page).
460  *
461  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
462  * containing more mappings.
463  */
464 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
465 {
466         struct kvm_mmu_page *sp;
467         struct kvm_rmap_desc *desc;
468         unsigned long *rmapp;
469         int i;
470
471         if (!is_rmap_pte(*spte))
472                 return;
473         gfn = unalias_gfn(vcpu->kvm, gfn);
474         sp = page_header(__pa(spte));
475         sp->gfns[spte - sp->spt] = gfn;
476         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
477         if (!*rmapp) {
478                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
479                 *rmapp = (unsigned long)spte;
480         } else if (!(*rmapp & 1)) {
481                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
482                 desc = mmu_alloc_rmap_desc(vcpu);
483                 desc->shadow_ptes[0] = (u64 *)*rmapp;
484                 desc->shadow_ptes[1] = spte;
485                 *rmapp = (unsigned long)desc | 1;
486         } else {
487                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
488                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
489                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
490                         desc = desc->more;
491                 if (desc->shadow_ptes[RMAP_EXT-1]) {
492                         desc->more = mmu_alloc_rmap_desc(vcpu);
493                         desc = desc->more;
494                 }
495                 for (i = 0; desc->shadow_ptes[i]; ++i)
496                         ;
497                 desc->shadow_ptes[i] = spte;
498         }
499 }
500
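The encoding described in the comment above is a tagged pointer: a lone spte is stored directly in the rmap word, and on the 1 -> many transition the word is replaced by a pointer to a descriptor with bit 0 set as the tag (allocator-returned pointers are at least word aligned, so bit 0 is always free). Below is a self-contained user-space sketch of the same trick, mirroring rmap_add() but leaving out the chaining of full descriptors via ->more; all ex_ names are the editor's:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define EX_RMAP_EXT 4

struct ex_rmap_desc {
        unsigned long *sptes[EX_RMAP_EXT];
        struct ex_rmap_desc *more;
};

/* Add one spte pointer to the rmap word, promoting it to a descriptor on
 * the 1 -> many transition, the way rmap_add() does. */
static void ex_rmap_add(unsigned long *rmapp, unsigned long *spte)
{
        struct ex_rmap_desc *desc;
        int i;

        if (!*rmapp) {                          /* 0 -> 1: store spte directly */
                *rmapp = (unsigned long)spte;
        } else if (!(*rmapp & 1)) {             /* 1 -> many: switch to a desc */
                desc = calloc(1, sizeof(*desc));
                assert(desc);
                desc->sptes[0] = (unsigned long *)*rmapp;
                desc->sptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
        } else {                                /* many -> many: append */
                desc = (struct ex_rmap_desc *)(*rmapp & ~1ul);
                for (i = 0; i < EX_RMAP_EXT && desc->sptes[i]; i++)
                        ;
                assert(i < EX_RMAP_EXT);        /* chaining via ->more omitted */
                desc->sptes[i] = spte;
        }
}

int main(void)
{
        unsigned long rmap = 0;
        unsigned long spte_a = 0, spte_b = 0, spte_c = 0;

        ex_rmap_add(&rmap, &spte_a);
        printf("after one add: tag bit = %lu\n", rmap & 1);     /* 0: direct spte */
        ex_rmap_add(&rmap, &spte_b);
        ex_rmap_add(&rmap, &spte_c);
        printf("after three adds: tag bit = %lu\n", rmap & 1);  /* 1: descriptor */
        free((void *)(rmap & ~1ul));
        return 0;
}
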
501 static void rmap_desc_remove_entry(unsigned long *rmapp,
502                                    struct kvm_rmap_desc *desc,
503                                    int i,
504                                    struct kvm_rmap_desc *prev_desc)
505 {
506         int j;
507
508         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
509                 ;
510         desc->shadow_ptes[i] = desc->shadow_ptes[j];
511         desc->shadow_ptes[j] = NULL;
512         if (j != 0)
513                 return;
514         if (!prev_desc && !desc->more)
515                 *rmapp = (unsigned long)desc->shadow_ptes[0];
516         else
517                 if (prev_desc)
518                         prev_desc->more = desc->more;
519                 else
520                         *rmapp = (unsigned long)desc->more | 1;
521         mmu_free_rmap_desc(desc);
522 }
523
524 static void rmap_remove(struct kvm *kvm, u64 *spte)
525 {
526         struct kvm_rmap_desc *desc;
527         struct kvm_rmap_desc *prev_desc;
528         struct kvm_mmu_page *sp;
529         struct page *page;
530         unsigned long *rmapp;
531         int i;
532
533         if (!is_rmap_pte(*spte))
534                 return;
535         sp = page_header(__pa(spte));
536         page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
537         mark_page_accessed(page);
538         if (is_writeble_pte(*spte))
539                 kvm_release_page_dirty(page);
540         else
541                 kvm_release_page_clean(page);
542         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
543         if (!*rmapp) {
544                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
545                 BUG();
546         } else if (!(*rmapp & 1)) {
547                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
548                 if ((u64 *)*rmapp != spte) {
549                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
550                                spte, *spte);
551                         BUG();
552                 }
553                 *rmapp = 0;
554         } else {
555                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
556                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
557                 prev_desc = NULL;
558                 while (desc) {
559                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
560                                 if (desc->shadow_ptes[i] == spte) {
561                                         rmap_desc_remove_entry(rmapp,
562                                                                desc, i,
563                                                                prev_desc);
564                                         return;
565                                 }
566                         prev_desc = desc;
567                         desc = desc->more;
568                 }
569                 BUG();
570         }
571 }
572
573 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
574 {
575         struct kvm_rmap_desc *desc;
576         struct kvm_rmap_desc *prev_desc;
577         u64 *prev_spte;
578         int i;
579
580         if (!*rmapp)
581                 return NULL;
582         else if (!(*rmapp & 1)) {
583                 if (!spte)
584                         return (u64 *)*rmapp;
585                 return NULL;
586         }
587         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
588         prev_desc = NULL;
589         prev_spte = NULL;
590         while (desc) {
591                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
592                         if (prev_spte == spte)
593                                 return desc->shadow_ptes[i];
594                         prev_spte = desc->shadow_ptes[i];
595                 }
596                 desc = desc->more;
597         }
598         return NULL;
599 }
600
601 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
602 {
603         unsigned long *rmapp;
604         u64 *spte;
605         int write_protected = 0;
606
607         gfn = unalias_gfn(kvm, gfn);
608         rmapp = gfn_to_rmap(kvm, gfn, 0);
609
610         spte = rmap_next(kvm, rmapp, NULL);
611         while (spte) {
612                 BUG_ON(!spte);
613                 BUG_ON(!(*spte & PT_PRESENT_MASK));
614                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
615                 if (is_writeble_pte(*spte)) {
616                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
617                         write_protected = 1;
618                 }
619                 spte = rmap_next(kvm, rmapp, spte);
620         }
621         /* check for huge page mappings */
622         rmapp = gfn_to_rmap(kvm, gfn, 1);
623         spte = rmap_next(kvm, rmapp, NULL);
624         while (spte) {
625                 BUG_ON(!spte);
626                 BUG_ON(!(*spte & PT_PRESENT_MASK));
627                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
628                 pgprintk("rmap_write_protect(large): spte %p %llx %llx\n", spte, *spte, gfn);
629                 if (is_writeble_pte(*spte)) {
630                         rmap_remove(kvm, spte);
631                         --kvm->stat.lpages;
632                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
633                         write_protected = 1;
634                 }
635                 spte = rmap_next(kvm, rmapp, spte);
636         }
637
638         if (write_protected)
639                 kvm_flush_remote_tlbs(kvm);
640
641         account_shadowed(kvm, gfn);
642 }
643
644 #ifdef MMU_DEBUG
645 static int is_empty_shadow_page(u64 *spt)
646 {
647         u64 *pos;
648         u64 *end;
649
650         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
651                 if (*pos != shadow_trap_nonpresent_pte) {
652                         printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
653                                pos, *pos);
654                         return 0;
655                 }
656         return 1;
657 }
658 #endif
659
660 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
661 {
662         ASSERT(is_empty_shadow_page(sp->spt));
663         list_del(&sp->link);
664         __free_page(virt_to_page(sp->spt));
665         __free_page(virt_to_page(sp->gfns));
666         kfree(sp);
667         ++kvm->arch.n_free_mmu_pages;
668 }
669
670 static unsigned kvm_page_table_hashfn(gfn_t gfn)
671 {
672         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
673 }
674
675 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
676                                                u64 *parent_pte)
677 {
678         struct kvm_mmu_page *sp;
679
680         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
681         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
682         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
683         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
684         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
685         ASSERT(is_empty_shadow_page(sp->spt));
686         sp->slot_bitmap = 0;
687         sp->multimapped = 0;
688         sp->parent_pte = parent_pte;
689         --vcpu->kvm->arch.n_free_mmu_pages;
690         return sp;
691 }
692
693 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
694                                     struct kvm_mmu_page *sp, u64 *parent_pte)
695 {
696         struct kvm_pte_chain *pte_chain;
697         struct hlist_node *node;
698         int i;
699
700         if (!parent_pte)
701                 return;
702         if (!sp->multimapped) {
703                 u64 *old = sp->parent_pte;
704
705                 if (!old) {
706                         sp->parent_pte = parent_pte;
707                         return;
708                 }
709                 sp->multimapped = 1;
710                 pte_chain = mmu_alloc_pte_chain(vcpu);
711                 INIT_HLIST_HEAD(&sp->parent_ptes);
712                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
713                 pte_chain->parent_ptes[0] = old;
714         }
715         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
716                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
717                         continue;
718                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
719                         if (!pte_chain->parent_ptes[i]) {
720                                 pte_chain->parent_ptes[i] = parent_pte;
721                                 return;
722                         }
723         }
724         pte_chain = mmu_alloc_pte_chain(vcpu);
725         BUG_ON(!pte_chain);
726         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
727         pte_chain->parent_ptes[0] = parent_pte;
728 }
729
730 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
731                                        u64 *parent_pte)
732 {
733         struct kvm_pte_chain *pte_chain;
734         struct hlist_node *node;
735         int i;
736
737         if (!sp->multimapped) {
738                 BUG_ON(sp->parent_pte != parent_pte);
739                 sp->parent_pte = NULL;
740                 return;
741         }
742         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
743                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
744                         if (!pte_chain->parent_ptes[i])
745                                 break;
746                         if (pte_chain->parent_ptes[i] != parent_pte)
747                                 continue;
748                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
749                                 && pte_chain->parent_ptes[i + 1]) {
750                                 pte_chain->parent_ptes[i]
751                                         = pte_chain->parent_ptes[i + 1];
752                                 ++i;
753                         }
754                         pte_chain->parent_ptes[i] = NULL;
755                         if (i == 0) {
756                                 hlist_del(&pte_chain->link);
757                                 mmu_free_pte_chain(pte_chain);
758                                 if (hlist_empty(&sp->parent_ptes)) {
759                                         sp->multimapped = 0;
760                                         sp->parent_pte = NULL;
761                                 }
762                         }
763                         return;
764                 }
765         BUG();
766 }
767
768 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
769 {
770         unsigned index;
771         struct hlist_head *bucket;
772         struct kvm_mmu_page *sp;
773         struct hlist_node *node;
774
775         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
776         index = kvm_page_table_hashfn(gfn);
777         bucket = &kvm->arch.mmu_page_hash[index];
778         hlist_for_each_entry(sp, node, bucket, hash_link)
779                 if (sp->gfn == gfn && !sp->role.metaphysical
780                     && !sp->role.invalid) {
781                         pgprintk("%s: found role %x\n",
782                                  __FUNCTION__, sp->role.word);
783                         return sp;
784                 }
785         return NULL;
786 }
787
788 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
789                                              gfn_t gfn,
790                                              gva_t gaddr,
791                                              unsigned level,
792                                              int metaphysical,
793                                              unsigned access,
794                                              u64 *parent_pte)
795 {
796         union kvm_mmu_page_role role;
797         unsigned index;
798         unsigned quadrant;
799         struct hlist_head *bucket;
800         struct kvm_mmu_page *sp;
801         struct hlist_node *node;
802
803         role.word = 0;
804         role.glevels = vcpu->arch.mmu.root_level;
805         role.level = level;
806         role.metaphysical = metaphysical;
807         role.access = access;
808         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
809                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
810                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
811                 role.quadrant = quadrant;
812         }
813         pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
814                  gfn, role.word);
815         index = kvm_page_table_hashfn(gfn);
816         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
817         hlist_for_each_entry(sp, node, bucket, hash_link)
818                 if (sp->gfn == gfn && sp->role.word == role.word) {
819                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
820                         pgprintk("%s: found\n", __FUNCTION__);
821                         return sp;
822                 }
823         ++vcpu->kvm->stat.mmu_cache_miss;
824         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
825         if (!sp)
826                 return sp;
827         pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
828         sp->gfn = gfn;
829         sp->role = role;
830         hlist_add_head(&sp->hash_link, bucket);
831         vcpu->arch.mmu.prefetch_page(vcpu, sp);
832         if (!metaphysical)
833                 rmap_write_protect(vcpu->kvm, gfn);
834         return sp;
835 }
836
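role.quadrant exists because a 32-bit guest page table has 1024 entries while a shadow page in 64-bit format holds only 512, so one guest table is shadowed by several shadow pages and the quadrant records which slice of the guest table each one covers. A stand-alone sketch of the quadrant arithmetic used in kvm_mmu_get_page(), assuming 4 KB pages (editor's example):

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PT64_PT_BITS 9               /* shadow page tables: 512 entries */
#define EX_PT32_PT_BITS 10              /* 32-bit guest tables: 1024 entries */

/* Same arithmetic as the quadrant computation in kvm_mmu_get_page(). */
static unsigned ex_quadrant(uint32_t gaddr, int level)
{
        unsigned quadrant = gaddr >> (EX_PAGE_SHIFT + EX_PT64_PT_BITS * level);

        return quadrant &
               ((1 << ((EX_PT32_PT_BITS - EX_PT64_PT_BITS) * level)) - 1);
}

int main(void)
{
        /* Two addresses 2 MB apart: same 4 MB guest page table, but they
         * land in different halves of it, hence different level-1 quadrants. */
        uint32_t a = 0x40000000, b = 0x40200000;

        printf("level 1: quadrant(a) = %u, quadrant(b) = %u\n",
               ex_quadrant(a, 1), ex_quadrant(b, 1));
        printf("level 2: quadrant(a) = %u, quadrant(b) = %u\n",
               ex_quadrant(a, 2), ex_quadrant(b, 2));
        return 0;
}
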
837 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
838                                          struct kvm_mmu_page *sp)
839 {
840         unsigned i;
841         u64 *pt;
842         u64 ent;
843
844         pt = sp->spt;
845
846         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
847                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
848                         if (is_shadow_present_pte(pt[i]))
849                                 rmap_remove(kvm, &pt[i]);
850                         pt[i] = shadow_trap_nonpresent_pte;
851                 }
852                 kvm_flush_remote_tlbs(kvm);
853                 return;
854         }
855
856         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
857                 ent = pt[i];
858
859                 if (is_shadow_present_pte(ent)) {
860                         if (!is_large_pte(ent)) {
861                                 ent &= PT64_BASE_ADDR_MASK;
862                                 mmu_page_remove_parent_pte(page_header(ent),
863                                                            &pt[i]);
864                         } else {
865                                 --kvm->stat.lpages;
866                                 rmap_remove(kvm, &pt[i]);
867                         }
868                 }
869                 pt[i] = shadow_trap_nonpresent_pte;
870         }
871         kvm_flush_remote_tlbs(kvm);
872 }
873
874 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
875 {
876         mmu_page_remove_parent_pte(sp, parent_pte);
877 }
878
879 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
880 {
881         int i;
882
883         for (i = 0; i < KVM_MAX_VCPUS; ++i)
884                 if (kvm->vcpus[i])
885                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
886 }
887
888 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
889 {
890         u64 *parent_pte;
891
892         ++kvm->stat.mmu_shadow_zapped;
893         while (sp->multimapped || sp->parent_pte) {
894                 if (!sp->multimapped)
895                         parent_pte = sp->parent_pte;
896                 else {
897                         struct kvm_pte_chain *chain;
898
899                         chain = container_of(sp->parent_ptes.first,
900                                              struct kvm_pte_chain, link);
901                         parent_pte = chain->parent_ptes[0];
902                 }
903                 BUG_ON(!parent_pte);
904                 kvm_mmu_put_page(sp, parent_pte);
905                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
906         }
907         kvm_mmu_page_unlink_children(kvm, sp);
908         if (!sp->root_count) {
909                 if (!sp->role.metaphysical)
910                         unaccount_shadowed(kvm, sp->gfn);
911                 hlist_del(&sp->hash_link);
912                 kvm_mmu_free_page(kvm, sp);
913         } else {
914                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
915                 sp->role.invalid = 1;
916                 kvm_reload_remote_mmus(kvm);
917         }
918         kvm_mmu_reset_last_pte_updated(kvm);
919 }
920
921 /*
922  * Change the number of mmu pages allocated to the vm.
923  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
924  */
925 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
926 {
927         /*
928          * If we set the number of mmu pages to be smaller than the
929          * number of active pages, we must free some mmu pages before we
930          * change the value.
931          */
932
933         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
934             kvm_nr_mmu_pages) {
935                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
936                                        - kvm->arch.n_free_mmu_pages;
937
938                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
939                         struct kvm_mmu_page *page;
940
941                         page = container_of(kvm->arch.active_mmu_pages.prev,
942                                             struct kvm_mmu_page, link);
943                         kvm_mmu_zap_page(kvm, page);
944                         n_used_mmu_pages--;
945                 }
946                 kvm->arch.n_free_mmu_pages = 0;
947         }
948         else
949                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
950                                          - kvm->arch.n_alloc_mmu_pages;
951
952         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
953 }
954
955 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
956 {
957         unsigned index;
958         struct hlist_head *bucket;
959         struct kvm_mmu_page *sp;
960         struct hlist_node *node, *n;
961         int r;
962
963         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
964         r = 0;
965         index = kvm_page_table_hashfn(gfn);
966         bucket = &kvm->arch.mmu_page_hash[index];
967         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
968                 if (sp->gfn == gfn && !sp->role.metaphysical) {
969                         pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
970                                  sp->role.word);
971                         kvm_mmu_zap_page(kvm, sp);
972                         r = 1;
973                 }
974         return r;
975 }
976
977 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
978 {
979         struct kvm_mmu_page *sp;
980
981         while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
982                 pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
983                 kvm_mmu_zap_page(kvm, sp);
984         }
985 }
986
987 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
988 {
989         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
990         struct kvm_mmu_page *sp = page_header(__pa(pte));
991
992         __set_bit(slot, &sp->slot_bitmap);
993 }
994
995 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
996 {
997         struct page *page;
998
999         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1000
1001         if (gpa == UNMAPPED_GVA)
1002                 return NULL;
1003
1004         down_read(&current->mm->mmap_sem);
1005         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1006         up_read(&current->mm->mmap_sem);
1007
1008         return page;
1009 }
1010
1011 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1012                          unsigned pt_access, unsigned pte_access,
1013                          int user_fault, int write_fault, int dirty,
1014                          int *ptwrite, int largepage, gfn_t gfn,
1015                          struct page *page)
1016 {
1017         u64 spte;
1018         int was_rmapped = 0;
1019         int was_writeble = is_writeble_pte(*shadow_pte);
1020         hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1021
1022         pgprintk("%s: spte %llx access %x write_fault %d"
1023                  " user_fault %d gfn %lx\n",
1024                  __FUNCTION__, *shadow_pte, pt_access,
1025                  write_fault, user_fault, gfn);
1026
1027         if (is_rmap_pte(*shadow_pte)) {
1028                 /*
1029                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1030                  * the parent of the now unreachable PTE.
1031                  */
1032                 if (largepage && !is_large_pte(*shadow_pte)) {
1033                         struct kvm_mmu_page *child;
1034                         u64 pte = *shadow_pte;
1035
1036                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1037                         mmu_page_remove_parent_pte(child, shadow_pte);
1038                 } else if (host_pfn != page_to_pfn(page)) {
1039                         pgprintk("hfn old %lx new %lx\n",
1040                                  host_pfn, page_to_pfn(page));
1041                         rmap_remove(vcpu->kvm, shadow_pte);
1042                 } else {
1043                         if (largepage)
1044                                 was_rmapped = is_large_pte(*shadow_pte);
1045                         else
1046                                 was_rmapped = 1;
1047                 }
1048         }
1049
1050
1051         /*
1052          * We don't set the accessed bit, since we sometimes want to see
1053          * whether the guest actually used the pte (in order to detect
1054          * demand paging).
1055          */
1056         spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
1057         if (!dirty)
1058                 pte_access &= ~ACC_WRITE_MASK;
1059         if (!(pte_access & ACC_EXEC_MASK))
1060                 spte |= PT64_NX_MASK;
1061
1062         spte |= PT_PRESENT_MASK;
1063         if (pte_access & ACC_USER_MASK)
1064                 spte |= PT_USER_MASK;
1065         if (largepage)
1066                 spte |= PT_PAGE_SIZE_MASK;
1067
1068         spte |= page_to_phys(page);
1069
1070         if ((pte_access & ACC_WRITE_MASK)
1071             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1072                 struct kvm_mmu_page *shadow;
1073
1074                 spte |= PT_WRITABLE_MASK;
1075                 if (user_fault) {
1076                         mmu_unshadow(vcpu->kvm, gfn);
1077                         goto unshadowed;
1078                 }
1079
1080                 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1081                 if (shadow ||
1082                    (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
1083                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1084                                  __FUNCTION__, gfn);
1085                         pte_access &= ~ACC_WRITE_MASK;
1086                         if (is_writeble_pte(spte)) {
1087                                 spte &= ~PT_WRITABLE_MASK;
1088                                 kvm_x86_ops->tlb_flush(vcpu);
1089                         }
1090                         if (write_fault)
1091                                 *ptwrite = 1;
1092                 }
1093         }
1094
1095 unshadowed:
1096
1097         if (pte_access & ACC_WRITE_MASK)
1098                 mark_page_dirty(vcpu->kvm, gfn);
1099
1100         pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
1101         pgprintk("instantiating %s PTE (%s) at %lx (%llx) addr %p\n",
1102                  (spte & PT_PAGE_SIZE_MASK) ? "2MB" : "4kB",
1103                  (spte & PT_WRITABLE_MASK) ? "RW" : "R", gfn, spte, shadow_pte);
1104         set_shadow_pte(shadow_pte, spte);
1105         if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
1106             && (spte & PT_PRESENT_MASK))
1107                 ++vcpu->kvm->stat.lpages;
1108
1109         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1110         if (!was_rmapped) {
1111                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1112                 if (!is_rmap_pte(*shadow_pte))
1113                         kvm_release_page_clean(page);
1114         } else {
1115                 if (was_writeble)
1116                         kvm_release_page_dirty(page);
1117                 else
1118                         kvm_release_page_clean(page);
1119         }
1120         if (!ptwrite || !*ptwrite)
1121                 vcpu->arch.last_pte_updated = shadow_pte;
1122 }
1123
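Stripped of the write-protection and rmap bookkeeping, the spte that mmu_set_spte() installs is just the host physical address of the page OR-ed with bits derived from pte_access and the largepage flag (present and dirty always, NX when exec is not granted, PT_PAGE_SIZE_MASK for a 2 MB mapping). A compressed user-space sketch of that bit assembly, with constants copied from the defines near the top of the file and EX_ prefixes added (editor's illustration):

#include <stdint.h>
#include <stdio.h>

#define EX_PT_PRESENT_MASK      (1ULL << 0)
#define EX_PT_WRITABLE_MASK     (1ULL << 1)
#define EX_PT_USER_MASK         (1ULL << 2)
#define EX_PT_DIRTY_MASK        (1ULL << 6)
#define EX_PT_PAGE_SIZE_MASK    (1ULL << 7)
#define EX_PT64_NX_MASK         (1ULL << 63)

#define EX_ACC_EXEC_MASK        1
#define EX_ACC_WRITE_MASK       EX_PT_WRITABLE_MASK
#define EX_ACC_USER_MASK        EX_PT_USER_MASK

/* Mirror the bit-assembly half of mmu_set_spte(): no accessed bit, dirty
 * set up front, NX whenever exec is not granted. */
static uint64_t ex_make_spte(uint64_t host_phys, unsigned long pte_access,
                             int largepage)
{
        uint64_t spte = EX_PT_PRESENT_MASK | EX_PT_DIRTY_MASK;

        if (!(pte_access & EX_ACC_EXEC_MASK))
                spte |= EX_PT64_NX_MASK;
        if (pte_access & EX_ACC_USER_MASK)
                spte |= EX_PT_USER_MASK;
        if (pte_access & EX_ACC_WRITE_MASK)
                spte |= EX_PT_WRITABLE_MASK;
        if (largepage)
                spte |= EX_PT_PAGE_SIZE_MASK;
        return spte | host_phys;
}

int main(void)
{
        uint64_t small = ex_make_spte(0x123456000ULL,
                                      EX_ACC_USER_MASK | EX_ACC_WRITE_MASK, 0);
        uint64_t large = ex_make_spte(0x200000000ULL, EX_ACC_EXEC_MASK, 1);

        printf("4kB user rw spte:   %#llx\n", (unsigned long long)small);
        printf("2MB exec-only spte: %#llx\n", (unsigned long long)large);
        return 0;
}
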
1124 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1125 {
1126 }
1127
1128 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1129                            int largepage, gfn_t gfn, struct page *page,
1130                            int level)
1131 {
1132         hpa_t table_addr = vcpu->arch.mmu.root_hpa;
1133         int pt_write = 0;
1134
1135         for (; ; level--) {
1136                 u32 index = PT64_INDEX(v, level);
1137                 u64 *table;
1138
1139                 ASSERT(VALID_PAGE(table_addr));
1140                 table = __va(table_addr);
1141
1142                 if (level == 1) {
1143                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1144                                      0, write, 1, &pt_write, 0, gfn, page);
1145                         return pt_write;
1146                 }
1147
1148                 if (largepage && level == 2) {
1149                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1150                                     0, write, 1, &pt_write, 1, gfn, page);
1151                         return pt_write;
1152                 }
1153
1154                 if (table[index] == shadow_trap_nonpresent_pte) {
1155                         struct kvm_mmu_page *new_table;
1156                         gfn_t pseudo_gfn;
1157
1158                         pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
1159                                 >> PAGE_SHIFT;
1160                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
1161                                                      v, level - 1,
1162                                                      1, ACC_ALL, &table[index]);
1163                         if (!new_table) {
1164                                 pgprintk("nonpaging_map: ENOMEM\n");
1165                                 kvm_release_page_clean(page);
1166                                 return -ENOMEM;
1167                         }
1168
1169                         table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
1170                                 | PT_WRITABLE_MASK | PT_USER_MASK;
1171                 }
1172                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
1173         }
1174 }
1175
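__direct_map() walks down one level per loop iteration, allocating intermediate shadow pages as needed, and installs the final spte at level 1 for a 4 KB mapping or already at level 2 when largepage is set. The sketch below only prints the walk a given gpa would take, assuming 4 KB pages and the 3-level root that nonpaging_map() passes in (PT32E_ROOT_LEVEL); it is an editor's illustration, not the kernel code path:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PT64_LEVEL_BITS 9

#define EX_PT64_LEVEL_SHIFT(level) \
        (EX_PAGE_SHIFT + ((level) - 1) * EX_PT64_LEVEL_BITS)
#define EX_PT64_INDEX(address, level) \
        (((address) >> EX_PT64_LEVEL_SHIFT(level)) & ((1 << EX_PT64_LEVEL_BITS) - 1))

static void ex_show_walk(uint64_t gpa, int largepage, int root_level)
{
        int level;

        for (level = root_level; ; level--) {
                unsigned idx = EX_PT64_INDEX(gpa, level);

                if (level == 1 || (largepage && level == 2)) {
                        printf("gpa %#llx: leaf spte at level %d, index %u (%s)\n",
                               (unsigned long long)gpa, level, idx,
                               largepage ? "2MB" : "4kB");
                        return;
                }
                printf("gpa %#llx: intermediate table at level %d, index %u\n",
                       (unsigned long long)gpa, level, idx);
        }
}

int main(void)
{
        ex_show_walk(0x12345678ULL, 0, 3);      /* 4 KB mapping: stops at level 1 */
        ex_show_walk(0x12345678ULL, 1, 3);      /* 2 MB mapping: stops at level 2 */
        return 0;
}
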
1176 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1177 {
1178         int r;
1179         int largepage = 0;
1180
1181         struct page *page;
1182
1183         down_read(&vcpu->kvm->slots_lock);
1184
1185         down_read(&current->mm->mmap_sem);
1186         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1187                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1188                 largepage = 1;
1189         }
1190
1191         page = gfn_to_page(vcpu->kvm, gfn);
1192         up_read(&current->mm->mmap_sem);
1193
1194         /* mmio */
1195         if (is_error_page(page)) {
1196                 kvm_release_page_clean(page);
1197                 up_read(&vcpu->kvm->slots_lock);
1198                 return 1;
1199         }
1200
1201         spin_lock(&vcpu->kvm->mmu_lock);
1202         kvm_mmu_free_some_pages(vcpu);
1203         r = __direct_map(vcpu, v, write, largepage, gfn, page,
1204                          PT32E_ROOT_LEVEL);
1205         spin_unlock(&vcpu->kvm->mmu_lock);
1206
1207         up_read(&vcpu->kvm->slots_lock);
1208
1209         return r;
1210 }
1211
1212
1213 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1214                                     struct kvm_mmu_page *sp)
1215 {
1216         int i;
1217
1218         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1219                 sp->spt[i] = shadow_trap_nonpresent_pte;
1220 }
1221
1222 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1223 {
1224         int i;
1225         struct kvm_mmu_page *sp;
1226
1227         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1228                 return;
1229         spin_lock(&vcpu->kvm->mmu_lock);
1230 #ifdef CONFIG_X86_64
1231         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1232                 hpa_t root = vcpu->arch.mmu.root_hpa;
1233
1234                 sp = page_header(root);
1235                 --sp->root_count;
1236                 if (!sp->root_count && sp->role.invalid)
1237                         kvm_mmu_zap_page(vcpu->kvm, sp);
1238                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1239                 spin_unlock(&vcpu->kvm->mmu_lock);
1240                 return;
1241         }
1242 #endif
1243         for (i = 0; i < 4; ++i) {
1244                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1245
1246                 if (root) {
1247                         root &= PT64_BASE_ADDR_MASK;
1248                         sp = page_header(root);
1249                         --sp->root_count;
1250                         if (!sp->root_count && sp->role.invalid)
1251                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1252                 }
1253                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1254         }
1255         spin_unlock(&vcpu->kvm->mmu_lock);
1256         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1257 }
1258
1259 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1260 {
1261         int i;
1262         gfn_t root_gfn;
1263         struct kvm_mmu_page *sp;
1264         int metaphysical = 0;
1265
1266         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1267
1268 #ifdef CONFIG_X86_64
1269         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1270                 hpa_t root = vcpu->arch.mmu.root_hpa;
1271
1272                 ASSERT(!VALID_PAGE(root));
1273                 if (tdp_enabled)
1274                         metaphysical = 1;
1275                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1276                                       PT64_ROOT_LEVEL, metaphysical,
1277                                       ACC_ALL, NULL);
1278                 root = __pa(sp->spt);
1279                 ++sp->root_count;
1280                 vcpu->arch.mmu.root_hpa = root;
1281                 return;
1282         }
1283 #endif
1284         metaphysical = !is_paging(vcpu);
1285         if (tdp_enabled)
1286                 metaphysical = 1;
1287         for (i = 0; i < 4; ++i) {
1288                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1289
1290                 ASSERT(!VALID_PAGE(root));
1291                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1292                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1293                                 vcpu->arch.mmu.pae_root[i] = 0;
1294                                 continue;
1295                         }
1296                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1297                 } else if (vcpu->arch.mmu.root_level == 0)
1298                         root_gfn = 0;
1299                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1300                                       PT32_ROOT_LEVEL, metaphysical,
1301                                       ACC_ALL, NULL);
1302                 root = __pa(sp->spt);
1303                 ++sp->root_count;
1304                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1305         }
1306         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1307 }
1308
1309 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1310 {
1311         return vaddr;
1312 }
1313
1314 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1315                                 u32 error_code)
1316 {
1317         gfn_t gfn;
1318         int r;
1319
1320         pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
1321         r = mmu_topup_memory_caches(vcpu);
1322         if (r)
1323                 return r;
1324
1325         ASSERT(vcpu);
1326         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1327
1328         gfn = gva >> PAGE_SHIFT;
1329
1330         return nonpaging_map(vcpu, gva & PAGE_MASK,
1331                              error_code & PFERR_WRITE_MASK, gfn);
1332 }
1333
1334 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1335                                 u32 error_code)
1336 {
1337         struct page *page;
1338         int r;
1339         int largepage = 0;
1340         gfn_t gfn = gpa >> PAGE_SHIFT;
1341
1342         ASSERT(vcpu);
1343         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1344
1345         r = mmu_topup_memory_caches(vcpu);
1346         if (r)
1347                 return r;
1348
1349         down_read(&current->mm->mmap_sem);
1350         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1351                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1352                 largepage = 1;
1353         }
1354         page = gfn_to_page(vcpu->kvm, gfn);
1355         if (is_error_page(page)) {
1356                 kvm_release_page_clean(page);
1357                 up_read(&current->mm->mmap_sem);
1358                 return 1;
1359         }
1360         spin_lock(&vcpu->kvm->mmu_lock);
1361         kvm_mmu_free_some_pages(vcpu);
1362         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1363                          largepage, gfn, page, TDP_ROOT_LEVEL);
1364         spin_unlock(&vcpu->kvm->mmu_lock);
1365         up_read(&current->mm->mmap_sem);
1366
1367         return r;
1368 }
1369
1370 static void nonpaging_free(struct kvm_vcpu *vcpu)
1371 {
1372         mmu_free_roots(vcpu);
1373 }
1374
1375 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1376 {
1377         struct kvm_mmu *context = &vcpu->arch.mmu;
1378
1379         context->new_cr3 = nonpaging_new_cr3;
1380         context->page_fault = nonpaging_page_fault;
1381         context->gva_to_gpa = nonpaging_gva_to_gpa;
1382         context->free = nonpaging_free;
1383         context->prefetch_page = nonpaging_prefetch_page;
1384         context->root_level = 0;
1385         context->shadow_root_level = PT32E_ROOT_LEVEL;
1386         context->root_hpa = INVALID_PAGE;
1387         return 0;
1388 }
1389
1390 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1391 {
1392         ++vcpu->stat.tlb_flush;
1393         kvm_x86_ops->tlb_flush(vcpu);
1394 }
1395
1396 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1397 {
1398         pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
1399         mmu_free_roots(vcpu);
1400 }
1401
1402 static void inject_page_fault(struct kvm_vcpu *vcpu,
1403                               u64 addr,
1404                               u32 err_code)
1405 {
1406         kvm_inject_page_fault(vcpu, addr, err_code);
1407 }
1408
1409 static void paging_free(struct kvm_vcpu *vcpu)
1410 {
1411         nonpaging_free(vcpu);
1412 }
1413
1414 #define PTTYPE 64
1415 #include "paging_tmpl.h"
1416 #undef PTTYPE
1417
1418 #define PTTYPE 32
1419 #include "paging_tmpl.h"
1420 #undef PTTYPE
1421
1422 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1423 {
1424         struct kvm_mmu *context = &vcpu->arch.mmu;
1425
1426         ASSERT(is_pae(vcpu));
1427         context->new_cr3 = paging_new_cr3;
1428         context->page_fault = paging64_page_fault;
1429         context->gva_to_gpa = paging64_gva_to_gpa;
1430         context->prefetch_page = paging64_prefetch_page;
1431         context->free = paging_free;
1432         context->root_level = level;
1433         context->shadow_root_level = level;
1434         context->root_hpa = INVALID_PAGE;
1435         return 0;
1436 }
1437
1438 static int paging64_init_context(struct kvm_vcpu *vcpu)
1439 {
1440         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1441 }
1442
1443 static int paging32_init_context(struct kvm_vcpu *vcpu)
1444 {
1445         struct kvm_mmu *context = &vcpu->arch.mmu;
1446
1447         context->new_cr3 = paging_new_cr3;
1448         context->page_fault = paging32_page_fault;
1449         context->gva_to_gpa = paging32_gva_to_gpa;
1450         context->free = paging_free;
1451         context->prefetch_page = paging32_prefetch_page;
1452         context->root_level = PT32_ROOT_LEVEL;
1453         context->shadow_root_level = PT32E_ROOT_LEVEL;
1454         context->root_hpa = INVALID_PAGE;
1455         return 0;
1456 }
1457
1458 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1459 {
1460         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1461 }
1462
1463 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1464 {
1465         struct kvm_mmu *context = &vcpu->arch.mmu;
1466
1467         context->new_cr3 = nonpaging_new_cr3;
1468         context->page_fault = tdp_page_fault;
1469         context->free = nonpaging_free;
1470         context->prefetch_page = nonpaging_prefetch_page;
1471         context->shadow_root_level = TDP_ROOT_LEVEL;
1472         context->root_hpa = INVALID_PAGE;
1473
1474         if (!is_paging(vcpu)) {
1475                 context->gva_to_gpa = nonpaging_gva_to_gpa;
1476                 context->root_level = 0;
1477         } else if (is_long_mode(vcpu)) {
1478                 context->gva_to_gpa = paging64_gva_to_gpa;
1479                 context->root_level = PT64_ROOT_LEVEL;
1480         } else if (is_pae(vcpu)) {
1481                 context->gva_to_gpa = paging64_gva_to_gpa;
1482                 context->root_level = PT32E_ROOT_LEVEL;
1483         } else {
1484                 context->gva_to_gpa = paging32_gva_to_gpa;
1485                 context->root_level = PT32_ROOT_LEVEL;
1486         }
1487
1488         return 0;
1489 }
1490
1491 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1492 {
1493         ASSERT(vcpu);
1494         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1495
1496         if (!is_paging(vcpu))
1497                 return nonpaging_init_context(vcpu);
1498         else if (is_long_mode(vcpu))
1499                 return paging64_init_context(vcpu);
1500         else if (is_pae(vcpu))
1501                 return paging32E_init_context(vcpu);
1502         else
1503                 return paging32_init_context(vcpu);
1504 }
1505
1506 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1507 {
1508         if (tdp_enabled)
1509                 return init_kvm_tdp_mmu(vcpu);
1510         else
1511                 return init_kvm_softmmu(vcpu);
1512 }
1513
1514 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1515 {
1516         ASSERT(vcpu);
1517         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1518                 vcpu->arch.mmu.free(vcpu);
1519                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1520         }
1521 }
1522
1523 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1524 {
1525         destroy_kvm_mmu(vcpu);
1526         return init_kvm_mmu(vcpu);
1527 }
1528 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1529
1530 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1531 {
1532         int r;
1533
1534         r = mmu_topup_memory_caches(vcpu);
1535         if (r)
1536                 goto out;
1537         spin_lock(&vcpu->kvm->mmu_lock);
1538         kvm_mmu_free_some_pages(vcpu);
1539         mmu_alloc_roots(vcpu);
1540         spin_unlock(&vcpu->kvm->mmu_lock);
1541         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
1542         kvm_mmu_flush_tlb(vcpu);
1543 out:
1544         return r;
1545 }
1546 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1547
1548 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1549 {
1550         mmu_free_roots(vcpu);
1551 }
1552
1553 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1554                                   struct kvm_mmu_page *sp,
1555                                   u64 *spte)
1556 {
1557         u64 pte;
1558         struct kvm_mmu_page *child;
1559
1560         pte = *spte;
1561         if (is_shadow_present_pte(pte)) {
1562                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1563                     is_large_pte(pte))
1564                         rmap_remove(vcpu->kvm, spte);
1565                 else {
1566                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1567                         mmu_page_remove_parent_pte(child, spte);
1568                 }
1569         }
1570         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1571         if (is_large_pte(pte))
1572                 --vcpu->kvm->stat.lpages;
1573 }
1574
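/*
 * Install the newly written guest pte into the shadow page table.  Only
 * last-level sptes (or large-page mappings) are updated in place; a write
 * to a non-leaf shadow pte has already been zapped by the caller, so it is
 * merely accounted as mmu_pde_zapped and rebuilt on the next fault.
 */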
1575 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1576                                   struct kvm_mmu_page *sp,
1577                                   u64 *spte,
1578                                   const void *new)
1579 {
1580         if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
1581             && !vcpu->arch.update_pte.largepage) {
1582                 ++vcpu->kvm->stat.mmu_pde_zapped;
1583                 return;
1584         }
1585
1586         ++vcpu->kvm->stat.mmu_pte_updated;
1587         if (sp->role.glevels == PT32_ROOT_LEVEL)
1588                 paging32_update_pte(vcpu, sp, spte, new);
1589         else
1590                 paging64_update_pte(vcpu, sp, spte, new);
1591 }
1592
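/*
 * A remote tlb flush is needed only if another vcpu could still be using a
 * translation that the new spte no longer allows: the mapping was dropped,
 * the target frame changed, or a permission bit was removed.  Otherwise a
 * local flush is sufficient.
 */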
1593 static bool need_remote_flush(u64 old, u64 new)
1594 {
1595         if (!is_shadow_present_pte(old))
1596                 return false;
1597         if (!is_shadow_present_pte(new))
1598                 return true;
1599         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1600                 return true;
1601         old ^= PT64_NX_MASK;
1602         new ^= PT64_NX_MASK;
1603         return (old & ~new & PT64_PERM_MASK) != 0;
1604 }
1605
1606 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1607 {
1608         if (need_remote_flush(old, new))
1609                 kvm_flush_remote_tlbs(vcpu->kvm);
1610         else
1611                 kvm_mmu_flush_tlb(vcpu);
1612 }
1613
1614 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1615 {
1616         u64 *spte = vcpu->arch.last_pte_updated;
1617
1618         return !!(spte && (*spte & PT_ACCESSED_MASK));
1619 }
1620
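/*
 * Peek at the guest pte being written.  If it looks like a present pte,
 * pin the host page it points to (and note whether it is backed by a large
 * page) before taking mmu_lock, since gfn_to_page() may sleep and cannot
 * be called under the spinlock.  The reference is dropped at the end of
 * kvm_mmu_pte_write().
 */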
1621 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1622                                           const u8 *new, int bytes)
1623 {
1624         gfn_t gfn;
1625         int r;
1626         u64 gpte = 0;
1627         struct page *page;
1628
1629         vcpu->arch.update_pte.largepage = 0;
1630
1631         if (bytes != 4 && bytes != 8)
1632                 return;
1633
1634         /*
1635          * Assume that the pte write is on a page table of the same type
1636          * as the current vcpu paging mode.  This is nearly always true
1637          * (might be false while changing modes).  Note it is verified later
1638          * by update_pte().
1639          */
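        /*
         * Worked example (sketch): a pae guest updates a 64-bit gpte at
         * gpa 0x1000 with two 32-bit writes, to 0x1000 and 0x1004.  For the
         * write to 0x1004, bytes == 4 and gpa % 4 == 0, so the full 8-byte
         * gpte is read back from 0x1000 and its upper half is patched with
         * the new data; either half thus yields a complete candidate gpte
         * for the prefetch below.
         */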
1640         if (is_pae(vcpu)) {
1641                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1642                 if ((bytes == 4) && (gpa % 4 == 0)) {
1643                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1644                         if (r)
1645                                 return;
1646                         memcpy((void *)&gpte + (gpa % 8), new, 4);
1647                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1648                         memcpy((void *)&gpte, new, 8);
1649                 }
1650         } else {
1651                 if ((bytes == 4) && (gpa % 4 == 0))
1652                         memcpy((void *)&gpte, new, 4);
1653         }
1654         if (!is_present_pte(gpte))
1655                 return;
1656         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1657
1658         down_read(&current->mm->mmap_sem);
1659         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1660                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1661                 vcpu->arch.update_pte.largepage = 1;
1662         }
1663         page = gfn_to_page(vcpu->kvm, gfn);
1664         up_read(&current->mm->mmap_sem);
1665
1666         if (is_error_page(page)) {
1667                 kvm_release_page_clean(page);
1668                 return;
1669         }
1670         vcpu->arch.update_pte.gfn = gfn;
1671         vcpu->arch.update_pte.page = page;
1672 }
1673
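/*
 * Called when the guest writes to a gpa that is shadowed as a page table.
 * All shadow pages for that gfn are visited: the affected sptes are zapped
 * and, where possible, refilled from the written guest pte.  Misaligned or
 * flooded writes are taken as a hint that the page is no longer really
 * used as a page table, in which case it is unshadowed instead.
 */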
1674 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1675                        const u8 *new, int bytes)
1676 {
1677         gfn_t gfn = gpa >> PAGE_SHIFT;
1678         struct kvm_mmu_page *sp;
1679         struct hlist_node *node, *n;
1680         struct hlist_head *bucket;
1681         unsigned index;
1682         u64 entry, gentry;
1683         u64 *spte;
1684         unsigned offset = offset_in_page(gpa);
1685         unsigned pte_size;
1686         unsigned page_offset;
1687         unsigned misaligned;
1688         unsigned quadrant;
1689         int level;
1690         int flooded = 0;
1691         int npte;
1692         int r;
1693
1694         pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1695         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1696         spin_lock(&vcpu->kvm->mmu_lock);
1697         kvm_mmu_free_some_pages(vcpu);
1698         ++vcpu->kvm->stat.mmu_pte_write;
1699         kvm_mmu_audit(vcpu, "pre pte write");
1700         if (gfn == vcpu->arch.last_pt_write_gfn
1701             && !last_updated_pte_accessed(vcpu)) {
1702                 ++vcpu->arch.last_pt_write_count;
1703                 if (vcpu->arch.last_pt_write_count >= 3)
1704                         flooded = 1;
1705         } else {
1706                 vcpu->arch.last_pt_write_gfn = gfn;
1707                 vcpu->arch.last_pt_write_count = 1;
1708                 vcpu->arch.last_pte_updated = NULL;
1709         }
1710         index = kvm_page_table_hashfn(gfn);
1711         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1712         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1713                 if (sp->gfn != gfn || sp->role.metaphysical)
1714                         continue;
1715                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1716                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1717                 misaligned |= bytes < 4;
1718                 if (misaligned || flooded) {
1719                         /*
1720                          * Misaligned accesses are too much trouble to fix
1721                          * up; also, they usually indicate a page is not used
1722                          * as a page table.
1723                          *
1724                          * If we're seeing too many writes to a page,
1725                          * it may no longer be a page table, or we may be
1726                          * forking, in which case it is better to unmap the
1727                          * page.
1728                          */
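                        /*
                         * Example: with 64-bit ptes (pte_size == 8), a
                         * 4-byte write at offset 6 gives
                         * (6 ^ 9) & ~7 == 15 & ~7 == 8, i.e. the write
                         * straddles two ptes and counts as misaligned.
                         */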
1729                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1730                                  gpa, bytes, sp->role.word);
1731                         kvm_mmu_zap_page(vcpu->kvm, sp);
1732                         ++vcpu->kvm->stat.mmu_flooded;
1733                         continue;
1734                 }
1735                 page_offset = offset;
1736                 level = sp->role.level;
1737                 npte = 1;
1738                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
1739                         page_offset <<= 1;      /* 32->64 */
1740                         /*
1741                          * A 32-bit pde maps 4MB while the shadow pdes map
1742                          * only 2MB.  So we need to double the offset again
1743                          * and zap two pdes instead of one.
1744                          */
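                        /*
                         * Example: a guest pde written at page offset 0x7fc
                         * becomes shadow offset 0xff8 after the 32->64
                         * doubling above; the root-level doubling below
                         * turns it into 0x1ff0, i.e. quadrant 1, shadow
                         * pdes 0x1fe and 0x1ff.
                         */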
1745                         if (level == PT32_ROOT_LEVEL) {
1746                                 page_offset &= ~7; /* kill rounding error */
1747                                 page_offset <<= 1;
1748                                 npte = 2;
1749                         }
1750                         quadrant = page_offset >> PAGE_SHIFT;
1751                         page_offset &= ~PAGE_MASK;
1752                         if (quadrant != sp->role.quadrant)
1753                                 continue;
1754                 }
1755                 spte = &sp->spt[page_offset / sizeof(*spte)];
1756                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1757                         gentry = 0;
1758                         r = kvm_read_guest_atomic(vcpu->kvm,
1759                                                   gpa & ~(u64)(pte_size - 1),
1760                                                   &gentry, pte_size);
1761                         new = (const void *)&gentry;
1762                         if (r < 0)
1763                                 new = NULL;
1764                 }
1765                 while (npte--) {
1766                         entry = *spte;
1767                         mmu_pte_write_zap_pte(vcpu, sp, spte);
1768                         if (new)
1769                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
1770                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1771                         ++spte;
1772                 }
1773         }
1774         kvm_mmu_audit(vcpu, "post pte write");
1775         spin_unlock(&vcpu->kvm->mmu_lock);
1776         if (vcpu->arch.update_pte.page) {
1777                 kvm_release_page_clean(vcpu->arch.update_pte.page);
1778                 vcpu->arch.update_pte.page = NULL;
1779         }
1780 }
1781
1782 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1783 {
1784         gpa_t gpa;
1785         int r;
1786
1787         down_read(&vcpu->kvm->slots_lock);
1788         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1789         up_read(&vcpu->kvm->slots_lock);
1790
1791         spin_lock(&vcpu->kvm->mmu_lock);
1792         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1793         spin_unlock(&vcpu->kvm->mmu_lock);
1794         return r;
1795 }
1796
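/*
 * Recycle shadow pages when the free pool runs low: zap entries from the
 * tail of the active_mmu_pages list until at least KVM_REFILL_PAGES pages
 * are free again.
 */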
1797 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1798 {
1799         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
1800                 struct kvm_mmu_page *sp;
1801
1802                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
1803                                   struct kvm_mmu_page, link);
1804                 kvm_mmu_zap_page(vcpu->kvm, sp);
1805                 ++vcpu->kvm->stat.mmu_recycled;
1806         }
1807 }
1808
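/*
 * Top-level page fault entry point.  The mode-specific handler returns
 * < 0 on error, 0 when it fixed the fault itself (resume the guest), and
 * > 0 when the access has to be emulated (e.g. mmio or a write to a
 * shadowed page table).
 */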
1809 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1810 {
1811         int r;
1812         enum emulation_result er;
1813
1814         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
1815         if (r < 0)
1816                 goto out;
1817
1818         if (!r) {
1819                 r = 1;
1820                 goto out;
1821         }
1822
1823         r = mmu_topup_memory_caches(vcpu);
1824         if (r)
1825                 goto out;
1826
1827         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1828
1829         switch (er) {
1830         case EMULATE_DONE:
1831                 return 1;
1832         case EMULATE_DO_MMIO:
1833                 ++vcpu->stat.mmio_exits;
1834                 return 0;
1835         case EMULATE_FAIL:
1836                 kvm_report_emulation_failure(vcpu, "pagetable");
1837                 return 1;
1838         default:
1839                 BUG();
1840         }
1841 out:
1842         return r;
1843 }
1844 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1845
1846 void kvm_enable_tdp(void)
1847 {
1848         tdp_enabled = true;
1849 }
1850 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1851
1852 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1853 {
1854         struct kvm_mmu_page *sp;
1855
1856         while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1857                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
1858                                   struct kvm_mmu_page, link);
1859                 kvm_mmu_zap_page(vcpu->kvm, sp);
1860         }
1861         free_page((unsigned long)vcpu->arch.mmu.pae_root);
1862 }
1863
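/*
 * Per-vcpu mmu allocation: initialize the free shadow page count and
 * allocate the page backing the four pae root entries (pae_root).
 */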
1864 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1865 {
1866         struct page *page;
1867         int i;
1868
1869         ASSERT(vcpu);
1870
1871         if (vcpu->kvm->arch.n_requested_mmu_pages)
1872                 vcpu->kvm->arch.n_free_mmu_pages =
1873                                         vcpu->kvm->arch.n_requested_mmu_pages;
1874         else
1875                 vcpu->kvm->arch.n_free_mmu_pages =
1876                                         vcpu->kvm->arch.n_alloc_mmu_pages;
1877         /*
1878          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1879          * Therefore the pae root table has to be allocated in the first
1880          * 4GB of memory, which happens to fit the DMA32 zone.
1881          */
1882         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1883         if (!page)
1884                 goto error_1;
1885         vcpu->arch.mmu.pae_root = page_address(page);
1886         for (i = 0; i < 4; ++i)
1887                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1888
1889         return 0;
1890
1891 error_1:
1892         free_mmu_pages(vcpu);
1893         return -ENOMEM;
1894 }
1895
1896 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1897 {
1898         ASSERT(vcpu);
1899         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1900
1901         return alloc_mmu_pages(vcpu);
1902 }
1903
1904 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1905 {
1906         ASSERT(vcpu);
1907         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1908
1909         return init_kvm_mmu(vcpu);
1910 }
1911
1912 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1913 {
1914         ASSERT(vcpu);
1915
1916         destroy_kvm_mmu(vcpu);
1917         free_mmu_pages(vcpu);
1918         mmu_free_memory_caches(vcpu);
1919 }
1920
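/*
 * Remove write access from every shadow pte that maps a page in the given
 * memslot, so that subsequent guest writes fault and can be tracked
 * (typically for dirty logging).
 */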
1921 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1922 {
1923         struct kvm_mmu_page *sp;
1924
1925         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
1926                 int i;
1927                 u64 *pt;
1928
1929                 if (!test_bit(slot, &sp->slot_bitmap))
1930                         continue;
1931
1932                 pt = sp->spt;
1933                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1934                         /* avoid RMW */
1935                         if (pt[i] & PT_WRITABLE_MASK)
1936                                 pt[i] &= ~PT_WRITABLE_MASK;
1937         }
1938 }
1939
1940 void kvm_mmu_zap_all(struct kvm *kvm)
1941 {
1942         struct kvm_mmu_page *sp, *node;
1943
1944         spin_lock(&kvm->mmu_lock);
1945         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
1946                 kvm_mmu_zap_page(kvm, sp);
1947         spin_unlock(&kvm->mmu_lock);
1948
1949         kvm_flush_remote_tlbs(kvm);
1950 }
1951
1952 void kvm_mmu_module_exit(void)
1953 {
1954         if (pte_chain_cache)
1955                 kmem_cache_destroy(pte_chain_cache);
1956         if (rmap_desc_cache)
1957                 kmem_cache_destroy(rmap_desc_cache);
1958         if (mmu_page_header_cache)
1959                 kmem_cache_destroy(mmu_page_header_cache);
1960 }
1961
1962 int kvm_mmu_module_init(void)
1963 {
1964         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1965                                             sizeof(struct kvm_pte_chain),
1966                                             0, 0, NULL);
1967         if (!pte_chain_cache)
1968                 goto nomem;
1969         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1970                                             sizeof(struct kvm_rmap_desc),
1971                                             0, 0, NULL);
1972         if (!rmap_desc_cache)
1973                 goto nomem;
1974
1975         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1976                                                   sizeof(struct kvm_mmu_page),
1977                                                   0, 0, NULL);
1978         if (!mmu_page_header_cache)
1979                 goto nomem;
1980
1981         return 0;
1982
1983 nomem:
1984         kvm_mmu_module_exit();
1985         return -ENOMEM;
1986 }
1987
1988 /*
1989  * Calculate the number of mmu pages needed for a kvm instance.
1990  */
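/*
 * Example (assuming KVM_PERMILLE_MMU_PAGES is 20, i.e. 2%): a guest with
 * 1048576 memslot pages (4GB) would get 1048576 * 20 / 1000 = 20971 shadow
 * pages, subject to the KVM_MIN_ALLOC_MMU_PAGES floor.
 */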
1991 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1992 {
1993         int i;
1994         unsigned int nr_mmu_pages;
1995         unsigned int  nr_pages = 0;
1996
1997         for (i = 0; i < kvm->nmemslots; i++)
1998                 nr_pages += kvm->memslots[i].npages;
1999
2000         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2001         nr_mmu_pages = max(nr_mmu_pages,
2002                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2003
2004         return nr_mmu_pages;
2005 }
2006
2007 #ifdef AUDIT
2008
2009 static const char *audit_msg;
2010
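/* Sign-extend bit 47 so the address matches the canonical form on x86_64. */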
2011 static gva_t canonicalize(gva_t gva)
2012 {
2013 #ifdef CONFIG_X86_64
2014         gva = (long long)(gva << 16) >> 16;
2015 #endif
2016         return gva;
2017 }
2018
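/*
 * Recursively walk a shadow page table and verify that every present leaf
 * spte maps to the host page that currently backs the corresponding guest
 * virtual address.
 */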
2019 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2020                                 gva_t va, int level)
2021 {
2022         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2023         int i;
2024         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2025
2026         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2027                 u64 ent = pt[i];
2028
2029                 if (ent == shadow_trap_nonpresent_pte)
2030                         continue;
2031
2032                 va = canonicalize(va);
2033                 if (level > 1) {
2034                         if (ent == shadow_notrap_nonpresent_pte)
2035                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
2036                                        " in nonleaf level: levels %d gva %lx"
2037                                        " level %d pte %llx\n", audit_msg,
2038                                        vcpu->arch.mmu.root_level, va, level, ent);
2039
2040                         audit_mappings_page(vcpu, ent, va, level - 1);
2041                 } else {
2042                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
2043                         struct page *page = gpa_to_page(vcpu, gpa);
2044                         hpa_t hpa = page_to_phys(page);
2045
2046                         if (is_shadow_present_pte(ent)
2047                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
2048                                 printk(KERN_ERR "audit error: (%s) levels %d"
2049                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
2050                                        audit_msg, vcpu->arch.mmu.root_level,
2051                                        va, gpa, hpa, ent,
2052                                        is_shadow_present_pte(ent));
2053                         else if (ent == shadow_notrap_nonpresent_pte
2054                                  && !is_error_hpa(hpa))
2055                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
2056                                        " valid guest gva %lx\n", audit_msg, va);
2057                         kvm_release_page_clean(page);
2058
2059                 }
2060         }
2061 }
2062
2063 static void audit_mappings(struct kvm_vcpu *vcpu)
2064 {
2065         unsigned i;
2066
2067         if (vcpu->arch.mmu.root_level == 4)
2068                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
2069         else
2070                 for (i = 0; i < 4; ++i)
2071                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
2072                                 audit_mappings_page(vcpu,
2073                                                     vcpu->arch.mmu.pae_root[i],
2074                                                     i << 30,
2075                                                     2);
2076 }
2077
2078 static int count_rmaps(struct kvm_vcpu *vcpu)
2079 {
2080         int nmaps = 0;
2081         int i, j, k;
2082
2083         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2084                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2085                 struct kvm_rmap_desc *d;
2086
2087                 for (j = 0; j < m->npages; ++j) {
2088                         unsigned long *rmapp = &m->rmap[j];
2089
2090                         if (!*rmapp)
2091                                 continue;
2092                         if (!(*rmapp & 1)) {
2093                                 ++nmaps;
2094                                 continue;
2095                         }
2096                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
2097                         while (d) {
2098                                 for (k = 0; k < RMAP_EXT; ++k)
2099                                         if (d->shadow_ptes[k])
2100                                                 ++nmaps;
2101                                         else
2102                                                 break;
2103                                 d = d->more;
2104                         }
2105                 }
2106         }
2107         return nmaps;
2108 }
2109
2110 static int count_writable_mappings(struct kvm_vcpu *vcpu)
2111 {
2112         int nmaps = 0;
2113         struct kvm_mmu_page *sp;
2114         int i;
2115
2116         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2117                 u64 *pt = sp->spt;
2118
2119                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
2120                         continue;
2121
2122                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2123                         u64 ent = pt[i];
2124
2125                         if (!(ent & PT_PRESENT_MASK))
2126                                 continue;
2127                         if (!(ent & PT_WRITABLE_MASK))
2128                                 continue;
2129                         ++nmaps;
2130                 }
2131         }
2132         return nmaps;
2133 }
2134
2135 static void audit_rmap(struct kvm_vcpu *vcpu)
2136 {
2137         int n_rmap = count_rmaps(vcpu);
2138         int n_actual = count_writable_mappings(vcpu);
2139
2140         if (n_rmap != n_actual)
2141                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
2142                        __FUNCTION__, audit_msg, n_rmap, n_actual);
2143 }
2144
2145 static void audit_write_protection(struct kvm_vcpu *vcpu)
2146 {
2147         struct kvm_mmu_page *sp;
2148         struct kvm_memory_slot *slot;
2149         unsigned long *rmapp;
2150         gfn_t gfn;
2151
2152         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2153                 if (sp->role.metaphysical)
2154                         continue;
2155
2156                 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2157                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
2158                 rmapp = &slot->rmap[gfn - slot->base_gfn];
2159                 if (*rmapp)
2160                         printk(KERN_ERR "%s: (%s) shadow page has writable"
2161                                " mappings: gfn %lx role %x\n",
2162                                __FUNCTION__, audit_msg, sp->gfn,
2163                                sp->role.word);
2164         }
2165 }
2166
2167 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2168 {
2169         int olddbg = dbg;
2170
2171         dbg = 0;
2172         audit_msg = msg;
2173         audit_rmap(vcpu);
2174         audit_write_protection(vcpu);
2175         audit_mappings(vcpu);
2176         dbg = olddbg;
2177 }
2178
2179 #endif