KVM: MMU: Optimize page unshadowing
arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21
22 #include <linux/kvm_host.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/mm.h>
26 #include <linux/highmem.h>
27 #include <linux/module.h>
28 #include <linux/swap.h>
29 #include <linux/hugetlb.h>
30 #include <linux/compiler.h>
31
32 #include <asm/page.h>
33 #include <asm/cmpxchg.h>
34 #include <asm/io.h>
35 #include <asm/vmx.h>
36
37 /*
38  * When this variable is set to true, it enables Two-Dimensional Paging (TDP),
39  * where the hardware walks 2 page tables:
40  * 1. the guest-virtual to guest-physical table
41  * 2. while doing 1., it also walks the guest-physical to host-physical table
42  * If the hardware supports this, we don't need to do shadow paging.
43  */
44 bool tdp_enabled = false;
45
46 #undef MMU_DEBUG
47
48 #undef AUDIT
49
50 #ifdef AUDIT
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52 #else
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54 #endif
55
56 #ifdef MMU_DEBUG
57
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61 #else
62
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
65
66 #endif
67
68 #if defined(MMU_DEBUG) || defined(AUDIT)
69 static int dbg = 0;
70 module_param(dbg, bool, 0644);
71 #endif
72
73 static int oos_shadow = 1;
74 module_param(oos_shadow, bool, 0644);
75
76 #ifndef MMU_DEBUG
77 #define ASSERT(x) do { } while (0)
78 #else
79 #define ASSERT(x)                                                       \
80         if (!(x)) {                                                     \
81                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
82                        __FILE__, __LINE__, #x);                         \
83         }
84 #endif
85
86 #define PT_FIRST_AVAIL_BITS_SHIFT 9
87 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
88
89 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
90
91 #define PT64_LEVEL_BITS 9
92
93 #define PT64_LEVEL_SHIFT(level) \
94                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
95
96 #define PT64_LEVEL_MASK(level) \
97                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
98
99 #define PT64_INDEX(address, level)\
100         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
101
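/*
 * Illustrative note (values assume 4 KB pages, i.e. PAGE_SHIFT == 12):
 * each 64-bit paging level above decodes 9 bits of the address, so
 *
 *	PT64_LEVEL_SHIFT(1) == 12	PT64_INDEX(addr, 1) uses bits 12..20
 *	PT64_LEVEL_SHIFT(2) == 21	PT64_INDEX(addr, 2) uses bits 21..29
 *	PT64_LEVEL_SHIFT(3) == 30	PT64_INDEX(addr, 3) uses bits 30..38
 *	PT64_LEVEL_SHIFT(4) == 39	PT64_INDEX(addr, 4) uses bits 39..47
 *
 * e.g. PT64_INDEX(0x201000, 1) == (0x201000 >> 12) & 511 == 1.
 */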
102
103 #define PT32_LEVEL_BITS 10
104
105 #define PT32_LEVEL_SHIFT(level) \
106                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
107
108 #define PT32_LEVEL_MASK(level) \
109                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
110
111 #define PT32_INDEX(address, level)\
112         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
113
114
115 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
116 #define PT64_DIR_BASE_ADDR_MASK \
117         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
118
119 #define PT32_BASE_ADDR_MASK PAGE_MASK
120 #define PT32_DIR_BASE_ADDR_MASK \
121         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
122
123 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
124                         | PT64_NX_MASK)
125
126 #define PFERR_PRESENT_MASK (1U << 0)
127 #define PFERR_WRITE_MASK (1U << 1)
128 #define PFERR_USER_MASK (1U << 2)
129 #define PFERR_FETCH_MASK (1U << 4)
130
131 #define PT_DIRECTORY_LEVEL 2
132 #define PT_PAGE_TABLE_LEVEL 1
133
134 #define RMAP_EXT 4
135
136 #define ACC_EXEC_MASK    1
137 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
138 #define ACC_USER_MASK    PT_USER_MASK
139 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
140
141 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
142
143 struct kvm_rmap_desc {
144         u64 *shadow_ptes[RMAP_EXT];
145         struct kvm_rmap_desc *more;
146 };
147
148 struct kvm_shadow_walk_iterator {
149         u64 addr;
150         hpa_t shadow_addr;
151         int level;
152         u64 *sptep;
153         unsigned index;
154 };
155
156 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
157         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
158              shadow_walk_okay(&(_walker));                      \
159              shadow_walk_next(&(_walker)))
160
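/*
 * Sketch of how a caller might use the iterator above (hypothetical
 * example, not a function in this file):
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, addr, it) {
 *		if (it.level == PT_PAGE_TABLE_LEVEL) {
 *			... install the final spte at it.sptep ...
 *			break;
 *		}
 *		... otherwise link in an intermediate shadow page ...
 *	}
 */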
161
162 struct kvm_unsync_walk {
163         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
164 };
165
166 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
167
168 static struct kmem_cache *pte_chain_cache;
169 static struct kmem_cache *rmap_desc_cache;
170 static struct kmem_cache *mmu_page_header_cache;
171
172 static u64 __read_mostly shadow_trap_nonpresent_pte;
173 static u64 __read_mostly shadow_notrap_nonpresent_pte;
174 static u64 __read_mostly shadow_base_present_pte;
175 static u64 __read_mostly shadow_nx_mask;
176 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
177 static u64 __read_mostly shadow_user_mask;
178 static u64 __read_mostly shadow_accessed_mask;
179 static u64 __read_mostly shadow_dirty_mask;
180 static u64 __read_mostly shadow_mt_mask;
181
182 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
183 {
184         shadow_trap_nonpresent_pte = trap_pte;
185         shadow_notrap_nonpresent_pte = notrap_pte;
186 }
187 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
188
189 void kvm_mmu_set_base_ptes(u64 base_pte)
190 {
191         shadow_base_present_pte = base_pte;
192 }
193 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
194
195 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
196                 u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
197 {
198         shadow_user_mask = user_mask;
199         shadow_accessed_mask = accessed_mask;
200         shadow_dirty_mask = dirty_mask;
201         shadow_nx_mask = nx_mask;
202         shadow_x_mask = x_mask;
203         shadow_mt_mask = mt_mask;
204 }
205 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
206
207 static int is_write_protection(struct kvm_vcpu *vcpu)
208 {
209         return vcpu->arch.cr0 & X86_CR0_WP;
210 }
211
212 static int is_cpuid_PSE36(void)
213 {
214         return 1;
215 }
216
217 static int is_nx(struct kvm_vcpu *vcpu)
218 {
219         return vcpu->arch.shadow_efer & EFER_NX;
220 }
221
222 static int is_present_pte(unsigned long pte)
223 {
224         return pte & PT_PRESENT_MASK;
225 }
226
227 static int is_shadow_present_pte(u64 pte)
228 {
229         return pte != shadow_trap_nonpresent_pte
230                 && pte != shadow_notrap_nonpresent_pte;
231 }
232
233 static int is_large_pte(u64 pte)
234 {
235         return pte & PT_PAGE_SIZE_MASK;
236 }
237
238 static int is_writeble_pte(unsigned long pte)
239 {
240         return pte & PT_WRITABLE_MASK;
241 }
242
243 static int is_dirty_pte(unsigned long pte)
244 {
245         return pte & shadow_dirty_mask;
246 }
247
248 static int is_rmap_pte(u64 pte)
249 {
250         return is_shadow_present_pte(pte);
251 }
252
253 static pfn_t spte_to_pfn(u64 pte)
254 {
255         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
256 }
257
258 static gfn_t pse36_gfn_delta(u32 gpte)
259 {
260         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
261
262         return (gpte & PT32_DIR_PSE36_MASK) << shift;
263 }
264
265 static void set_shadow_pte(u64 *sptep, u64 spte)
266 {
267 #ifdef CONFIG_X86_64
268         set_64bit((unsigned long *)sptep, spte);
269 #else
270         set_64bit((unsigned long long *)sptep, spte);
271 #endif
272 }
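
/*
 * Note on the helper above: set_64bit() makes the spte update a single
 * atomic 64-bit store.  On 32-bit hosts a plain u64 assignment would be
 * split into two 32-bit writes, and the hardware page walker or another
 * vcpu could observe a half-written entry.
 */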
273
274 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
275                                   struct kmem_cache *base_cache, int min)
276 {
277         void *obj;
278
279         if (cache->nobjs >= min)
280                 return 0;
281         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
282                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
283                 if (!obj)
284                         return -ENOMEM;
285                 cache->objects[cache->nobjs++] = obj;
286         }
287         return 0;
288 }
289
290 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
291 {
292         while (mc->nobjs)
293                 kfree(mc->objects[--mc->nobjs]);
294 }
295
296 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
297                                        int min)
298 {
299         struct page *page;
300
301         if (cache->nobjs >= min)
302                 return 0;
303         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
304                 page = alloc_page(GFP_KERNEL);
305                 if (!page)
306                         return -ENOMEM;
307                 set_page_private(page, 0);
308                 cache->objects[cache->nobjs++] = page_address(page);
309         }
310         return 0;
311 }
312
313 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
314 {
315         while (mc->nobjs)
316                 free_page((unsigned long)mc->objects[--mc->nobjs]);
317 }
318
319 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
320 {
321         int r;
322
323         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
324                                    pte_chain_cache, 4);
325         if (r)
326                 goto out;
327         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
328                                    rmap_desc_cache, 4);
329         if (r)
330                 goto out;
331         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
332         if (r)
333                 goto out;
334         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
335                                    mmu_page_header_cache, 4);
336 out:
337         return r;
338 }
339
340 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
341 {
342         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
343         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
344         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
345         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
346 }
347
348 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
349                                     size_t size)
350 {
351         void *p;
352
353         BUG_ON(!mc->nobjs);
354         p = mc->objects[--mc->nobjs];
355         memset(p, 0, size);
356         return p;
357 }
358
359 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
360 {
361         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
362                                       sizeof(struct kvm_pte_chain));
363 }
364
365 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
366 {
367         kfree(pc);
368 }
369
370 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
371 {
372         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
373                                       sizeof(struct kvm_rmap_desc));
374 }
375
376 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
377 {
378         kfree(rd);
379 }
380
381 /*
382  * Return the pointer to the largepage write count for a given
383  * gfn, handling slots that are not large page aligned.
384  */
385 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
386 {
387         unsigned long idx;
388
389         idx = (gfn / KVM_PAGES_PER_HPAGE) -
390               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
391         return &slot->lpage_info[idx].write_count;
392 }
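
/*
 * Worked example (assuming KVM_PAGES_PER_HPAGE == 512, i.e. 4 KB base
 * pages and 2 MB huge pages): for a slot with base_gfn == 1000, which is
 * not huge-page aligned, gfn 1600 maps to write_count index
 * (1600 / 512) - (1000 / 512) == 3 - 1 == 2.
 */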
393
394 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
395 {
396         int *write_count;
397
398         gfn = unalias_gfn(kvm, gfn);
399         write_count = slot_largepage_idx(gfn,
400                                          gfn_to_memslot_unaliased(kvm, gfn));
401         *write_count += 1;
402 }
403
404 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
405 {
406         int *write_count;
407
408         gfn = unalias_gfn(kvm, gfn);
409         write_count = slot_largepage_idx(gfn,
410                                          gfn_to_memslot_unaliased(kvm, gfn));
411         *write_count -= 1;
412         WARN_ON(*write_count < 0);
413 }
414
415 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
416 {
417         struct kvm_memory_slot *slot;
418         int *largepage_idx;
419
420         gfn = unalias_gfn(kvm, gfn);
421         slot = gfn_to_memslot_unaliased(kvm, gfn);
422         if (slot) {
423                 largepage_idx = slot_largepage_idx(gfn, slot);
424                 return *largepage_idx;
425         }
426
427         return 1;
428 }
429
430 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
431 {
432         struct vm_area_struct *vma;
433         unsigned long addr;
434         int ret = 0;
435
436         addr = gfn_to_hva(kvm, gfn);
437         if (kvm_is_error_hva(addr))
438                 return ret;
439
440         down_read(&current->mm->mmap_sem);
441         vma = find_vma(current->mm, addr);
442         if (vma && is_vm_hugetlb_page(vma))
443                 ret = 1;
444         up_read(&current->mm->mmap_sem);
445
446         return ret;
447 }
448
449 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
450 {
451         struct kvm_memory_slot *slot;
452
453         if (has_wrprotected_page(vcpu->kvm, large_gfn))
454                 return 0;
455
456         if (!host_largepage_backed(vcpu->kvm, large_gfn))
457                 return 0;
458
459         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
460         if (slot && slot->dirty_bitmap)
461                 return 0;
462
463         return 1;
464 }
465
466 /*
467  * Take gfn and return the reverse mapping to it.
468  * Note: gfn must be unaliased before this function gets called
469  */
470
471 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
472 {
473         struct kvm_memory_slot *slot;
474         unsigned long idx;
475
476         slot = gfn_to_memslot(kvm, gfn);
477         if (!lpage)
478                 return &slot->rmap[gfn - slot->base_gfn];
479
480         idx = (gfn / KVM_PAGES_PER_HPAGE) -
481               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
482
483         return &slot->lpage_info[idx].rmap_pde;
484 }
485
486 /*
487  * Reverse mapping data structures:
488  *
489  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
490  * that points to page_address(page).
491  *
492  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
493  * containing more mappings.
494  */
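/*
 * The possible states of *rmapp, as manipulated by rmap_add() and
 * rmap_remove() below:
 *
 *	*rmapp == 0			no mappings
 *	*rmapp == (unsigned long)spte	exactly one spte, stored directly
 *	*rmapp == (unsigned long)desc | 1
 *					bit zero tags a kvm_rmap_desc chain,
 *					recovered with (*rmapp & ~1ul); each
 *					desc holds up to RMAP_EXT sptes plus
 *					a 'more' pointer to the next desc.
 */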
495 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
496 {
497         struct kvm_mmu_page *sp;
498         struct kvm_rmap_desc *desc;
499         unsigned long *rmapp;
500         int i;
501
502         if (!is_rmap_pte(*spte))
503                 return;
504         gfn = unalias_gfn(vcpu->kvm, gfn);
505         sp = page_header(__pa(spte));
506         sp->gfns[spte - sp->spt] = gfn;
507         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
508         if (!*rmapp) {
509                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
510                 *rmapp = (unsigned long)spte;
511         } else if (!(*rmapp & 1)) {
512                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
513                 desc = mmu_alloc_rmap_desc(vcpu);
514                 desc->shadow_ptes[0] = (u64 *)*rmapp;
515                 desc->shadow_ptes[1] = spte;
516                 *rmapp = (unsigned long)desc | 1;
517         } else {
518                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
519                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
520                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
521                         desc = desc->more;
522                 if (desc->shadow_ptes[RMAP_EXT-1]) {
523                         desc->more = mmu_alloc_rmap_desc(vcpu);
524                         desc = desc->more;
525                 }
526                 for (i = 0; desc->shadow_ptes[i]; ++i)
527                         ;
528                 desc->shadow_ptes[i] = spte;
529         }
530 }
531
532 static void rmap_desc_remove_entry(unsigned long *rmapp,
533                                    struct kvm_rmap_desc *desc,
534                                    int i,
535                                    struct kvm_rmap_desc *prev_desc)
536 {
537         int j;
538
539         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
540                 ;
541         desc->shadow_ptes[i] = desc->shadow_ptes[j];
542         desc->shadow_ptes[j] = NULL;
543         if (j != 0)
544                 return;
545         if (!prev_desc && !desc->more)
546                 *rmapp = (unsigned long)desc->shadow_ptes[0];
547         else
548                 if (prev_desc)
549                         prev_desc->more = desc->more;
550                 else
551                         *rmapp = (unsigned long)desc->more | 1;
552         mmu_free_rmap_desc(desc);
553 }
554
555 static void rmap_remove(struct kvm *kvm, u64 *spte)
556 {
557         struct kvm_rmap_desc *desc;
558         struct kvm_rmap_desc *prev_desc;
559         struct kvm_mmu_page *sp;
560         pfn_t pfn;
561         unsigned long *rmapp;
562         int i;
563
564         if (!is_rmap_pte(*spte))
565                 return;
566         sp = page_header(__pa(spte));
567         pfn = spte_to_pfn(*spte);
568         if (*spte & shadow_accessed_mask)
569                 kvm_set_pfn_accessed(pfn);
570         if (is_writeble_pte(*spte))
571                 kvm_release_pfn_dirty(pfn);
572         else
573                 kvm_release_pfn_clean(pfn);
574         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
575         if (!*rmapp) {
576                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
577                 BUG();
578         } else if (!(*rmapp & 1)) {
579                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
580                 if ((u64 *)*rmapp != spte) {
581                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
582                                spte, *spte);
583                         BUG();
584                 }
585                 *rmapp = 0;
586         } else {
587                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
588                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
589                 prev_desc = NULL;
590                 while (desc) {
591                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
592                                 if (desc->shadow_ptes[i] == spte) {
593                                         rmap_desc_remove_entry(rmapp,
594                                                                desc, i,
595                                                                prev_desc);
596                                         return;
597                                 }
598                         prev_desc = desc;
599                         desc = desc->more;
600                 }
601                 BUG();
602         }
603 }
604
605 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
606 {
607         struct kvm_rmap_desc *desc;
608         struct kvm_rmap_desc *prev_desc;
609         u64 *prev_spte;
610         int i;
611
612         if (!*rmapp)
613                 return NULL;
614         else if (!(*rmapp & 1)) {
615                 if (!spte)
616                         return (u64 *)*rmapp;
617                 return NULL;
618         }
619         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
620         prev_desc = NULL;
621         prev_spte = NULL;
622         while (desc) {
623                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
624                         if (prev_spte == spte)
625                                 return desc->shadow_ptes[i];
626                         prev_spte = desc->shadow_ptes[i];
627                 }
628                 desc = desc->more;
629         }
630         return NULL;
631 }
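
/*
 * rmap_next() is an iterator: pass NULL to get the first spte for a gfn,
 * then pass the previous return value to advance, e.g. (as in
 * rmap_write_protect() below):
 *
 *	spte = rmap_next(kvm, rmapp, NULL);
 *	while (spte) {
 *		...
 *		spte = rmap_next(kvm, rmapp, spte);
 *	}
 */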
632
633 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
634 {
635         unsigned long *rmapp;
636         u64 *spte;
637         int write_protected = 0;
638
639         gfn = unalias_gfn(kvm, gfn);
640         rmapp = gfn_to_rmap(kvm, gfn, 0);
641
642         spte = rmap_next(kvm, rmapp, NULL);
643         while (spte) {
644                 BUG_ON(!spte);
645                 BUG_ON(!(*spte & PT_PRESENT_MASK));
646                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
647                 if (is_writeble_pte(*spte)) {
648                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
649                         write_protected = 1;
650                 }
651                 spte = rmap_next(kvm, rmapp, spte);
652         }
653         if (write_protected) {
654                 pfn_t pfn;
655
656                 spte = rmap_next(kvm, rmapp, NULL);
657                 pfn = spte_to_pfn(*spte);
658                 kvm_set_pfn_dirty(pfn);
659         }
660
661         /* check for huge page mappings */
662         rmapp = gfn_to_rmap(kvm, gfn, 1);
663         spte = rmap_next(kvm, rmapp, NULL);
664         while (spte) {
665                 BUG_ON(!spte);
666                 BUG_ON(!(*spte & PT_PRESENT_MASK));
667                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
668                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
669                 if (is_writeble_pte(*spte)) {
670                         rmap_remove(kvm, spte);
671                         --kvm->stat.lpages;
672                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
673                         spte = NULL;
674                         write_protected = 1;
675                 }
676                 spte = rmap_next(kvm, rmapp, spte);
677         }
678
679         return write_protected;
680 }
681
682 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
683 {
684         u64 *spte;
685         int need_tlb_flush = 0;
686
687         while ((spte = rmap_next(kvm, rmapp, NULL))) {
688                 BUG_ON(!(*spte & PT_PRESENT_MASK));
689                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
690                 rmap_remove(kvm, spte);
691                 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
692                 need_tlb_flush = 1;
693         }
694         return need_tlb_flush;
695 }
696
697 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
698                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
699 {
700         int i;
701         int retval = 0;
702
703         /*
704          * If mmap_sem isn't taken, we can look at the memslots with only
705          * the mmu_lock by skipping over the slots with userspace_addr == 0.
706          */
707         for (i = 0; i < kvm->nmemslots; i++) {
708                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
709                 unsigned long start = memslot->userspace_addr;
710                 unsigned long end;
711
712                 /* mmu_lock protects userspace_addr */
713                 if (!start)
714                         continue;
715
716                 end = start + (memslot->npages << PAGE_SHIFT);
717                 if (hva >= start && hva < end) {
718                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
719                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
720                         retval |= handler(kvm,
721                                           &memslot->lpage_info[
722                                                   gfn_offset /
723                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
724                 }
725         }
726
727         return retval;
728 }
729
730 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
731 {
732         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
733 }
734
735 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
736 {
737         u64 *spte;
738         int young = 0;
739
740         /* always return old for EPT */
741         if (!shadow_accessed_mask)
742                 return 0;
743
744         spte = rmap_next(kvm, rmapp, NULL);
745         while (spte) {
746                 int _young;
747                 u64 _spte = *spte;
748                 BUG_ON(!(_spte & PT_PRESENT_MASK));
749                 _young = _spte & PT_ACCESSED_MASK;
750                 if (_young) {
751                         young = 1;
752                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
753                 }
754                 spte = rmap_next(kvm, rmapp, spte);
755         }
756         return young;
757 }
758
759 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
760 {
761         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
762 }
763
764 #ifdef MMU_DEBUG
765 static int is_empty_shadow_page(u64 *spt)
766 {
767         u64 *pos;
768         u64 *end;
769
770         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
771                 if (is_shadow_present_pte(*pos)) {
772                         printk(KERN_ERR "%s: %p %llx\n", __func__,
773                                pos, *pos);
774                         return 0;
775                 }
776         return 1;
777 }
778 #endif
779
780 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
781 {
782         ASSERT(is_empty_shadow_page(sp->spt));
783         list_del(&sp->link);
784         __free_page(virt_to_page(sp->spt));
785         __free_page(virt_to_page(sp->gfns));
786         kfree(sp);
787         ++kvm->arch.n_free_mmu_pages;
788 }
789
790 static unsigned kvm_page_table_hashfn(gfn_t gfn)
791 {
792         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
793 }
794
795 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
796                                                u64 *parent_pte)
797 {
798         struct kvm_mmu_page *sp;
799
800         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
801         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
802         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
803         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
804         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
805         INIT_LIST_HEAD(&sp->oos_link);
806         ASSERT(is_empty_shadow_page(sp->spt));
807         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
808         sp->multimapped = 0;
809         sp->parent_pte = parent_pte;
810         --vcpu->kvm->arch.n_free_mmu_pages;
811         return sp;
812 }
813
814 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
815                                     struct kvm_mmu_page *sp, u64 *parent_pte)
816 {
817         struct kvm_pte_chain *pte_chain;
818         struct hlist_node *node;
819         int i;
820
821         if (!parent_pte)
822                 return;
823         if (!sp->multimapped) {
824                 u64 *old = sp->parent_pte;
825
826                 if (!old) {
827                         sp->parent_pte = parent_pte;
828                         return;
829                 }
830                 sp->multimapped = 1;
831                 pte_chain = mmu_alloc_pte_chain(vcpu);
832                 INIT_HLIST_HEAD(&sp->parent_ptes);
833                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
834                 pte_chain->parent_ptes[0] = old;
835         }
836         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
837                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
838                         continue;
839                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
840                         if (!pte_chain->parent_ptes[i]) {
841                                 pte_chain->parent_ptes[i] = parent_pte;
842                                 return;
843                         }
844         }
845         pte_chain = mmu_alloc_pte_chain(vcpu);
846         BUG_ON(!pte_chain);
847         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
848         pte_chain->parent_ptes[0] = parent_pte;
849 }
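
/*
 * Parent bookkeeping above mirrors the rmap encoding: while a shadow page
 * has a single parent, the parent spte is stored directly in
 * sp->parent_pte (sp->multimapped == 0); once a second parent appears,
 * the page switches to a hlist of kvm_pte_chain blocks, each holding up
 * to NR_PTE_CHAIN_ENTRIES parent sptes.
 */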
850
851 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
852                                        u64 *parent_pte)
853 {
854         struct kvm_pte_chain *pte_chain;
855         struct hlist_node *node;
856         int i;
857
858         if (!sp->multimapped) {
859                 BUG_ON(sp->parent_pte != parent_pte);
860                 sp->parent_pte = NULL;
861                 return;
862         }
863         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
864                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
865                         if (!pte_chain->parent_ptes[i])
866                                 break;
867                         if (pte_chain->parent_ptes[i] != parent_pte)
868                                 continue;
869                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
870                                 && pte_chain->parent_ptes[i + 1]) {
871                                 pte_chain->parent_ptes[i]
872                                         = pte_chain->parent_ptes[i + 1];
873                                 ++i;
874                         }
875                         pte_chain->parent_ptes[i] = NULL;
876                         if (i == 0) {
877                                 hlist_del(&pte_chain->link);
878                                 mmu_free_pte_chain(pte_chain);
879                                 if (hlist_empty(&sp->parent_ptes)) {
880                                         sp->multimapped = 0;
881                                         sp->parent_pte = NULL;
882                                 }
883                         }
884                         return;
885                 }
886         BUG();
887 }
888
889
890 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
891                             mmu_parent_walk_fn fn)
892 {
893         struct kvm_pte_chain *pte_chain;
894         struct hlist_node *node;
895         struct kvm_mmu_page *parent_sp;
896         int i;
897
898         if (!sp->multimapped && sp->parent_pte) {
899                 parent_sp = page_header(__pa(sp->parent_pte));
900                 fn(vcpu, parent_sp);
901                 mmu_parent_walk(vcpu, parent_sp, fn);
902                 return;
903         }
904         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
905                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
906                         if (!pte_chain->parent_ptes[i])
907                                 break;
908                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
909                         fn(vcpu, parent_sp);
910                         mmu_parent_walk(vcpu, parent_sp, fn);
911                 }
912 }
913
914 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
915 {
916         unsigned int index;
917         struct kvm_mmu_page *sp = page_header(__pa(spte));
918
919         index = spte - sp->spt;
920         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
921                 sp->unsync_children++;
922         WARN_ON(!sp->unsync_children);
923 }
924
925 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
926 {
927         struct kvm_pte_chain *pte_chain;
928         struct hlist_node *node;
929         int i;
930
931         if (!sp->parent_pte)
932                 return;
933
934         if (!sp->multimapped) {
935                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
936                 return;
937         }
938
939         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
940                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
941                         if (!pte_chain->parent_ptes[i])
942                                 break;
943                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
944                 }
945 }
946
947 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
948 {
949         kvm_mmu_update_parents_unsync(sp);
950         return 1;
951 }
952
953 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
954                                         struct kvm_mmu_page *sp)
955 {
956         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
957         kvm_mmu_update_parents_unsync(sp);
958 }
959
960 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
961                                     struct kvm_mmu_page *sp)
962 {
963         int i;
964
965         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
966                 sp->spt[i] = shadow_trap_nonpresent_pte;
967 }
968
969 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
970                                struct kvm_mmu_page *sp)
971 {
972         return 1;
973 }
974
975 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
976 {
977 }
978
979 #define KVM_PAGE_ARRAY_NR 16
980
981 struct kvm_mmu_pages {
982         struct mmu_page_and_offset {
983                 struct kvm_mmu_page *sp;
984                 unsigned int idx;
985         } page[KVM_PAGE_ARRAY_NR];
986         unsigned int nr;
987 };
988
989 #define for_each_unsync_children(bitmap, idx)           \
990         for (idx = find_first_bit(bitmap, 512);         \
991              idx < 512;                                 \
992              idx = find_next_bit(bitmap, 512, idx+1))
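
/*
 * The 512 above is PT64_ENT_PER_PAGE, the number of sptes in one shadow
 * page and hence the width of sp->unsync_child_bitmap.
 */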
993
994 int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
995                    int idx)
996 {
997         int i;
998
999         if (sp->unsync)
1000                 for (i=0; i < pvec->nr; i++)
1001                         if (pvec->page[i].sp == sp)
1002                                 return 0;
1003
1004         pvec->page[pvec->nr].sp = sp;
1005         pvec->page[pvec->nr].idx = idx;
1006         pvec->nr++;
1007         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1008 }
1009
1010 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1011                            struct kvm_mmu_pages *pvec)
1012 {
1013         int i, ret, nr_unsync_leaf = 0;
1014
1015         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1016                 u64 ent = sp->spt[i];
1017
1018                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1019                         struct kvm_mmu_page *child;
1020                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1021
1022                         if (child->unsync_children) {
1023                                 if (mmu_pages_add(pvec, child, i))
1024                                         return -ENOSPC;
1025
1026                                 ret = __mmu_unsync_walk(child, pvec);
1027                                 if (!ret)
1028                                         __clear_bit(i, sp->unsync_child_bitmap);
1029                                 else if (ret > 0)
1030                                         nr_unsync_leaf += ret;
1031                                 else
1032                                         return ret;
1033                         }
1034
1035                         if (child->unsync) {
1036                                 nr_unsync_leaf++;
1037                                 if (mmu_pages_add(pvec, child, i))
1038                                         return -ENOSPC;
1039                         }
1040                 }
1041         }
1042
1043         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1044                 sp->unsync_children = 0;
1045
1046         return nr_unsync_leaf;
1047 }
1048
1049 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1050                            struct kvm_mmu_pages *pvec)
1051 {
1052         if (!sp->unsync_children)
1053                 return 0;
1054
1055         mmu_pages_add(pvec, sp, 0);
1056         return __mmu_unsync_walk(sp, pvec);
1057 }
1058
1059 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1060 {
1061         unsigned index;
1062         struct hlist_head *bucket;
1063         struct kvm_mmu_page *sp;
1064         struct hlist_node *node;
1065
1066         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1067         index = kvm_page_table_hashfn(gfn);
1068         bucket = &kvm->arch.mmu_page_hash[index];
1069         hlist_for_each_entry(sp, node, bucket, hash_link)
1070                 if (sp->gfn == gfn && !sp->role.metaphysical
1071                     && !sp->role.invalid) {
1072                         pgprintk("%s: found role %x\n",
1073                                  __func__, sp->role.word);
1074                         return sp;
1075                 }
1076         return NULL;
1077 }
1078
1079 static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
1080 {
1081         list_del(&sp->oos_link);
1082         --kvm->stat.mmu_unsync_global;
1083 }
1084
1085 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1086 {
1087         WARN_ON(!sp->unsync);
1088         sp->unsync = 0;
1089         if (sp->global)
1090                 kvm_unlink_unsync_global(kvm, sp);
1091         --kvm->stat.mmu_unsync;
1092 }
1093
1094 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1095
1096 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1097 {
1098         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1099                 kvm_mmu_zap_page(vcpu->kvm, sp);
1100                 return 1;
1101         }
1102
1103         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1104                 kvm_flush_remote_tlbs(vcpu->kvm);
1105         kvm_unlink_unsync_page(vcpu->kvm, sp);
1106         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1107                 kvm_mmu_zap_page(vcpu->kvm, sp);
1108                 return 1;
1109         }
1110
1111         kvm_mmu_flush_tlb(vcpu);
1112         return 0;
1113 }
1114
1115 struct mmu_page_path {
1116         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1117         unsigned int idx[PT64_ROOT_LEVEL-1];
1118 };
1119
1120 #define for_each_sp(pvec, sp, parents, i)                       \
1121                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1122                         sp = pvec.page[i].sp;                   \
1123                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1124                         i = mmu_pages_next(&pvec, &parents, i))
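
/*
 * Roughly: for_each_sp() visits the unsync leaf pages collected by
 * mmu_unsync_walk(); mmu_pages_next() (below) skips over the intermediate
 * pages in the vector while recording them in the mmu_page_path, so that
 * mmu_pages_clear_parents() can later walk back up, clearing
 * unsync_child_bitmap bits and decrementing unsync_children counters.
 */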
1125
1126 int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
1127                    int i)
1128 {
1129         int n;
1130
1131         for (n = i+1; n < pvec->nr; n++) {
1132                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1133
1134                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1135                         parents->idx[0] = pvec->page[n].idx;
1136                         return n;
1137                 }
1138
1139                 parents->parent[sp->role.level-2] = sp;
1140                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1141         }
1142
1143         return n;
1144 }
1145
1146 void mmu_pages_clear_parents(struct mmu_page_path *parents)
1147 {
1148         struct kvm_mmu_page *sp;
1149         unsigned int level = 0;
1150
1151         do {
1152                 unsigned int idx = parents->idx[level];
1153
1154                 sp = parents->parent[level];
1155                 if (!sp)
1156                         return;
1157
1158                 --sp->unsync_children;
1159                 WARN_ON((int)sp->unsync_children < 0);
1160                 __clear_bit(idx, sp->unsync_child_bitmap);
1161                 level++;
1162         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1163 }
1164
1165 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1166                                struct mmu_page_path *parents,
1167                                struct kvm_mmu_pages *pvec)
1168 {
1169         parents->parent[parent->role.level-1] = NULL;
1170         pvec->nr = 0;
1171 }
1172
1173 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1174                               struct kvm_mmu_page *parent)
1175 {
1176         int i;
1177         struct kvm_mmu_page *sp;
1178         struct mmu_page_path parents;
1179         struct kvm_mmu_pages pages;
1180
1181         kvm_mmu_pages_init(parent, &parents, &pages);
1182         while (mmu_unsync_walk(parent, &pages)) {
1183                 int protected = 0;
1184
1185                 for_each_sp(pages, sp, parents, i)
1186                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1187
1188                 if (protected)
1189                         kvm_flush_remote_tlbs(vcpu->kvm);
1190
1191                 for_each_sp(pages, sp, parents, i) {
1192                         kvm_sync_page(vcpu, sp);
1193                         mmu_pages_clear_parents(&parents);
1194                 }
1195                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1196                 kvm_mmu_pages_init(parent, &parents, &pages);
1197         }
1198 }
1199
1200 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1201                                              gfn_t gfn,
1202                                              gva_t gaddr,
1203                                              unsigned level,
1204                                              int metaphysical,
1205                                              unsigned access,
1206                                              u64 *parent_pte)
1207 {
1208         union kvm_mmu_page_role role;
1209         unsigned index;
1210         unsigned quadrant;
1211         struct hlist_head *bucket;
1212         struct kvm_mmu_page *sp;
1213         struct hlist_node *node, *tmp;
1214
1215         role = vcpu->arch.mmu.base_role;
1216         role.level = level;
1217         role.metaphysical = metaphysical;
1218         role.access = access;
1219         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1220                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1221                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1222                 role.quadrant = quadrant;
1223         }
1224         pgprintk("%s: looking gfn %lx role %x\n", __func__,
1225                  gfn, role.word);
1226         index = kvm_page_table_hashfn(gfn);
1227         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1228         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1229                 if (sp->gfn == gfn) {
1230                         if (sp->unsync)
1231                                 if (kvm_sync_page(vcpu, sp))
1232                                         continue;
1233
1234                         if (sp->role.word != role.word)
1235                                 continue;
1236
1237                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1238                         if (sp->unsync_children) {
1239                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1240                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1241                         }
1242                         pgprintk("%s: found\n", __func__);
1243                         return sp;
1244                 }
1245         ++vcpu->kvm->stat.mmu_cache_miss;
1246         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1247         if (!sp)
1248                 return sp;
1249         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1250         sp->gfn = gfn;
1251         sp->role = role;
1252         sp->global = role.cr4_pge;
1253         hlist_add_head(&sp->hash_link, bucket);
1254         if (!metaphysical) {
1255                 if (rmap_write_protect(vcpu->kvm, gfn))
1256                         kvm_flush_remote_tlbs(vcpu->kvm);
1257                 account_shadowed(vcpu->kvm, gfn);
1258         }
1259         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1260                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1261         else
1262                 nonpaging_prefetch_page(vcpu, sp);
1263         return sp;
1264 }
1265
1266 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1267                              struct kvm_vcpu *vcpu, u64 addr)
1268 {
1269         iterator->addr = addr;
1270         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1271         iterator->level = vcpu->arch.mmu.shadow_root_level;
1272         if (iterator->level == PT32E_ROOT_LEVEL) {
1273                 iterator->shadow_addr
1274                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1275                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1276                 --iterator->level;
1277                 if (!iterator->shadow_addr)
1278                         iterator->level = 0;
1279         }
1280 }
1281
1282 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1283 {
1284         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1285                 return false;
1286         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1287         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1288         return true;
1289 }
1290
1291 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1292 {
1293         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1294         --iterator->level;
1295 }
1296
1297 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1298                                          struct kvm_mmu_page *sp)
1299 {
1300         unsigned i;
1301         u64 *pt;
1302         u64 ent;
1303
1304         pt = sp->spt;
1305
1306         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1307                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1308                         if (is_shadow_present_pte(pt[i]))
1309                                 rmap_remove(kvm, &pt[i]);
1310                         pt[i] = shadow_trap_nonpresent_pte;
1311                 }
1312                 return;
1313         }
1314
1315         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1316                 ent = pt[i];
1317
1318                 if (is_shadow_present_pte(ent)) {
1319                         if (!is_large_pte(ent)) {
1320                                 ent &= PT64_BASE_ADDR_MASK;
1321                                 mmu_page_remove_parent_pte(page_header(ent),
1322                                                            &pt[i]);
1323                         } else {
1324                                 --kvm->stat.lpages;
1325                                 rmap_remove(kvm, &pt[i]);
1326                         }
1327                 }
1328                 pt[i] = shadow_trap_nonpresent_pte;
1329         }
1330 }
1331
1332 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1333 {
1334         mmu_page_remove_parent_pte(sp, parent_pte);
1335 }
1336
1337 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1338 {
1339         int i;
1340
1341         for (i = 0; i < KVM_MAX_VCPUS; ++i)
1342                 if (kvm->vcpus[i])
1343                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
1344 }
1345
1346 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1347 {
1348         u64 *parent_pte;
1349
1350         while (sp->multimapped || sp->parent_pte) {
1351                 if (!sp->multimapped)
1352                         parent_pte = sp->parent_pte;
1353                 else {
1354                         struct kvm_pte_chain *chain;
1355
1356                         chain = container_of(sp->parent_ptes.first,
1357                                              struct kvm_pte_chain, link);
1358                         parent_pte = chain->parent_ptes[0];
1359                 }
1360                 BUG_ON(!parent_pte);
1361                 kvm_mmu_put_page(sp, parent_pte);
1362                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
1363         }
1364 }
1365
1366 static int mmu_zap_unsync_children(struct kvm *kvm,
1367                                    struct kvm_mmu_page *parent)
1368 {
1369         int i, zapped = 0;
1370         struct mmu_page_path parents;
1371         struct kvm_mmu_pages pages;
1372
1373         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1374                 return 0;
1375
1376         kvm_mmu_pages_init(parent, &parents, &pages);
1377         while (mmu_unsync_walk(parent, &pages)) {
1378                 struct kvm_mmu_page *sp;
1379
1380                 for_each_sp(pages, sp, parents, i) {
1381                         kvm_mmu_zap_page(kvm, sp);
1382                         mmu_pages_clear_parents(&parents);
1383                 }
1384                 zapped += pages.nr;
1385                 kvm_mmu_pages_init(parent, &parents, &pages);
1386         }
1387
1388         return zapped;
1389 }
1390
1391 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1392 {
1393         int ret;
1394         ++kvm->stat.mmu_shadow_zapped;
1395         ret = mmu_zap_unsync_children(kvm, sp);
1396         kvm_mmu_page_unlink_children(kvm, sp);
1397         kvm_mmu_unlink_parents(kvm, sp);
1398         kvm_flush_remote_tlbs(kvm);
1399         if (!sp->role.invalid && !sp->role.metaphysical)
1400                 unaccount_shadowed(kvm, sp->gfn);
1401         if (sp->unsync)
1402                 kvm_unlink_unsync_page(kvm, sp);
1403         if (!sp->root_count) {
1404                 hlist_del(&sp->hash_link);
1405                 kvm_mmu_free_page(kvm, sp);
1406         } else {
1407                 sp->role.invalid = 1;
1408                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1409                 kvm_reload_remote_mmus(kvm);
1410         }
1411         kvm_mmu_reset_last_pte_updated(kvm);
1412         return ret;
1413 }
1414
1415 /*
1416  * Changing the number of mmu pages allocated to the VM.
1417  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
1418  */
1419 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1420 {
1421         /*
1422          * If we set the number of mmu pages to be smaller than the
1423          * number of active pages, we must free some mmu pages before we
1424          * change the value.
1425          */
1426
1427         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
1428             kvm_nr_mmu_pages) {
1429                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
1430                                        - kvm->arch.n_free_mmu_pages;
1431
1432                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
1433                         struct kvm_mmu_page *page;
1434
1435                         page = container_of(kvm->arch.active_mmu_pages.prev,
1436                                             struct kvm_mmu_page, link);
1437                         kvm_mmu_zap_page(kvm, page);
1438                         n_used_mmu_pages--;
1439                 }
1440                 kvm->arch.n_free_mmu_pages = 0;
1441         }
1442         else
1443                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1444                                          - kvm->arch.n_alloc_mmu_pages;
1445
1446         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1447 }
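
/*
 * Worked example: with n_alloc_mmu_pages == 1024, n_free_mmu_pages == 200
 * and kvm_nr_mmu_pages == 512, there are 824 pages in use, so 312 pages
 * are zapped from the tail of active_mmu_pages, n_free_mmu_pages becomes
 * 0 and n_alloc_mmu_pages becomes 512.
 */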
1448
1449 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1450 {
1451         unsigned index;
1452         struct hlist_head *bucket;
1453         struct kvm_mmu_page *sp;
1454         struct hlist_node *node, *n;
1455         int r;
1456
1457         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1458         r = 0;
1459         index = kvm_page_table_hashfn(gfn);
1460         bucket = &kvm->arch.mmu_page_hash[index];
1461         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1462                 if (sp->gfn == gfn && !sp->role.metaphysical) {
1463                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1464                                  sp->role.word);
1465                         r = 1;
1466                         if (kvm_mmu_zap_page(kvm, sp))
1467                                 n = bucket->first;
1468                 }
1469         return r;
1470 }
1471
1472 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1473 {
1474         unsigned index;
1475         struct hlist_head *bucket;
1476         struct kvm_mmu_page *sp;
1477         struct hlist_node *node, *nn;
1478
1479         index = kvm_page_table_hashfn(gfn);
1480         bucket = &kvm->arch.mmu_page_hash[index];
1481         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1482                 if (sp->gfn == gfn && !sp->role.metaphysical
1483                     && !sp->role.invalid) {
1484                         pgprintk("%s: zap %lx %x\n",
1485                                  __func__, gfn, sp->role.word);
1486                         kvm_mmu_zap_page(kvm, sp);
1487                 }
1488         }
1489 }
1490
1491 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1492 {
1493         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1494         struct kvm_mmu_page *sp = page_header(__pa(pte));
1495
1496         __set_bit(slot, sp->slot_bitmap);
1497 }
1498
1499 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1500 {
1501         int i;
1502         u64 *pt = sp->spt;
1503
1504         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1505                 return;
1506
1507         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1508                 if (pt[i] == shadow_notrap_nonpresent_pte)
1509                         set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
1510         }
1511 }
1512
1513 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1514 {
1515         struct page *page;
1516
1517         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1518
1519         if (gpa == UNMAPPED_GVA)
1520                 return NULL;
1521
1522         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1523
1524         return page;
1525 }
1526
1527 /*
1528  * The function is based on mtrr_type_lookup() in
1529  * arch/x86/kernel/cpu/mtrr/generic.c
1530  */
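/*
 * Per the fixed-range index arithmetic below, fixed_ranges[0..7] cover
 * 0x00000-0x7FFFF in 64 KB units, [8..23] cover 0x80000-0xBFFFF in 16 KB
 * units and [24..87] cover 0xC0000-0xFFFFF in 4 KB units.  For example,
 * start == 0x90000 yields idx = 8 + ((0x90000 - 0x80000) >> 14) == 12.
 */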
1531 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1532                          u64 start, u64 end)
1533 {
1534         int i;
1535         u64 base, mask;
1536         u8 prev_match, curr_match;
1537         int num_var_ranges = KVM_NR_VAR_MTRR;
1538
1539         if (!mtrr_state->enabled)
1540                 return 0xFF;
1541
1542         /* Make end inclusive, instead of exclusive */
1543         end--;
1544
1545         /* Look in fixed ranges. Just return the type as per start */
1546         if (mtrr_state->have_fixed && (start < 0x100000)) {
1547                 int idx;
1548
1549                 if (start < 0x80000) {
1550                         idx = 0;
1551                         idx += (start >> 16);
1552                         return mtrr_state->fixed_ranges[idx];
1553                 } else if (start < 0xC0000) {
1554                         idx = 1 * 8;
1555                         idx += ((start - 0x80000) >> 14);
1556                         return mtrr_state->fixed_ranges[idx];
1557                 } else if (start < 0x1000000) {
1558                         idx = 3 * 8;
1559                         idx += ((start - 0xC0000) >> 12);
1560                         return mtrr_state->fixed_ranges[idx];
1561                 }
1562         }
1563
1564         /*
1565          * Look in variable ranges
1566          * Look for multiple ranges matching this address and pick the type
1567          * as per MTRR precedence
1568          */
1569         if (!(mtrr_state->enabled & 2))
1570                 return mtrr_state->def_type;
1571
1572         prev_match = 0xFF;
1573         for (i = 0; i < num_var_ranges; ++i) {
1574                 unsigned short start_state, end_state;
1575
1576                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1577                         continue;
1578
1579                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1580                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1581                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1582                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1583
1584                 start_state = ((start & mask) == (base & mask));
1585                 end_state = ((end & mask) == (base & mask));
1586                 if (start_state != end_state)
1587                         return 0xFE;
1588
1589                 if ((start & mask) != (base & mask))
1590                         continue;
1591
1592                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1593                 if (prev_match == 0xFF) {
1594                         prev_match = curr_match;
1595                         continue;
1596                 }
1597
1598                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1599                     curr_match == MTRR_TYPE_UNCACHABLE)
1600                         return MTRR_TYPE_UNCACHABLE;
1601
1602                 if ((prev_match == MTRR_TYPE_WRBACK &&
1603                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1604                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1605                      curr_match == MTRR_TYPE_WRBACK)) {
1606                         prev_match = MTRR_TYPE_WRTHROUGH;
1607                         curr_match = MTRR_TYPE_WRTHROUGH;
1608                 }
1609
1610                 if (prev_match != curr_match)
1611                         return MTRR_TYPE_UNCACHABLE;
1612         }
1613
1614         if (prev_match != 0xFF)
1615                 return prev_match;
1616
1617         return mtrr_state->def_type;
1618 }
1619
1620 static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1621 {
1622         u8 mtrr;
1623
1624         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1625                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1626         if (mtrr == 0xfe || mtrr == 0xff)
1627                 mtrr = MTRR_TYPE_WRBACK;
1628         return mtrr;
1629 }
1630
1631 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1632 {
1633         unsigned index;
1634         struct hlist_head *bucket;
1635         struct kvm_mmu_page *s;
1636         struct hlist_node *node, *n;
1637
1638         index = kvm_page_table_hashfn(sp->gfn);
1639         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1640         /* don't unsync if pagetable is shadowed with multiple roles */
1641         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1642                 if (s->gfn != sp->gfn || s->role.metaphysical)
1643                         continue;
1644                 if (s->role.word != sp->role.word)
1645                         return 1;
1646         }
1647         ++vcpu->kvm->stat.mmu_unsync;
1648         sp->unsync = 1;
1649
1650         if (sp->global) {
1651                 list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
1652                 ++vcpu->kvm->stat.mmu_unsync_global;
1653         } else
1654                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1655
1656         mmu_convert_notrap(sp);
1657         return 0;
1658 }
1659
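/*
 * Returns 1 if the gfn must be mapped read-only.  A gfn that is only
 * shadowed as a last-level page table may instead be left writable and
 * marked unsync (to be resynced on demand); upper-level shadows, or a
 * gfn shadowed with multiple roles, force write protection.
 */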
1660 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1661                                   bool can_unsync)
1662 {
1663         struct kvm_mmu_page *shadow;
1664
1665         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1666         if (shadow) {
1667                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1668                         return 1;
1669                 if (shadow->unsync)
1670                         return 0;
1671                 if (can_unsync && oos_shadow)
1672                         return kvm_unsync_page(vcpu, shadow);
1673                 return 1;
1674         }
1675         return 0;
1676 }
1677
1678 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1679                     unsigned pte_access, int user_fault,
1680                     int write_fault, int dirty, int largepage,
1681                     int global, gfn_t gfn, pfn_t pfn, bool speculative,
1682                     bool can_unsync)
1683 {
1684         u64 spte;
1685         int ret = 0;
1686         u64 mt_mask = shadow_mt_mask;
1687         struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
1688
1689         if (!global && sp->global) {
1690                 sp->global = 0;
1691                 if (sp->unsync) {
1692                         kvm_unlink_unsync_global(vcpu->kvm, sp);
1693                         kvm_mmu_mark_parents_unsync(vcpu, sp);
1694                 }
1695         }
1696
1697         /*
1698          * We don't set the accessed bit, since we sometimes want to see
1699          * whether the guest actually used the pte (in order to detect
1700          * demand paging).
1701          */
1702         spte = shadow_base_present_pte | shadow_dirty_mask;
1703         if (!speculative)
1704                 spte |= shadow_accessed_mask;
1705         if (!dirty)
1706                 pte_access &= ~ACC_WRITE_MASK;
1707         if (pte_access & ACC_EXEC_MASK)
1708                 spte |= shadow_x_mask;
1709         else
1710                 spte |= shadow_nx_mask;
1711         if (pte_access & ACC_USER_MASK)
1712                 spte |= shadow_user_mask;
1713         if (largepage)
1714                 spte |= PT_PAGE_SIZE_MASK;
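        /*
         * shadow_mt_mask is expected to be nonzero only with EPT; the
         * effective memory type is then taken from the guest MTRRs for
         * RAM (with the "ignore guest PAT" bit set) and forced to UC
         * for MMIO frames.
         */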
1715         if (mt_mask) {
1716                 if (!kvm_is_mmio_pfn(pfn)) {
1717                         mt_mask = get_memory_type(vcpu, gfn) <<
1718                                 kvm_x86_ops->get_mt_mask_shift();
1719                         mt_mask |= VMX_EPT_IGMT_BIT;
1720                 } else
1721                         mt_mask = MTRR_TYPE_UNCACHABLE <<
1722                                 kvm_x86_ops->get_mt_mask_shift();
1723                 spte |= mt_mask;
1724         }
1725
1726         spte |= (u64)pfn << PAGE_SHIFT;
1727
1728         if ((pte_access & ACC_WRITE_MASK)
1729             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1730
1731                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1732                         ret = 1;
1733                         spte = shadow_trap_nonpresent_pte;
1734                         goto set_pte;
1735                 }
1736
1737                 spte |= PT_WRITABLE_MASK;
1738
1739                 /*
1740                  * Optimization: for pte sync, if spte was writable the hash
1741                  * lookup is unnecessary (and expensive). Write protection
1742                  * is the responsibility of mmu_get_page / kvm_sync_page.
1743                  * Same reasoning can be applied to dirty page accounting.
1744                  */
1745                 if (!can_unsync && is_writeble_pte(*shadow_pte))
1746                         goto set_pte;
1747
1748                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1749                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1750                                  __func__, gfn);
1751                         ret = 1;
1752                         pte_access &= ~ACC_WRITE_MASK;
1753                         if (is_writeble_pte(spte))
1754                                 spte &= ~PT_WRITABLE_MASK;
1755                 }
1756         }
1757
1758         if (pte_access & ACC_WRITE_MASK)
1759                 mark_page_dirty(vcpu->kvm, gfn);
1760
1761 set_pte:
1762         set_shadow_pte(shadow_pte, spte);
1763         return ret;
1764 }
1765
1766 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1767                          unsigned pt_access, unsigned pte_access,
1768                          int user_fault, int write_fault, int dirty,
1769                          int *ptwrite, int largepage, int global,
1770                          gfn_t gfn, pfn_t pfn, bool speculative)
1771 {
1772         int was_rmapped = 0;
1773         int was_writeble = is_writeble_pte(*shadow_pte);
1774
1775         pgprintk("%s: spte %llx access %x write_fault %d"
1776                  " user_fault %d gfn %lx\n",
1777                  __func__, *shadow_pte, pt_access,
1778                  write_fault, user_fault, gfn);
1779
1780         if (is_rmap_pte(*shadow_pte)) {
1781                 /*
1782                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1783                  * the parent of the now unreachable PTE.
1784                  */
1785                 if (largepage && !is_large_pte(*shadow_pte)) {
1786                         struct kvm_mmu_page *child;
1787                         u64 pte = *shadow_pte;
1788
1789                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1790                         mmu_page_remove_parent_pte(child, shadow_pte);
1791                 } else if (pfn != spte_to_pfn(*shadow_pte)) {
1792                         pgprintk("hfn old %lx new %lx\n",
1793                                  spte_to_pfn(*shadow_pte), pfn);
1794                         rmap_remove(vcpu->kvm, shadow_pte);
1795                 } else {
1796                         if (largepage)
1797                                 was_rmapped = is_large_pte(*shadow_pte);
1798                         else
1799                                 was_rmapped = 1;
1800                 }
1801         }
1802         if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
1803                       dirty, largepage, global, gfn, pfn, speculative, true)) {
1804                 if (write_fault)
1805                         *ptwrite = 1;
1806                 kvm_x86_ops->tlb_flush(vcpu);
1807         }
1808
1809         pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
1810         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1811                  is_large_pte(*shadow_pte)? "2MB" : "4kB",
1812                  is_present_pte(*shadow_pte)?"RW":"R", gfn,
1813                  *shadow_pte, shadow_pte);
1814         if (!was_rmapped && is_large_pte(*shadow_pte))
1815                 ++vcpu->kvm->stat.lpages;
1816
1817         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1818         if (!was_rmapped) {
1819                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1820                 if (!is_rmap_pte(*shadow_pte))
1821                         kvm_release_pfn_clean(pfn);
1822         } else {
1823                 if (was_writeble)
1824                         kvm_release_pfn_dirty(pfn);
1825                 else
1826                         kvm_release_pfn_clean(pfn);
1827         }
1828         if (speculative) {
1829                 vcpu->arch.last_pte_updated = shadow_pte;
1830                 vcpu->arch.last_pte_gfn = gfn;
1831         }
1832 }
1833
1834 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1835 {
1836 }
1837
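/*
 * Build a direct gfn->pfn mapping (no guest page table to mirror): walk
 * the shadow hierarchy for the faulting address, allocating intermediate
 * shadow pages where entries are still nonpresent, and install the leaf
 * spte at the 4K level, or at the directory level for a large page.
 */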
1838 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1839                         int largepage, gfn_t gfn, pfn_t pfn)
1840 {
1841         struct kvm_shadow_walk_iterator iterator;
1842         struct kvm_mmu_page *sp;
1843         int pt_write = 0;
1844         gfn_t pseudo_gfn;
1845
1846         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1847                 if (iterator.level == PT_PAGE_TABLE_LEVEL
1848                     || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1849                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1850                                      0, write, 1, &pt_write,
1851                                      largepage, 0, gfn, pfn, false);
1852                         ++vcpu->stat.pf_fixed;
1853                         break;
1854                 }
1855
1856                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1857                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1858                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1859                                               iterator.level - 1,
1860                                               1, ACC_ALL, iterator.sptep);
1861                         if (!sp) {
1862                                 pgprintk("nonpaging_map: ENOMEM\n");
1863                                 kvm_release_pfn_clean(pfn);
1864                                 return -ENOMEM;
1865                         }
1866
1867                         set_shadow_pte(iterator.sptep,
1868                                        __pa(sp->spt)
1869                                        | PT_PRESENT_MASK | PT_WRITABLE_MASK
1870                                        | shadow_user_mask | shadow_x_mask);
1871                 }
1872         }
1873         return pt_write;
1874 }
1875
1876 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1877 {
1878         int r;
1879         int largepage = 0;
1880         pfn_t pfn;
1881         unsigned long mmu_seq;
1882
1883         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1884                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1885                 largepage = 1;
1886         }
1887
1888         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1889         smp_rmb();
1890         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1891
1892         /* mmio */
1893         if (is_error_pfn(pfn)) {
1894                 kvm_release_pfn_clean(pfn);
1895                 return 1;
1896         }
1897
1898         spin_lock(&vcpu->kvm->mmu_lock);
1899         if (mmu_notifier_retry(vcpu, mmu_seq))
1900                 goto out_unlock;
1901         kvm_mmu_free_some_pages(vcpu);
1902         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1903         spin_unlock(&vcpu->kvm->mmu_lock);
1904
1905
1906         return r;
1907
1908 out_unlock:
1909         spin_unlock(&vcpu->kvm->mmu_lock);
1910         kvm_release_pfn_clean(pfn);
1911         return 0;
1912 }
1913
1914
1915 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1916 {
1917         int i;
1918         struct kvm_mmu_page *sp;
1919
1920         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1921                 return;
1922         spin_lock(&vcpu->kvm->mmu_lock);
1923         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1924                 hpa_t root = vcpu->arch.mmu.root_hpa;
1925
1926                 sp = page_header(root);
1927                 --sp->root_count;
1928                 if (!sp->root_count && sp->role.invalid)
1929                         kvm_mmu_zap_page(vcpu->kvm, sp);
1930                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1931                 spin_unlock(&vcpu->kvm->mmu_lock);
1932                 return;
1933         }
1934         for (i = 0; i < 4; ++i) {
1935                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1936
1937                 if (root) {
1938                         root &= PT64_BASE_ADDR_MASK;
1939                         sp = page_header(root);
1940                         --sp->root_count;
1941                         if (!sp->root_count && sp->role.invalid)
1942                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1943                 }
1944                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1945         }
1946         spin_unlock(&vcpu->kvm->mmu_lock);
1947         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1948 }
1949
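/*
 * Allocate shadow roots for the current mode: a single 4-level root
 * when the shadow hierarchy uses PT64, otherwise four PAE roots, each
 * covering 1GB of guest address space.  With tdp enabled the roots are
 * metaphysical, i.e. not backed by a guest page table.
 */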
1950 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1951 {
1952         int i;
1953         gfn_t root_gfn;
1954         struct kvm_mmu_page *sp;
1955         int metaphysical = 0;
1956
1957         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1958
1959         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1960                 hpa_t root = vcpu->arch.mmu.root_hpa;
1961
1962                 ASSERT(!VALID_PAGE(root));
1963                 if (tdp_enabled)
1964                         metaphysical = 1;
1965                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1966                                       PT64_ROOT_LEVEL, metaphysical,
1967                                       ACC_ALL, NULL);
1968                 root = __pa(sp->spt);
1969                 ++sp->root_count;
1970                 vcpu->arch.mmu.root_hpa = root;
1971                 return;
1972         }
1973         metaphysical = !is_paging(vcpu);
1974         if (tdp_enabled)
1975                 metaphysical = 1;
1976         for (i = 0; i < 4; ++i) {
1977                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1978
1979                 ASSERT(!VALID_PAGE(root));
1980                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1981                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1982                                 vcpu->arch.mmu.pae_root[i] = 0;
1983                                 continue;
1984                         }
1985                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1986                 } else if (vcpu->arch.mmu.root_level == 0)
1987                         root_gfn = 0;
1988                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1989                                       PT32_ROOT_LEVEL, metaphysical,
1990                                       ACC_ALL, NULL);
1991                 root = __pa(sp->spt);
1992                 ++sp->root_count;
1993                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1994         }
1995         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1996 }
1997
1998 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
1999 {
2000         int i;
2001         struct kvm_mmu_page *sp;
2002
2003         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2004                 return;
2005         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2006                 hpa_t root = vcpu->arch.mmu.root_hpa;
2007                 sp = page_header(root);
2008                 mmu_sync_children(vcpu, sp);
2009                 return;
2010         }
2011         for (i = 0; i < 4; ++i) {
2012                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2013
2014                 if (root) {
2015                         root &= PT64_BASE_ADDR_MASK;
2016                         sp = page_header(root);
2017                         mmu_sync_children(vcpu, sp);
2018                 }
2019         }
2020 }
2021
2022 static void mmu_sync_global(struct kvm_vcpu *vcpu)
2023 {
2024         struct kvm *kvm = vcpu->kvm;
2025         struct kvm_mmu_page *sp, *n;
2026
2027         list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
2028                 kvm_sync_page(vcpu, sp);
2029 }
2030
2031 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2032 {
2033         spin_lock(&vcpu->kvm->mmu_lock);
2034         mmu_sync_roots(vcpu);
2035         spin_unlock(&vcpu->kvm->mmu_lock);
2036 }
2037
2038 void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
2039 {
2040         spin_lock(&vcpu->kvm->mmu_lock);
2041         mmu_sync_global(vcpu);
2042         spin_unlock(&vcpu->kvm->mmu_lock);
2043 }
2044
2045 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2046 {
2047         return vaddr;
2048 }
2049
2050 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2051                                 u32 error_code)
2052 {
2053         gfn_t gfn;
2054         int r;
2055
2056         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2057         r = mmu_topup_memory_caches(vcpu);
2058         if (r)
2059                 return r;
2060
2061         ASSERT(vcpu);
2062         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2063
2064         gfn = gva >> PAGE_SHIFT;
2065
2066         return nonpaging_map(vcpu, gva & PAGE_MASK,
2067                              error_code & PFERR_WRITE_MASK, gfn);
2068 }
2069
2070 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2071                                 u32 error_code)
2072 {
2073         pfn_t pfn;
2074         int r;
2075         int largepage = 0;
2076         gfn_t gfn = gpa >> PAGE_SHIFT;
2077         unsigned long mmu_seq;
2078
2079         ASSERT(vcpu);
2080         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2081
2082         r = mmu_topup_memory_caches(vcpu);
2083         if (r)
2084                 return r;
2085
2086         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
2087                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2088                 largepage = 1;
2089         }
2090         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2091         smp_rmb();
2092         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2093         if (is_error_pfn(pfn)) {
2094                 kvm_release_pfn_clean(pfn);
2095                 return 1;
2096         }
2097         spin_lock(&vcpu->kvm->mmu_lock);
2098         if (mmu_notifier_retry(vcpu, mmu_seq))
2099                 goto out_unlock;
2100         kvm_mmu_free_some_pages(vcpu);
2101         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2102                          largepage, gfn, pfn);
2103         spin_unlock(&vcpu->kvm->mmu_lock);
2104
2105         return r;
2106
2107 out_unlock:
2108         spin_unlock(&vcpu->kvm->mmu_lock);
2109         kvm_release_pfn_clean(pfn);
2110         return 0;
2111 }
2112
2113 static void nonpaging_free(struct kvm_vcpu *vcpu)
2114 {
2115         mmu_free_roots(vcpu);
2116 }
2117
2118 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2119 {
2120         struct kvm_mmu *context = &vcpu->arch.mmu;
2121
2122         context->new_cr3 = nonpaging_new_cr3;
2123         context->page_fault = nonpaging_page_fault;
2124         context->gva_to_gpa = nonpaging_gva_to_gpa;
2125         context->free = nonpaging_free;
2126         context->prefetch_page = nonpaging_prefetch_page;
2127         context->sync_page = nonpaging_sync_page;
2128         context->invlpg = nonpaging_invlpg;
2129         context->root_level = 0;
2130         context->shadow_root_level = PT32E_ROOT_LEVEL;
2131         context->root_hpa = INVALID_PAGE;
2132         return 0;
2133 }
2134
2135 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2136 {
2137         ++vcpu->stat.tlb_flush;
2138         kvm_x86_ops->tlb_flush(vcpu);
2139 }
2140
2141 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2142 {
2143         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2144         mmu_free_roots(vcpu);
2145 }
2146
2147 static void inject_page_fault(struct kvm_vcpu *vcpu,
2148                               u64 addr,
2149                               u32 err_code)
2150 {
2151         kvm_inject_page_fault(vcpu, addr, err_code);
2152 }
2153
2154 static void paging_free(struct kvm_vcpu *vcpu)
2155 {
2156         nonpaging_free(vcpu);
2157 }
2158
2159 #define PTTYPE 64
2160 #include "paging_tmpl.h"
2161 #undef PTTYPE
2162
2163 #define PTTYPE 32
2164 #include "paging_tmpl.h"
2165 #undef PTTYPE
2166
2167 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2168 {
2169         struct kvm_mmu *context = &vcpu->arch.mmu;
2170
2171         ASSERT(is_pae(vcpu));
2172         context->new_cr3 = paging_new_cr3;
2173         context->page_fault = paging64_page_fault;
2174         context->gva_to_gpa = paging64_gva_to_gpa;
2175         context->prefetch_page = paging64_prefetch_page;
2176         context->sync_page = paging64_sync_page;
2177         context->invlpg = paging64_invlpg;
2178         context->free = paging_free;
2179         context->root_level = level;
2180         context->shadow_root_level = level;
2181         context->root_hpa = INVALID_PAGE;
2182         return 0;
2183 }
2184
2185 static int paging64_init_context(struct kvm_vcpu *vcpu)
2186 {
2187         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2188 }
2189
2190 static int paging32_init_context(struct kvm_vcpu *vcpu)
2191 {
2192         struct kvm_mmu *context = &vcpu->arch.mmu;
2193
2194         context->new_cr3 = paging_new_cr3;
2195         context->page_fault = paging32_page_fault;
2196         context->gva_to_gpa = paging32_gva_to_gpa;
2197         context->free = paging_free;
2198         context->prefetch_page = paging32_prefetch_page;
2199         context->sync_page = paging32_sync_page;
2200         context->invlpg = paging32_invlpg;
2201         context->root_level = PT32_ROOT_LEVEL;
2202         context->shadow_root_level = PT32E_ROOT_LEVEL;
2203         context->root_hpa = INVALID_PAGE;
2204         return 0;
2205 }
2206
2207 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2208 {
2209         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2210 }
2211
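/*
 * With two-dimensional paging the hardware walks the guest page tables
 * itself, so the kvm mmu only maintains a direct gpa->hpa map rooted at
 * whatever level kvm_x86_ops->get_tdp_level() reports; gva_to_gpa still
 * follows the guest's own paging mode because it is used for emulation.
 */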
2212 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2213 {
2214         struct kvm_mmu *context = &vcpu->arch.mmu;
2215
2216         context->new_cr3 = nonpaging_new_cr3;
2217         context->page_fault = tdp_page_fault;
2218         context->free = nonpaging_free;
2219         context->prefetch_page = nonpaging_prefetch_page;
2220         context->sync_page = nonpaging_sync_page;
2221         context->invlpg = nonpaging_invlpg;
2222         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2223         context->root_hpa = INVALID_PAGE;
2224
2225         if (!is_paging(vcpu)) {
2226                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2227                 context->root_level = 0;
2228         } else if (is_long_mode(vcpu)) {
2229                 context->gva_to_gpa = paging64_gva_to_gpa;
2230                 context->root_level = PT64_ROOT_LEVEL;
2231         } else if (is_pae(vcpu)) {
2232                 context->gva_to_gpa = paging64_gva_to_gpa;
2233                 context->root_level = PT32E_ROOT_LEVEL;
2234         } else {
2235                 context->gva_to_gpa = paging32_gva_to_gpa;
2236                 context->root_level = PT32_ROOT_LEVEL;
2237         }
2238
2239         return 0;
2240 }
2241
2242 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2243 {
2244         int r;
2245
2246         ASSERT(vcpu);
2247         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2248
2249         if (!is_paging(vcpu))
2250                 r = nonpaging_init_context(vcpu);
2251         else if (is_long_mode(vcpu))
2252                 r = paging64_init_context(vcpu);
2253         else if (is_pae(vcpu))
2254                 r = paging32E_init_context(vcpu);
2255         else
2256                 r = paging32_init_context(vcpu);
2257
2258         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2259
2260         return r;
2261 }
2262
2263 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2264 {
2265         vcpu->arch.update_pte.pfn = bad_pfn;
2266
2267         if (tdp_enabled)
2268                 return init_kvm_tdp_mmu(vcpu);
2269         else
2270                 return init_kvm_softmmu(vcpu);
2271 }
2272
2273 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2274 {
2275         ASSERT(vcpu);
2276         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2277                 vcpu->arch.mmu.free(vcpu);
2278                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2279         }
2280 }
2281
2282 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2283 {
2284         destroy_kvm_mmu(vcpu);
2285         return init_kvm_mmu(vcpu);
2286 }
2287 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2288
2289 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2290 {
2291         int r;
2292
2293         r = mmu_topup_memory_caches(vcpu);
2294         if (r)
2295                 goto out;
2296         spin_lock(&vcpu->kvm->mmu_lock);
2297         kvm_mmu_free_some_pages(vcpu);
2298         mmu_alloc_roots(vcpu);
2299         mmu_sync_roots(vcpu);
2300         spin_unlock(&vcpu->kvm->mmu_lock);
2301         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2302         kvm_mmu_flush_tlb(vcpu);
2303 out:
2304         return r;
2305 }
2306 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2307
2308 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2309 {
2310         mmu_free_roots(vcpu);
2311 }
2312
2313 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2314                                   struct kvm_mmu_page *sp,
2315                                   u64 *spte)
2316 {
2317         u64 pte;
2318         struct kvm_mmu_page *child;
2319
2320         pte = *spte;
2321         if (is_shadow_present_pte(pte)) {
2322                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
2323                     is_large_pte(pte))
2324                         rmap_remove(vcpu->kvm, spte);
2325                 else {
2326                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2327                         mmu_page_remove_parent_pte(child, spte);
2328                 }
2329         }
2330         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
2331         if (is_large_pte(pte))
2332                 --vcpu->kvm->stat.lpages;
2333 }
2334
2335 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2336                                   struct kvm_mmu_page *sp,
2337                                   u64 *spte,
2338                                   const void *new)
2339 {
2340         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2341                 if (!vcpu->arch.update_pte.largepage ||
2342                     sp->role.glevels == PT32_ROOT_LEVEL) {
2343                         ++vcpu->kvm->stat.mmu_pde_zapped;
2344                         return;
2345                 }
2346         }
2347
2348         ++vcpu->kvm->stat.mmu_pte_updated;
2349         if (sp->role.glevels == PT32_ROOT_LEVEL)
2350                 paging32_update_pte(vcpu, sp, spte, new);
2351         else
2352                 paging64_update_pte(vcpu, sp, spte, new);
2353 }
2354
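/*
 * A remote TLB flush is needed only when the write may revoke rights
 * that other vcpus could still have cached: the old spte was present
 * and the new one drops the mapping, changes the target frame, or
 * clears a permission bit.  Flipping NX in both values turns "NX newly
 * set" into a cleared bit so the same test catches it.
 */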
2355 static bool need_remote_flush(u64 old, u64 new)
2356 {
2357         if (!is_shadow_present_pte(old))
2358                 return false;
2359         if (!is_shadow_present_pte(new))
2360                 return true;
2361         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2362                 return true;
2363         old ^= PT64_NX_MASK;
2364         new ^= PT64_NX_MASK;
2365         return (old & ~new & PT64_PERM_MASK) != 0;
2366 }
2367
2368 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2369 {
2370         if (need_remote_flush(old, new))
2371                 kvm_flush_remote_tlbs(vcpu->kvm);
2372         else
2373                 kvm_mmu_flush_tlb(vcpu);
2374 }
2375
2376 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2377 {
2378         u64 *spte = vcpu->arch.last_pte_updated;
2379
2380         return !!(spte && (*spte & shadow_accessed_mask));
2381 }
2382
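/*
 * Peek at the pte being written and, if it looks like a valid guest
 * pte, look up the target pfn in advance.  gfn_to_pfn() may sleep, so
 * this must happen here, before kvm_mmu_pte_write() takes mmu_lock.
 */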
2383 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2384                                           const u8 *new, int bytes)
2385 {
2386         gfn_t gfn;
2387         int r;
2388         u64 gpte = 0;
2389         pfn_t pfn;
2390
2391         vcpu->arch.update_pte.largepage = 0;
2392
2393         if (bytes != 4 && bytes != 8)
2394                 return;
2395
2396         /*
2397          * Assume that the pte write is on a page table of the same type
2398          * as the current vcpu paging mode.  This is nearly always true
2399          * (might be false while changing modes).  Note it is verified later
2400          * by update_pte().
2401          */
2402         if (is_pae(vcpu)) {
2403                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2404                 if ((bytes == 4) && (gpa % 4 == 0)) {
2405                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2406                         if (r)
2407                                 return;
2408                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2409                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2410                         memcpy((void *)&gpte, new, 8);
2411                 }
2412         } else {
2413                 if ((bytes == 4) && (gpa % 4 == 0))
2414                         memcpy((void *)&gpte, new, 4);
2415         }
2416         if (!is_present_pte(gpte))
2417                 return;
2418         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2419
2420         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2421                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2422                 vcpu->arch.update_pte.largepage = 1;
2423         }
2424         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2425         smp_rmb();
2426         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2427
2428         if (is_error_pfn(pfn)) {
2429                 kvm_release_pfn_clean(pfn);
2430                 return;
2431         }
2432         vcpu->arch.update_pte.gfn = gfn;
2433         vcpu->arch.update_pte.pfn = pfn;
2434 }
2435
2436 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2437 {
2438         u64 *spte = vcpu->arch.last_pte_updated;
2439
2440         if (spte
2441             && vcpu->arch.last_pte_gfn == gfn
2442             && shadow_accessed_mask
2443             && !(*spte & shadow_accessed_mask)
2444             && is_shadow_present_pte(*spte))
2445                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2446 }
2447
2448 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2449                        const u8 *new, int bytes,
2450                        bool guest_initiated)
2451 {
2452         gfn_t gfn = gpa >> PAGE_SHIFT;
2453         struct kvm_mmu_page *sp;
2454         struct hlist_node *node, *n;
2455         struct hlist_head *bucket;
2456         unsigned index;
2457         u64 entry, gentry;
2458         u64 *spte;
2459         unsigned offset = offset_in_page(gpa);
2460         unsigned pte_size;
2461         unsigned page_offset;
2462         unsigned misaligned;
2463         unsigned quadrant;
2464         int level;
2465         int flooded = 0;
2466         int npte;
2467         int r;
2468
2469         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2470         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2471         spin_lock(&vcpu->kvm->mmu_lock);
2472         kvm_mmu_access_page(vcpu, gfn);
2473         kvm_mmu_free_some_pages(vcpu);
2474         ++vcpu->kvm->stat.mmu_pte_write;
2475         kvm_mmu_audit(vcpu, "pre pte write");
2476         if (guest_initiated) {
2477                 if (gfn == vcpu->arch.last_pt_write_gfn
2478                     && !last_updated_pte_accessed(vcpu)) {
2479                         ++vcpu->arch.last_pt_write_count;
2480                         if (vcpu->arch.last_pt_write_count >= 3)
2481                                 flooded = 1;
2482                 } else {
2483                         vcpu->arch.last_pt_write_gfn = gfn;
2484                         vcpu->arch.last_pt_write_count = 1;
2485                         vcpu->arch.last_pte_updated = NULL;
2486                 }
2487         }
2488         index = kvm_page_table_hashfn(gfn);
2489         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2490         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2491                 if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
2492                         continue;
2493                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2494                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2495                 misaligned |= bytes < 4;
2496                 if (misaligned || flooded) {
2497                         /*
2498                          * Misaligned accesses are too much trouble to fix
2499                          * up; also, they usually indicate a page is not used
2500                          * as a page table.
2501                          *
2502                          * If we're seeing too many writes to a page,
2503                          * it may no longer be a page table, or we may be
2504                          * forking, in which case it is better to unmap the
2505                          * page.
2506                          */
2507                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2508                                  gpa, bytes, sp->role.word);
2509                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2510                                 n = bucket->first;
2511                         ++vcpu->kvm->stat.mmu_flooded;
2512                         continue;
2513                 }
2514                 page_offset = offset;
2515                 level = sp->role.level;
2516                 npte = 1;
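                /*
                 * A 32-bit guest pte is half the size of a shadow pte,
                 * so one guest page is shadowed by more than one shadow
                 * page; role.quadrant records which part of the guest
                 * page this sp covers, and writes to other quadrants
                 * are skipped below.
                 */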
2517                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2518                         page_offset <<= 1;      /* 32->64 */
2519                         /*
2520                          * A 32-bit pde maps 4MB while the shadow pdes map
2521                          * only 2MB.  So we need to double the offset again
2522                          * and zap two pdes instead of one.
2523                          */
2524                         if (level == PT32_ROOT_LEVEL) {
2525                                 page_offset &= ~7; /* kill rounding error */
2526                                 page_offset <<= 1;
2527                                 npte = 2;
2528                         }
2529                         quadrant = page_offset >> PAGE_SHIFT;
2530                         page_offset &= ~PAGE_MASK;
2531                         if (quadrant != sp->role.quadrant)
2532                                 continue;
2533                 }
2534                 spte = &sp->spt[page_offset / sizeof(*spte)];
2535                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2536                         gentry = 0;
2537                         r = kvm_read_guest_atomic(vcpu->kvm,
2538                                                   gpa & ~(u64)(pte_size - 1),
2539                                                   &gentry, pte_size);
2540                         new = (const void *)&gentry;
2541                         if (r < 0)
2542                                 new = NULL;
2543                 }
2544                 while (npte--) {
2545                         entry = *spte;
2546                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2547                         if (new)
2548                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2549                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2550                         ++spte;
2551                 }
2552         }
2553         kvm_mmu_audit(vcpu, "post pte write");
2554         spin_unlock(&vcpu->kvm->mmu_lock);
2555         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2556                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2557                 vcpu->arch.update_pte.pfn = bad_pfn;
2558         }
2559 }
2560
2561 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2562 {
2563         gpa_t gpa;
2564         int r;
2565
2566         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2567
2568         spin_lock(&vcpu->kvm->mmu_lock);
2569         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2570         spin_unlock(&vcpu->kvm->mmu_lock);
2571         return r;
2572 }
2573 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2574
2575 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2576 {
2577         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2578                 struct kvm_mmu_page *sp;
2579
2580                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2581                                   struct kvm_mmu_page, link);
2582                 kvm_mmu_zap_page(vcpu->kvm, sp);
2583                 ++vcpu->kvm->stat.mmu_recycled;
2584         }
2585 }
2586
2587 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2588 {
2589         int r;
2590         enum emulation_result er;
2591
2592         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2593         if (r < 0)
2594                 goto out;
2595
2596         if (!r) {
2597                 r = 1;
2598                 goto out;
2599         }
2600
2601         r = mmu_topup_memory_caches(vcpu);
2602         if (r)
2603                 goto out;
2604
2605         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2606
2607         switch (er) {
2608         case EMULATE_DONE:
2609                 return 1;
2610         case EMULATE_DO_MMIO:
2611                 ++vcpu->stat.mmio_exits;
2612                 return 0;
2613         case EMULATE_FAIL:
2614                 kvm_report_emulation_failure(vcpu, "pagetable");
2615                 return 1;
2616         default:
2617                 BUG();
2618         }
2619 out:
2620         return r;
2621 }
2622 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2623
2624 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2625 {
2626         vcpu->arch.mmu.invlpg(vcpu, gva);
2627         kvm_mmu_flush_tlb(vcpu);
2628         ++vcpu->stat.invlpg;
2629 }
2630 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2631
2632 void kvm_enable_tdp(void)
2633 {
2634         tdp_enabled = true;
2635 }
2636 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2637
2638 void kvm_disable_tdp(void)
2639 {
2640         tdp_enabled = false;
2641 }
2642 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2643
2644 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2645 {
2646         struct kvm_mmu_page *sp;
2647
2648         while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2649                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
2650                                   struct kvm_mmu_page, link);
2651                 kvm_mmu_zap_page(vcpu->kvm, sp);
2652                 cond_resched();
2653         }
2654         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2655 }
2656
2657 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2658 {
2659         struct page *page;
2660         int i;
2661
2662         ASSERT(vcpu);
2663
2664         if (vcpu->kvm->arch.n_requested_mmu_pages)
2665                 vcpu->kvm->arch.n_free_mmu_pages =
2666                                         vcpu->kvm->arch.n_requested_mmu_pages;
2667         else
2668                 vcpu->kvm->arch.n_free_mmu_pages =
2669                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2670         /*
2671          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2672          * Therefore we need to allocate shadow page tables in the first
2673          * 4GB of memory, which happens to fit the DMA32 zone.
2674          */
2675         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2676         if (!page)
2677                 goto error_1;
2678         vcpu->arch.mmu.pae_root = page_address(page);
2679         for (i = 0; i < 4; ++i)
2680                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2681
2682         return 0;
2683
2684 error_1:
2685         free_mmu_pages(vcpu);
2686         return -ENOMEM;
2687 }
2688
2689 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2690 {
2691         ASSERT(vcpu);
2692         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2693
2694         return alloc_mmu_pages(vcpu);
2695 }
2696
2697 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2698 {
2699         ASSERT(vcpu);
2700         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2701
2702         return init_kvm_mmu(vcpu);
2703 }
2704
2705 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2706 {
2707         ASSERT(vcpu);
2708
2709         destroy_kvm_mmu(vcpu);
2710         free_mmu_pages(vcpu);
2711         mmu_free_memory_caches(vcpu);
2712 }
2713
2714 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2715 {
2716         struct kvm_mmu_page *sp;
2717
2718         spin_lock(&kvm->mmu_lock);
2719         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2720                 int i;
2721                 u64 *pt;
2722
2723                 if (!test_bit(slot, sp->slot_bitmap))
2724                         continue;
2725
2726                 pt = sp->spt;
2727                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2728                         /* avoid RMW */
2729                         if (pt[i] & PT_WRITABLE_MASK)
2730                                 pt[i] &= ~PT_WRITABLE_MASK;
2731         }
2732         kvm_flush_remote_tlbs(kvm);
2733         spin_unlock(&kvm->mmu_lock);
2734 }
2735
2736 void kvm_mmu_zap_all(struct kvm *kvm)
2737 {
2738         struct kvm_mmu_page *sp, *node;
2739
2740         spin_lock(&kvm->mmu_lock);
2741         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2742                 if (kvm_mmu_zap_page(kvm, sp))
2743                         node = container_of(kvm->arch.active_mmu_pages.next,
2744                                             struct kvm_mmu_page, link);
2745         spin_unlock(&kvm->mmu_lock);
2746
2747         kvm_flush_remote_tlbs(kvm);
2748 }
2749
2750 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2751 {
2752         struct kvm_mmu_page *page;
2753
2754         page = container_of(kvm->arch.active_mmu_pages.prev,
2755                             struct kvm_mmu_page, link);
2756         kvm_mmu_zap_page(kvm, page);
2757 }
2758
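/*
 * Shrinker callback for host memory pressure: report the number of
 * shadow pages in use across all VMs and, when asked to scan, zap one
 * page from the first VM that has any; that VM is then rotated to the
 * tail of vm_list so successive shrinks spread the cost.
 */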
2759 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2760 {
2761         struct kvm *kvm;
2762         struct kvm *kvm_freed = NULL;
2763         int cache_count = 0;
2764
2765         spin_lock(&kvm_lock);
2766
2767         list_for_each_entry(kvm, &vm_list, vm_list) {
2768                 int npages;
2769
2770                 if (!down_read_trylock(&kvm->slots_lock))
2771                         continue;
2772                 spin_lock(&kvm->mmu_lock);
2773                 npages = kvm->arch.n_alloc_mmu_pages -
2774                          kvm->arch.n_free_mmu_pages;
2775                 cache_count += npages;
2776                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2777                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2778                         cache_count--;
2779                         kvm_freed = kvm;
2780                 }
2781                 nr_to_scan--;
2782
2783                 spin_unlock(&kvm->mmu_lock);
2784                 up_read(&kvm->slots_lock);
2785         }
2786         if (kvm_freed)
2787                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2788
2789         spin_unlock(&kvm_lock);
2790
2791         return cache_count;
2792 }
2793
2794 static struct shrinker mmu_shrinker = {
2795         .shrink = mmu_shrink,
2796         .seeks = DEFAULT_SEEKS * 10,
2797 };
2798
2799 static void mmu_destroy_caches(void)
2800 {
2801         if (pte_chain_cache)
2802                 kmem_cache_destroy(pte_chain_cache);
2803         if (rmap_desc_cache)
2804                 kmem_cache_destroy(rmap_desc_cache);
2805         if (mmu_page_header_cache)
2806                 kmem_cache_destroy(mmu_page_header_cache);
2807 }
2808
2809 void kvm_mmu_module_exit(void)
2810 {
2811         mmu_destroy_caches();
2812         unregister_shrinker(&mmu_shrinker);
2813 }
2814
2815 int kvm_mmu_module_init(void)
2816 {
2817         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2818                                             sizeof(struct kvm_pte_chain),
2819                                             0, 0, NULL);
2820         if (!pte_chain_cache)
2821                 goto nomem;
2822         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2823                                             sizeof(struct kvm_rmap_desc),
2824                                             0, 0, NULL);
2825         if (!rmap_desc_cache)
2826                 goto nomem;
2827
2828         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2829                                                   sizeof(struct kvm_mmu_page),
2830                                                   0, 0, NULL);
2831         if (!mmu_page_header_cache)
2832                 goto nomem;
2833
2834         register_shrinker(&mmu_shrinker);
2835
2836         return 0;
2837
2838 nomem:
2839         mmu_destroy_caches();
2840         return -ENOMEM;
2841 }
2842
2843 /*
2844  * Calculate mmu pages needed for kvm.
2845  */
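/*
 * The budget is KVM_PERMILLE_MMU_PAGES shadow pages per thousand guest
 * pages, floored at KVM_MIN_ALLOC_MMU_PAGES.  For example, assuming the
 * usual ratio of 20/1000, a guest with 1GB of memory (262144 4K pages)
 * would be allowed roughly 5242 shadow pages.
 */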
2846 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2847 {
2848         int i;
2849         unsigned int nr_mmu_pages;
2850         unsigned int  nr_pages = 0;
2851
2852         for (i = 0; i < kvm->nmemslots; i++)
2853                 nr_pages += kvm->memslots[i].npages;
2854
2855         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2856         nr_mmu_pages = max(nr_mmu_pages,
2857                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2858
2859         return nr_mmu_pages;
2860 }
2861
2862 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2863                                 unsigned len)
2864 {
2865         if (len > buffer->len)
2866                 return NULL;
2867         return buffer->ptr;
2868 }
2869
2870 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2871                                 unsigned len)
2872 {
2873         void *ret;
2874
2875         ret = pv_mmu_peek_buffer(buffer, len);
2876         if (!ret)
2877                 return ret;
2878         buffer->ptr += len;
2879         buffer->len -= len;
2880         buffer->processed += len;
2881         return ret;
2882 }
2883
2884 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2885                              gpa_t addr, gpa_t value)
2886 {
2887         int bytes = 8;
2888         int r;
2889
2890         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2891                 bytes = 4;
2892
2893         r = mmu_topup_memory_caches(vcpu);
2894         if (r)
2895                 return r;
2896
2897         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2898                 return -EFAULT;
2899
2900         return 1;
2901 }
2902
2903 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2904 {
2905         kvm_x86_ops->tlb_flush(vcpu);
2906         set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
2907         return 1;
2908 }
2909
2910 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2911 {
2912         spin_lock(&vcpu->kvm->mmu_lock);
2913         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2914         spin_unlock(&vcpu->kvm->mmu_lock);
2915         return 1;
2916 }
2917
2918 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2919                              struct kvm_pv_mmu_op_buffer *buffer)
2920 {
2921         struct kvm_mmu_op_header *header;
2922
2923         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2924         if (!header)
2925                 return 0;
2926         switch (header->op) {
2927         case KVM_MMU_OP_WRITE_PTE: {
2928                 struct kvm_mmu_op_write_pte *wpte;
2929
2930                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2931                 if (!wpte)
2932                         return 0;
2933                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2934                                         wpte->pte_val);
2935         }
2936         case KVM_MMU_OP_FLUSH_TLB: {
2937                 struct kvm_mmu_op_flush_tlb *ftlb;
2938
2939                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2940                 if (!ftlb)
2941                         return 0;
2942                 return kvm_pv_mmu_flush_tlb(vcpu);
2943         }
2944         case KVM_MMU_OP_RELEASE_PT: {
2945                 struct kvm_mmu_op_release_pt *rpt;
2946
2947                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2948                 if (!rpt)
2949                         return 0;
2950                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2951         }
2952         default: return 0;
2953         }
2954 }
2955
2956 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2957                   gpa_t addr, unsigned long *ret)
2958 {
2959         int r;
2960         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2961
2962         buffer->ptr = buffer->buf;
2963         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2964         buffer->processed = 0;
2965
2966         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2967         if (r)
2968                 goto out;
2969
2970         while (buffer->len) {
2971                 r = kvm_pv_mmu_op_one(vcpu, buffer);
2972                 if (r < 0)
2973                         goto out;
2974                 if (r == 0)
2975                         break;
2976         }
2977
2978         r = 1;
2979 out:
2980         *ret = buffer->processed;
2981         return r;
2982 }
2983
2984 #ifdef AUDIT
2985
2986 static const char *audit_msg;
2987
2988 static gva_t canonicalize(gva_t gva)
2989 {
2990 #ifdef CONFIG_X86_64
2991         gva = (long long)(gva << 16) >> 16;
2992 #endif
2993         return gva;
2994 }
2995
2996 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2997                                 gva_t va, int level)
2998 {
2999         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3000         int i;
3001         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3002
3003         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3004                 u64 ent = pt[i];
3005
3006                 if (ent == shadow_trap_nonpresent_pte)
3007                         continue;
3008
3009                 va = canonicalize(va);
3010                 if (level > 1) {
3011                         if (ent == shadow_notrap_nonpresent_pte)
3012                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
3013                                        " in nonleaf level: levels %d gva %lx"
3014                                        " level %d pte %llx\n", audit_msg,
3015                                        vcpu->arch.mmu.root_level, va, level, ent);
3016
3017                         audit_mappings_page(vcpu, ent, va, level - 1);
3018                 } else {
3019                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3020                         pfn_t pfn = gpa_to_pfn(vcpu, gpa);
                             hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3021
3022                         if (is_shadow_present_pte(ent)
3023                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3024                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
3025                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3026                                        audit_msg, vcpu->arch.mmu.root_level,
3027                                        va, gpa, hpa, ent,
3028                                        is_shadow_present_pte(ent));
3029                         else if (ent == shadow_notrap_nonpresent_pte
3030                                  && !is_error_hpa(hpa))
3031                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3032                                        " valid guest gva %lx\n", audit_msg, va);
3033                         kvm_release_pfn_clean(pfn);
3034
3035                 }
3036         }
3037 }
3038
3039 static void audit_mappings(struct kvm_vcpu *vcpu)
3040 {
3041         unsigned i;
3042
3043         if (vcpu->arch.mmu.root_level == 4)
3044                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3045         else
3046                 for (i = 0; i < 4; ++i)
3047                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3048                                 audit_mappings_page(vcpu,
3049                                                     vcpu->arch.mmu.pae_root[i],
3050                                                     i << 30,
3051                                                     2);
3052 }
3053
3054 static int count_rmaps(struct kvm_vcpu *vcpu)
3055 {
3056         int nmaps = 0;
3057         int i, j, k;
3058
3059         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3060                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3061                 struct kvm_rmap_desc *d;
3062
3063                 for (j = 0; j < m->npages; ++j) {
3064                         unsigned long *rmapp = &m->rmap[j];
3065
3066                         if (!*rmapp)
3067                                 continue;
3068                         if (!(*rmapp & 1)) {
3069                                 ++nmaps;
3070                                 continue;
3071                         }
3072                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3073                         while (d) {
3074                                 for (k = 0; k < RMAP_EXT; ++k)
3075                                         if (d->shadow_ptes[k])
3076                                                 ++nmaps;
3077                                         else
3078                                                 break;
3079                                 d = d->more;
3080                         }
3081                 }
3082         }
3083         return nmaps;
3084 }
3085
3086 static int count_writable_mappings(struct kvm_vcpu *vcpu)
3087 {
3088         int nmaps = 0;
3089         struct kvm_mmu_page *sp;
3090         int i;
3091
3092         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3093                 u64 *pt = sp->spt;
3094
3095                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3096                         continue;
3097
3098                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3099                         u64 ent = pt[i];
3100
3101                         if (!(ent & PT_PRESENT_MASK))
3102                                 continue;
3103                         if (!(ent & PT_WRITABLE_MASK))
3104                                 continue;
3105                         ++nmaps;
3106                 }
3107         }
3108         return nmaps;
3109 }
3110
3111 static void audit_rmap(struct kvm_vcpu *vcpu)
3112 {
3113         int n_rmap = count_rmaps(vcpu);
3114         int n_actual = count_writable_mappings(vcpu);
3115
3116         if (n_rmap != n_actual)
3117                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
3118                        __func__, audit_msg, n_rmap, n_actual);
3119 }
3120
3121 static void audit_write_protection(struct kvm_vcpu *vcpu)
3122 {
3123         struct kvm_mmu_page *sp;
3124         struct kvm_memory_slot *slot;
3125         unsigned long *rmapp;
3126         gfn_t gfn;
3127
3128         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3129                 if (sp->role.metaphysical)
3130                         continue;
3131
3132                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3133                 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3134                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3135                 if (*rmapp)
3136                         printk(KERN_ERR "%s: (%s) shadow page has writable"
3137                                " mappings: gfn %lx role %x\n",
3138                                __func__, audit_msg, sp->gfn,
3139                                sp->role.word);
3140         }
3141 }
3142
3143 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3144 {
3145         int olddbg = dbg;
3146
3147         dbg = 0;
3148         audit_msg = msg;
3149         audit_rmap(vcpu);
3150         audit_write_protection(vcpu);
3151         audit_mappings(vcpu);
3152         dbg = olddbg;
3153 }
3154
3155 #endif