KVM: MMU: Fix auditing code
[safe/jmp/linux-2.6] / arch / x86 / kvm / mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21
22 #include <linux/kvm_host.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/mm.h>
26 #include <linux/highmem.h>
27 #include <linux/module.h>
28 #include <linux/swap.h>
29 #include <linux/hugetlb.h>
30 #include <linux/compiler.h>
31
32 #include <asm/page.h>
33 #include <asm/cmpxchg.h>
34 #include <asm/io.h>
35 #include <asm/vmx.h>
36
37 /*
38  * When setting this variable to true it enables Two-Dimensional-Paging
39  * where the hardware walks 2 page tables:
40  * 1. the guest-virtual to guest-physical
41  * 2. while doing 1. it walks guest-physical to host-physical
42  * If the hardware supports that we don't need to do shadow paging.
43  */
44 bool tdp_enabled = false;
45
46 #undef MMU_DEBUG
47
48 #undef AUDIT
49
50 #ifdef AUDIT
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52 #else
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54 #endif
55
56 #ifdef MMU_DEBUG
57
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61 #else
62
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
65
66 #endif
67
68 #if defined(MMU_DEBUG) || defined(AUDIT)
69 static int dbg = 0;
70 module_param(dbg, bool, 0644);
71 #endif
72
73 static int oos_shadow = 1;
74 module_param(oos_shadow, bool, 0644);
75
76 #ifndef MMU_DEBUG
77 #define ASSERT(x) do { } while (0)
78 #else
79 #define ASSERT(x)                                                       \
80         if (!(x)) {                                                     \
81                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
82                        __FILE__, __LINE__, #x);                         \
83         }
84 #endif
85
86 #define PT_FIRST_AVAIL_BITS_SHIFT 9
87 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
88
89 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
90
91 #define PT64_LEVEL_BITS 9
92
93 #define PT64_LEVEL_SHIFT(level) \
94                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
95
96 #define PT64_LEVEL_MASK(level) \
97                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
98
99 #define PT64_INDEX(address, level)\
100         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
101
102
103 #define PT32_LEVEL_BITS 10
104
105 #define PT32_LEVEL_SHIFT(level) \
106                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
107
108 #define PT32_LEVEL_MASK(level) \
109                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
110
111 #define PT32_INDEX(address, level)\
112         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
113
114
115 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
116 #define PT64_DIR_BASE_ADDR_MASK \
117         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
118
119 #define PT32_BASE_ADDR_MASK PAGE_MASK
120 #define PT32_DIR_BASE_ADDR_MASK \
121         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
122
123 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
124                         | PT64_NX_MASK)
125
126 #define PFERR_PRESENT_MASK (1U << 0)
127 #define PFERR_WRITE_MASK (1U << 1)
128 #define PFERR_USER_MASK (1U << 2)
129 #define PFERR_RSVD_MASK (1U << 3)
130 #define PFERR_FETCH_MASK (1U << 4)
131
132 #define PT_DIRECTORY_LEVEL 2
133 #define PT_PAGE_TABLE_LEVEL 1
134
135 #define RMAP_EXT 4
136
137 #define ACC_EXEC_MASK    1
138 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
139 #define ACC_USER_MASK    PT_USER_MASK
140 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
141
142 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
143
144 struct kvm_rmap_desc {
145         u64 *shadow_ptes[RMAP_EXT];
146         struct kvm_rmap_desc *more;
147 };
148
149 struct kvm_shadow_walk_iterator {
150         u64 addr;
151         hpa_t shadow_addr;
152         int level;
153         u64 *sptep;
154         unsigned index;
155 };
156
157 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
158         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
159              shadow_walk_okay(&(_walker));                      \
160              shadow_walk_next(&(_walker)))
161
162
163 struct kvm_unsync_walk {
164         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
165 };
166
167 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
168
169 static struct kmem_cache *pte_chain_cache;
170 static struct kmem_cache *rmap_desc_cache;
171 static struct kmem_cache *mmu_page_header_cache;
172
173 static u64 __read_mostly shadow_trap_nonpresent_pte;
174 static u64 __read_mostly shadow_notrap_nonpresent_pte;
175 static u64 __read_mostly shadow_base_present_pte;
176 static u64 __read_mostly shadow_nx_mask;
177 static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
178 static u64 __read_mostly shadow_user_mask;
179 static u64 __read_mostly shadow_accessed_mask;
180 static u64 __read_mostly shadow_dirty_mask;
181 static u64 __read_mostly shadow_mt_mask;
182
183 static inline u64 rsvd_bits(int s, int e)
184 {
185         return ((1ULL << (e - s + 1)) - 1) << s;
186 }
187
188 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
189 {
190         shadow_trap_nonpresent_pte = trap_pte;
191         shadow_notrap_nonpresent_pte = notrap_pte;
192 }
193 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
194
195 void kvm_mmu_set_base_ptes(u64 base_pte)
196 {
197         shadow_base_present_pte = base_pte;
198 }
199 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
200
201 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
202                 u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
203 {
204         shadow_user_mask = user_mask;
205         shadow_accessed_mask = accessed_mask;
206         shadow_dirty_mask = dirty_mask;
207         shadow_nx_mask = nx_mask;
208         shadow_x_mask = x_mask;
209         shadow_mt_mask = mt_mask;
210 }
211 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
212
213 static int is_write_protection(struct kvm_vcpu *vcpu)
214 {
215         return vcpu->arch.cr0 & X86_CR0_WP;
216 }
217
218 static int is_cpuid_PSE36(void)
219 {
220         return 1;
221 }
222
223 static int is_nx(struct kvm_vcpu *vcpu)
224 {
225         return vcpu->arch.shadow_efer & EFER_NX;
226 }
227
228 static int is_shadow_present_pte(u64 pte)
229 {
230         return pte != shadow_trap_nonpresent_pte
231                 && pte != shadow_notrap_nonpresent_pte;
232 }
233
234 static int is_large_pte(u64 pte)
235 {
236         return pte & PT_PAGE_SIZE_MASK;
237 }
238
239 static int is_writeble_pte(unsigned long pte)
240 {
241         return pte & PT_WRITABLE_MASK;
242 }
243
244 static int is_dirty_pte(unsigned long pte)
245 {
246         return pte & shadow_dirty_mask;
247 }
248
249 static int is_rmap_pte(u64 pte)
250 {
251         return is_shadow_present_pte(pte);
252 }
253
254 static pfn_t spte_to_pfn(u64 pte)
255 {
256         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
257 }
258
259 static gfn_t pse36_gfn_delta(u32 gpte)
260 {
261         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
262
263         return (gpte & PT32_DIR_PSE36_MASK) << shift;
264 }
265
266 static void set_shadow_pte(u64 *sptep, u64 spte)
267 {
268 #ifdef CONFIG_X86_64
269         set_64bit((unsigned long *)sptep, spte);
270 #else
271         set_64bit((unsigned long long *)sptep, spte);
272 #endif
273 }
274
275 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
276                                   struct kmem_cache *base_cache, int min)
277 {
278         void *obj;
279
280         if (cache->nobjs >= min)
281                 return 0;
282         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
283                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
284                 if (!obj)
285                         return -ENOMEM;
286                 cache->objects[cache->nobjs++] = obj;
287         }
288         return 0;
289 }
290
291 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
292 {
293         while (mc->nobjs)
294                 kfree(mc->objects[--mc->nobjs]);
295 }
296
297 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
298                                        int min)
299 {
300         struct page *page;
301
302         if (cache->nobjs >= min)
303                 return 0;
304         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
305                 page = alloc_page(GFP_KERNEL);
306                 if (!page)
307                         return -ENOMEM;
308                 set_page_private(page, 0);
309                 cache->objects[cache->nobjs++] = page_address(page);
310         }
311         return 0;
312 }
313
314 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
315 {
316         while (mc->nobjs)
317                 free_page((unsigned long)mc->objects[--mc->nobjs]);
318 }
319
320 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
321 {
322         int r;
323
324         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
325                                    pte_chain_cache, 4);
326         if (r)
327                 goto out;
328         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
329                                    rmap_desc_cache, 4);
330         if (r)
331                 goto out;
332         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
333         if (r)
334                 goto out;
335         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
336                                    mmu_page_header_cache, 4);
337 out:
338         return r;
339 }
340
341 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
342 {
343         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
344         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
345         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
346         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
347 }
348
349 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
350                                     size_t size)
351 {
352         void *p;
353
354         BUG_ON(!mc->nobjs);
355         p = mc->objects[--mc->nobjs];
356         return p;
357 }
358
359 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
360 {
361         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
362                                       sizeof(struct kvm_pte_chain));
363 }
364
365 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
366 {
367         kfree(pc);
368 }
369
370 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
371 {
372         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
373                                       sizeof(struct kvm_rmap_desc));
374 }
375
376 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
377 {
378         kfree(rd);
379 }
380
381 /*
382  * Return the pointer to the largepage write count for a given
383  * gfn, handling slots that are not large page aligned.
384  */
385 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
386 {
387         unsigned long idx;
388
389         idx = (gfn / KVM_PAGES_PER_HPAGE) -
390               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
391         return &slot->lpage_info[idx].write_count;
392 }
393
394 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
395 {
396         int *write_count;
397
398         gfn = unalias_gfn(kvm, gfn);
399         write_count = slot_largepage_idx(gfn,
400                                          gfn_to_memslot_unaliased(kvm, gfn));
401         *write_count += 1;
402 }
403
404 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
405 {
406         int *write_count;
407
408         gfn = unalias_gfn(kvm, gfn);
409         write_count = slot_largepage_idx(gfn,
410                                          gfn_to_memslot_unaliased(kvm, gfn));
411         *write_count -= 1;
412         WARN_ON(*write_count < 0);
413 }
414
415 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
416 {
417         struct kvm_memory_slot *slot;
418         int *largepage_idx;
419
420         gfn = unalias_gfn(kvm, gfn);
421         slot = gfn_to_memslot_unaliased(kvm, gfn);
422         if (slot) {
423                 largepage_idx = slot_largepage_idx(gfn, slot);
424                 return *largepage_idx;
425         }
426
427         return 1;
428 }
429
430 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
431 {
432         struct vm_area_struct *vma;
433         unsigned long addr;
434         int ret = 0;
435
436         addr = gfn_to_hva(kvm, gfn);
437         if (kvm_is_error_hva(addr))
438                 return ret;
439
440         down_read(&current->mm->mmap_sem);
441         vma = find_vma(current->mm, addr);
442         if (vma && is_vm_hugetlb_page(vma))
443                 ret = 1;
444         up_read(&current->mm->mmap_sem);
445
446         return ret;
447 }
448
449 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
450 {
451         struct kvm_memory_slot *slot;
452
453         if (has_wrprotected_page(vcpu->kvm, large_gfn))
454                 return 0;
455
456         if (!host_largepage_backed(vcpu->kvm, large_gfn))
457                 return 0;
458
459         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
460         if (slot && slot->dirty_bitmap)
461                 return 0;
462
463         return 1;
464 }
465
466 /*
467  * Take gfn and return the reverse mapping to it.
468  * Note: gfn must be unaliased before this function get called
469  */
470
471 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
472 {
473         struct kvm_memory_slot *slot;
474         unsigned long idx;
475
476         slot = gfn_to_memslot(kvm, gfn);
477         if (!lpage)
478                 return &slot->rmap[gfn - slot->base_gfn];
479
480         idx = (gfn / KVM_PAGES_PER_HPAGE) -
481               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
482
483         return &slot->lpage_info[idx].rmap_pde;
484 }
485
486 /*
487  * Reverse mapping data structures:
488  *
489  * If rmapp bit zero is zero, then rmapp point to the shadw page table entry
490  * that points to page_address(page).
491  *
492  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
493  * containing more mappings.
494  */
495 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
496 {
497         struct kvm_mmu_page *sp;
498         struct kvm_rmap_desc *desc;
499         unsigned long *rmapp;
500         int i;
501
502         if (!is_rmap_pte(*spte))
503                 return;
504         gfn = unalias_gfn(vcpu->kvm, gfn);
505         sp = page_header(__pa(spte));
506         sp->gfns[spte - sp->spt] = gfn;
507         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
508         if (!*rmapp) {
509                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
510                 *rmapp = (unsigned long)spte;
511         } else if (!(*rmapp & 1)) {
512                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
513                 desc = mmu_alloc_rmap_desc(vcpu);
514                 desc->shadow_ptes[0] = (u64 *)*rmapp;
515                 desc->shadow_ptes[1] = spte;
516                 *rmapp = (unsigned long)desc | 1;
517         } else {
518                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
519                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
520                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
521                         desc = desc->more;
522                 if (desc->shadow_ptes[RMAP_EXT-1]) {
523                         desc->more = mmu_alloc_rmap_desc(vcpu);
524                         desc = desc->more;
525                 }
526                 for (i = 0; desc->shadow_ptes[i]; ++i)
527                         ;
528                 desc->shadow_ptes[i] = spte;
529         }
530 }
531
532 static void rmap_desc_remove_entry(unsigned long *rmapp,
533                                    struct kvm_rmap_desc *desc,
534                                    int i,
535                                    struct kvm_rmap_desc *prev_desc)
536 {
537         int j;
538
539         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
540                 ;
541         desc->shadow_ptes[i] = desc->shadow_ptes[j];
542         desc->shadow_ptes[j] = NULL;
543         if (j != 0)
544                 return;
545         if (!prev_desc && !desc->more)
546                 *rmapp = (unsigned long)desc->shadow_ptes[0];
547         else
548                 if (prev_desc)
549                         prev_desc->more = desc->more;
550                 else
551                         *rmapp = (unsigned long)desc->more | 1;
552         mmu_free_rmap_desc(desc);
553 }
554
555 static void rmap_remove(struct kvm *kvm, u64 *spte)
556 {
557         struct kvm_rmap_desc *desc;
558         struct kvm_rmap_desc *prev_desc;
559         struct kvm_mmu_page *sp;
560         pfn_t pfn;
561         unsigned long *rmapp;
562         int i;
563
564         if (!is_rmap_pte(*spte))
565                 return;
566         sp = page_header(__pa(spte));
567         pfn = spte_to_pfn(*spte);
568         if (*spte & shadow_accessed_mask)
569                 kvm_set_pfn_accessed(pfn);
570         if (is_writeble_pte(*spte))
571                 kvm_release_pfn_dirty(pfn);
572         else
573                 kvm_release_pfn_clean(pfn);
574         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
575         if (!*rmapp) {
576                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
577                 BUG();
578         } else if (!(*rmapp & 1)) {
579                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
580                 if ((u64 *)*rmapp != spte) {
581                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
582                                spte, *spte);
583                         BUG();
584                 }
585                 *rmapp = 0;
586         } else {
587                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
588                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
589                 prev_desc = NULL;
590                 while (desc) {
591                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
592                                 if (desc->shadow_ptes[i] == spte) {
593                                         rmap_desc_remove_entry(rmapp,
594                                                                desc, i,
595                                                                prev_desc);
596                                         return;
597                                 }
598                         prev_desc = desc;
599                         desc = desc->more;
600                 }
601                 BUG();
602         }
603 }
604
605 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
606 {
607         struct kvm_rmap_desc *desc;
608         struct kvm_rmap_desc *prev_desc;
609         u64 *prev_spte;
610         int i;
611
612         if (!*rmapp)
613                 return NULL;
614         else if (!(*rmapp & 1)) {
615                 if (!spte)
616                         return (u64 *)*rmapp;
617                 return NULL;
618         }
619         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
620         prev_desc = NULL;
621         prev_spte = NULL;
622         while (desc) {
623                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
624                         if (prev_spte == spte)
625                                 return desc->shadow_ptes[i];
626                         prev_spte = desc->shadow_ptes[i];
627                 }
628                 desc = desc->more;
629         }
630         return NULL;
631 }
632
633 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
634 {
635         unsigned long *rmapp;
636         u64 *spte;
637         int write_protected = 0;
638
639         gfn = unalias_gfn(kvm, gfn);
640         rmapp = gfn_to_rmap(kvm, gfn, 0);
641
642         spte = rmap_next(kvm, rmapp, NULL);
643         while (spte) {
644                 BUG_ON(!spte);
645                 BUG_ON(!(*spte & PT_PRESENT_MASK));
646                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
647                 if (is_writeble_pte(*spte)) {
648                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
649                         write_protected = 1;
650                 }
651                 spte = rmap_next(kvm, rmapp, spte);
652         }
653         if (write_protected) {
654                 pfn_t pfn;
655
656                 spte = rmap_next(kvm, rmapp, NULL);
657                 pfn = spte_to_pfn(*spte);
658                 kvm_set_pfn_dirty(pfn);
659         }
660
661         /* check for huge page mappings */
662         rmapp = gfn_to_rmap(kvm, gfn, 1);
663         spte = rmap_next(kvm, rmapp, NULL);
664         while (spte) {
665                 BUG_ON(!spte);
666                 BUG_ON(!(*spte & PT_PRESENT_MASK));
667                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
668                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
669                 if (is_writeble_pte(*spte)) {
670                         rmap_remove(kvm, spte);
671                         --kvm->stat.lpages;
672                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
673                         spte = NULL;
674                         write_protected = 1;
675                 }
676                 spte = rmap_next(kvm, rmapp, spte);
677         }
678
679         return write_protected;
680 }
681
682 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
683 {
684         u64 *spte;
685         int need_tlb_flush = 0;
686
687         while ((spte = rmap_next(kvm, rmapp, NULL))) {
688                 BUG_ON(!(*spte & PT_PRESENT_MASK));
689                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
690                 rmap_remove(kvm, spte);
691                 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
692                 need_tlb_flush = 1;
693         }
694         return need_tlb_flush;
695 }
696
697 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
698                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
699 {
700         int i;
701         int retval = 0;
702
703         /*
704          * If mmap_sem isn't taken, we can look the memslots with only
705          * the mmu_lock by skipping over the slots with userspace_addr == 0.
706          */
707         for (i = 0; i < kvm->nmemslots; i++) {
708                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
709                 unsigned long start = memslot->userspace_addr;
710                 unsigned long end;
711
712                 /* mmu_lock protects userspace_addr */
713                 if (!start)
714                         continue;
715
716                 end = start + (memslot->npages << PAGE_SHIFT);
717                 if (hva >= start && hva < end) {
718                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
719                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
720                         retval |= handler(kvm,
721                                           &memslot->lpage_info[
722                                                   gfn_offset /
723                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
724                 }
725         }
726
727         return retval;
728 }
729
730 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
731 {
732         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
733 }
734
735 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
736 {
737         u64 *spte;
738         int young = 0;
739
740         /* always return old for EPT */
741         if (!shadow_accessed_mask)
742                 return 0;
743
744         spte = rmap_next(kvm, rmapp, NULL);
745         while (spte) {
746                 int _young;
747                 u64 _spte = *spte;
748                 BUG_ON(!(_spte & PT_PRESENT_MASK));
749                 _young = _spte & PT_ACCESSED_MASK;
750                 if (_young) {
751                         young = 1;
752                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
753                 }
754                 spte = rmap_next(kvm, rmapp, spte);
755         }
756         return young;
757 }
758
759 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
760 {
761         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
762 }
763
764 #ifdef MMU_DEBUG
765 static int is_empty_shadow_page(u64 *spt)
766 {
767         u64 *pos;
768         u64 *end;
769
770         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
771                 if (is_shadow_present_pte(*pos)) {
772                         printk(KERN_ERR "%s: %p %llx\n", __func__,
773                                pos, *pos);
774                         return 0;
775                 }
776         return 1;
777 }
778 #endif
779
780 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
781 {
782         ASSERT(is_empty_shadow_page(sp->spt));
783         list_del(&sp->link);
784         __free_page(virt_to_page(sp->spt));
785         __free_page(virt_to_page(sp->gfns));
786         kfree(sp);
787         ++kvm->arch.n_free_mmu_pages;
788 }
789
790 static unsigned kvm_page_table_hashfn(gfn_t gfn)
791 {
792         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
793 }
794
795 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
796                                                u64 *parent_pte)
797 {
798         struct kvm_mmu_page *sp;
799
800         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
801         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
802         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
803         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
804         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
805         INIT_LIST_HEAD(&sp->oos_link);
806         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
807         sp->multimapped = 0;
808         sp->parent_pte = parent_pte;
809         --vcpu->kvm->arch.n_free_mmu_pages;
810         return sp;
811 }
812
813 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
814                                     struct kvm_mmu_page *sp, u64 *parent_pte)
815 {
816         struct kvm_pte_chain *pte_chain;
817         struct hlist_node *node;
818         int i;
819
820         if (!parent_pte)
821                 return;
822         if (!sp->multimapped) {
823                 u64 *old = sp->parent_pte;
824
825                 if (!old) {
826                         sp->parent_pte = parent_pte;
827                         return;
828                 }
829                 sp->multimapped = 1;
830                 pte_chain = mmu_alloc_pte_chain(vcpu);
831                 INIT_HLIST_HEAD(&sp->parent_ptes);
832                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
833                 pte_chain->parent_ptes[0] = old;
834         }
835         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
836                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
837                         continue;
838                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
839                         if (!pte_chain->parent_ptes[i]) {
840                                 pte_chain->parent_ptes[i] = parent_pte;
841                                 return;
842                         }
843         }
844         pte_chain = mmu_alloc_pte_chain(vcpu);
845         BUG_ON(!pte_chain);
846         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
847         pte_chain->parent_ptes[0] = parent_pte;
848 }
849
850 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
851                                        u64 *parent_pte)
852 {
853         struct kvm_pte_chain *pte_chain;
854         struct hlist_node *node;
855         int i;
856
857         if (!sp->multimapped) {
858                 BUG_ON(sp->parent_pte != parent_pte);
859                 sp->parent_pte = NULL;
860                 return;
861         }
862         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
863                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
864                         if (!pte_chain->parent_ptes[i])
865                                 break;
866                         if (pte_chain->parent_ptes[i] != parent_pte)
867                                 continue;
868                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
869                                 && pte_chain->parent_ptes[i + 1]) {
870                                 pte_chain->parent_ptes[i]
871                                         = pte_chain->parent_ptes[i + 1];
872                                 ++i;
873                         }
874                         pte_chain->parent_ptes[i] = NULL;
875                         if (i == 0) {
876                                 hlist_del(&pte_chain->link);
877                                 mmu_free_pte_chain(pte_chain);
878                                 if (hlist_empty(&sp->parent_ptes)) {
879                                         sp->multimapped = 0;
880                                         sp->parent_pte = NULL;
881                                 }
882                         }
883                         return;
884                 }
885         BUG();
886 }
887
888
889 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
890                             mmu_parent_walk_fn fn)
891 {
892         struct kvm_pte_chain *pte_chain;
893         struct hlist_node *node;
894         struct kvm_mmu_page *parent_sp;
895         int i;
896
897         if (!sp->multimapped && sp->parent_pte) {
898                 parent_sp = page_header(__pa(sp->parent_pte));
899                 fn(vcpu, parent_sp);
900                 mmu_parent_walk(vcpu, parent_sp, fn);
901                 return;
902         }
903         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
904                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
905                         if (!pte_chain->parent_ptes[i])
906                                 break;
907                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
908                         fn(vcpu, parent_sp);
909                         mmu_parent_walk(vcpu, parent_sp, fn);
910                 }
911 }
912
913 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
914 {
915         unsigned int index;
916         struct kvm_mmu_page *sp = page_header(__pa(spte));
917
918         index = spte - sp->spt;
919         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
920                 sp->unsync_children++;
921         WARN_ON(!sp->unsync_children);
922 }
923
924 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
925 {
926         struct kvm_pte_chain *pte_chain;
927         struct hlist_node *node;
928         int i;
929
930         if (!sp->parent_pte)
931                 return;
932
933         if (!sp->multimapped) {
934                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
935                 return;
936         }
937
938         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
939                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
940                         if (!pte_chain->parent_ptes[i])
941                                 break;
942                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
943                 }
944 }
945
946 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
947 {
948         kvm_mmu_update_parents_unsync(sp);
949         return 1;
950 }
951
952 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
953                                         struct kvm_mmu_page *sp)
954 {
955         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
956         kvm_mmu_update_parents_unsync(sp);
957 }
958
959 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
960                                     struct kvm_mmu_page *sp)
961 {
962         int i;
963
964         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
965                 sp->spt[i] = shadow_trap_nonpresent_pte;
966 }
967
968 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
969                                struct kvm_mmu_page *sp)
970 {
971         return 1;
972 }
973
974 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
975 {
976 }
977
978 #define KVM_PAGE_ARRAY_NR 16
979
980 struct kvm_mmu_pages {
981         struct mmu_page_and_offset {
982                 struct kvm_mmu_page *sp;
983                 unsigned int idx;
984         } page[KVM_PAGE_ARRAY_NR];
985         unsigned int nr;
986 };
987
988 #define for_each_unsync_children(bitmap, idx)           \
989         for (idx = find_first_bit(bitmap, 512);         \
990              idx < 512;                                 \
991              idx = find_next_bit(bitmap, 512, idx+1))
992
993 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
994                          int idx)
995 {
996         int i;
997
998         if (sp->unsync)
999                 for (i=0; i < pvec->nr; i++)
1000                         if (pvec->page[i].sp == sp)
1001                                 return 0;
1002
1003         pvec->page[pvec->nr].sp = sp;
1004         pvec->page[pvec->nr].idx = idx;
1005         pvec->nr++;
1006         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1007 }
1008
1009 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1010                            struct kvm_mmu_pages *pvec)
1011 {
1012         int i, ret, nr_unsync_leaf = 0;
1013
1014         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1015                 u64 ent = sp->spt[i];
1016
1017                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1018                         struct kvm_mmu_page *child;
1019                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1020
1021                         if (child->unsync_children) {
1022                                 if (mmu_pages_add(pvec, child, i))
1023                                         return -ENOSPC;
1024
1025                                 ret = __mmu_unsync_walk(child, pvec);
1026                                 if (!ret)
1027                                         __clear_bit(i, sp->unsync_child_bitmap);
1028                                 else if (ret > 0)
1029                                         nr_unsync_leaf += ret;
1030                                 else
1031                                         return ret;
1032                         }
1033
1034                         if (child->unsync) {
1035                                 nr_unsync_leaf++;
1036                                 if (mmu_pages_add(pvec, child, i))
1037                                         return -ENOSPC;
1038                         }
1039                 }
1040         }
1041
1042         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1043                 sp->unsync_children = 0;
1044
1045         return nr_unsync_leaf;
1046 }
1047
1048 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1049                            struct kvm_mmu_pages *pvec)
1050 {
1051         if (!sp->unsync_children)
1052                 return 0;
1053
1054         mmu_pages_add(pvec, sp, 0);
1055         return __mmu_unsync_walk(sp, pvec);
1056 }
1057
1058 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1059 {
1060         unsigned index;
1061         struct hlist_head *bucket;
1062         struct kvm_mmu_page *sp;
1063         struct hlist_node *node;
1064
1065         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1066         index = kvm_page_table_hashfn(gfn);
1067         bucket = &kvm->arch.mmu_page_hash[index];
1068         hlist_for_each_entry(sp, node, bucket, hash_link)
1069                 if (sp->gfn == gfn && !sp->role.direct
1070                     && !sp->role.invalid) {
1071                         pgprintk("%s: found role %x\n",
1072                                  __func__, sp->role.word);
1073                         return sp;
1074                 }
1075         return NULL;
1076 }
1077
1078 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1079 {
1080         WARN_ON(!sp->unsync);
1081         sp->unsync = 0;
1082         --kvm->stat.mmu_unsync;
1083 }
1084
1085 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1086
1087 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1088 {
1089         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1090                 kvm_mmu_zap_page(vcpu->kvm, sp);
1091                 return 1;
1092         }
1093
1094         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1095                 kvm_flush_remote_tlbs(vcpu->kvm);
1096         kvm_unlink_unsync_page(vcpu->kvm, sp);
1097         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1098                 kvm_mmu_zap_page(vcpu->kvm, sp);
1099                 return 1;
1100         }
1101
1102         kvm_mmu_flush_tlb(vcpu);
1103         return 0;
1104 }
1105
1106 struct mmu_page_path {
1107         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1108         unsigned int idx[PT64_ROOT_LEVEL-1];
1109 };
1110
1111 #define for_each_sp(pvec, sp, parents, i)                       \
1112                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1113                         sp = pvec.page[i].sp;                   \
1114                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1115                         i = mmu_pages_next(&pvec, &parents, i))
1116
1117 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1118                           struct mmu_page_path *parents,
1119                           int i)
1120 {
1121         int n;
1122
1123         for (n = i+1; n < pvec->nr; n++) {
1124                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1125
1126                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1127                         parents->idx[0] = pvec->page[n].idx;
1128                         return n;
1129                 }
1130
1131                 parents->parent[sp->role.level-2] = sp;
1132                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1133         }
1134
1135         return n;
1136 }
1137
1138 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1139 {
1140         struct kvm_mmu_page *sp;
1141         unsigned int level = 0;
1142
1143         do {
1144                 unsigned int idx = parents->idx[level];
1145
1146                 sp = parents->parent[level];
1147                 if (!sp)
1148                         return;
1149
1150                 --sp->unsync_children;
1151                 WARN_ON((int)sp->unsync_children < 0);
1152                 __clear_bit(idx, sp->unsync_child_bitmap);
1153                 level++;
1154         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1155 }
1156
1157 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1158                                struct mmu_page_path *parents,
1159                                struct kvm_mmu_pages *pvec)
1160 {
1161         parents->parent[parent->role.level-1] = NULL;
1162         pvec->nr = 0;
1163 }
1164
1165 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1166                               struct kvm_mmu_page *parent)
1167 {
1168         int i;
1169         struct kvm_mmu_page *sp;
1170         struct mmu_page_path parents;
1171         struct kvm_mmu_pages pages;
1172
1173         kvm_mmu_pages_init(parent, &parents, &pages);
1174         while (mmu_unsync_walk(parent, &pages)) {
1175                 int protected = 0;
1176
1177                 for_each_sp(pages, sp, parents, i)
1178                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1179
1180                 if (protected)
1181                         kvm_flush_remote_tlbs(vcpu->kvm);
1182
1183                 for_each_sp(pages, sp, parents, i) {
1184                         kvm_sync_page(vcpu, sp);
1185                         mmu_pages_clear_parents(&parents);
1186                 }
1187                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1188                 kvm_mmu_pages_init(parent, &parents, &pages);
1189         }
1190 }
1191
1192 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1193                                              gfn_t gfn,
1194                                              gva_t gaddr,
1195                                              unsigned level,
1196                                              int direct,
1197                                              unsigned access,
1198                                              u64 *parent_pte)
1199 {
1200         union kvm_mmu_page_role role;
1201         unsigned index;
1202         unsigned quadrant;
1203         struct hlist_head *bucket;
1204         struct kvm_mmu_page *sp;
1205         struct hlist_node *node, *tmp;
1206
1207         role = vcpu->arch.mmu.base_role;
1208         role.level = level;
1209         role.direct = direct;
1210         role.access = access;
1211         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1212                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1213                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1214                 role.quadrant = quadrant;
1215         }
1216         pgprintk("%s: looking gfn %lx role %x\n", __func__,
1217                  gfn, role.word);
1218         index = kvm_page_table_hashfn(gfn);
1219         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1220         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1221                 if (sp->gfn == gfn) {
1222                         if (sp->unsync)
1223                                 if (kvm_sync_page(vcpu, sp))
1224                                         continue;
1225
1226                         if (sp->role.word != role.word)
1227                                 continue;
1228
1229                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1230                         if (sp->unsync_children) {
1231                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1232                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1233                         }
1234                         pgprintk("%s: found\n", __func__);
1235                         return sp;
1236                 }
1237         ++vcpu->kvm->stat.mmu_cache_miss;
1238         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1239         if (!sp)
1240                 return sp;
1241         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1242         sp->gfn = gfn;
1243         sp->role = role;
1244         hlist_add_head(&sp->hash_link, bucket);
1245         if (!direct) {
1246                 if (rmap_write_protect(vcpu->kvm, gfn))
1247                         kvm_flush_remote_tlbs(vcpu->kvm);
1248                 account_shadowed(vcpu->kvm, gfn);
1249         }
1250         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1251                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1252         else
1253                 nonpaging_prefetch_page(vcpu, sp);
1254         return sp;
1255 }
1256
1257 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1258                              struct kvm_vcpu *vcpu, u64 addr)
1259 {
1260         iterator->addr = addr;
1261         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1262         iterator->level = vcpu->arch.mmu.shadow_root_level;
1263         if (iterator->level == PT32E_ROOT_LEVEL) {
1264                 iterator->shadow_addr
1265                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1266                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1267                 --iterator->level;
1268                 if (!iterator->shadow_addr)
1269                         iterator->level = 0;
1270         }
1271 }
1272
1273 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1274 {
1275         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1276                 return false;
1277         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1278         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1279         return true;
1280 }
1281
1282 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1283 {
1284         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1285         --iterator->level;
1286 }
1287
1288 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1289                                          struct kvm_mmu_page *sp)
1290 {
1291         unsigned i;
1292         u64 *pt;
1293         u64 ent;
1294
1295         pt = sp->spt;
1296
1297         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1298                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1299                         if (is_shadow_present_pte(pt[i]))
1300                                 rmap_remove(kvm, &pt[i]);
1301                         pt[i] = shadow_trap_nonpresent_pte;
1302                 }
1303                 return;
1304         }
1305
1306         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1307                 ent = pt[i];
1308
1309                 if (is_shadow_present_pte(ent)) {
1310                         if (!is_large_pte(ent)) {
1311                                 ent &= PT64_BASE_ADDR_MASK;
1312                                 mmu_page_remove_parent_pte(page_header(ent),
1313                                                            &pt[i]);
1314                         } else {
1315                                 --kvm->stat.lpages;
1316                                 rmap_remove(kvm, &pt[i]);
1317                         }
1318                 }
1319                 pt[i] = shadow_trap_nonpresent_pte;
1320         }
1321 }
1322
1323 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1324 {
1325         mmu_page_remove_parent_pte(sp, parent_pte);
1326 }
1327
1328 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1329 {
1330         int i;
1331
1332         for (i = 0; i < KVM_MAX_VCPUS; ++i)
1333                 if (kvm->vcpus[i])
1334                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
1335 }
1336
1337 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1338 {
1339         u64 *parent_pte;
1340
1341         while (sp->multimapped || sp->parent_pte) {
1342                 if (!sp->multimapped)
1343                         parent_pte = sp->parent_pte;
1344                 else {
1345                         struct kvm_pte_chain *chain;
1346
1347                         chain = container_of(sp->parent_ptes.first,
1348                                              struct kvm_pte_chain, link);
1349                         parent_pte = chain->parent_ptes[0];
1350                 }
1351                 BUG_ON(!parent_pte);
1352                 kvm_mmu_put_page(sp, parent_pte);
1353                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
1354         }
1355 }
1356
1357 static int mmu_zap_unsync_children(struct kvm *kvm,
1358                                    struct kvm_mmu_page *parent)
1359 {
1360         int i, zapped = 0;
1361         struct mmu_page_path parents;
1362         struct kvm_mmu_pages pages;
1363
1364         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1365                 return 0;
1366
1367         kvm_mmu_pages_init(parent, &parents, &pages);
1368         while (mmu_unsync_walk(parent, &pages)) {
1369                 struct kvm_mmu_page *sp;
1370
1371                 for_each_sp(pages, sp, parents, i) {
1372                         kvm_mmu_zap_page(kvm, sp);
1373                         mmu_pages_clear_parents(&parents);
1374                 }
1375                 zapped += pages.nr;
1376                 kvm_mmu_pages_init(parent, &parents, &pages);
1377         }
1378
1379         return zapped;
1380 }
1381
1382 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1383 {
1384         int ret;
1385         ++kvm->stat.mmu_shadow_zapped;
1386         ret = mmu_zap_unsync_children(kvm, sp);
1387         kvm_mmu_page_unlink_children(kvm, sp);
1388         kvm_mmu_unlink_parents(kvm, sp);
1389         kvm_flush_remote_tlbs(kvm);
1390         if (!sp->role.invalid && !sp->role.direct)
1391                 unaccount_shadowed(kvm, sp->gfn);
1392         if (sp->unsync)
1393                 kvm_unlink_unsync_page(kvm, sp);
1394         if (!sp->root_count) {
1395                 hlist_del(&sp->hash_link);
1396                 kvm_mmu_free_page(kvm, sp);
1397         } else {
1398                 sp->role.invalid = 1;
1399                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1400                 kvm_reload_remote_mmus(kvm);
1401         }
1402         kvm_mmu_reset_last_pte_updated(kvm);
1403         return ret;
1404 }
1405
1406 /*
1407  * Changing the number of mmu pages allocated to the vm
1408  * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
1409  */
1410 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1411 {
1412         /*
1413          * If we set the number of mmu pages to be smaller be than the
1414          * number of actived pages , we must to free some mmu pages before we
1415          * change the value
1416          */
1417
1418         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
1419             kvm_nr_mmu_pages) {
1420                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
1421                                        - kvm->arch.n_free_mmu_pages;
1422
1423                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
1424                         struct kvm_mmu_page *page;
1425
1426                         page = container_of(kvm->arch.active_mmu_pages.prev,
1427                                             struct kvm_mmu_page, link);
1428                         kvm_mmu_zap_page(kvm, page);
1429                         n_used_mmu_pages--;
1430                 }
1431                 kvm->arch.n_free_mmu_pages = 0;
1432         }
1433         else
1434                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1435                                          - kvm->arch.n_alloc_mmu_pages;
1436
1437         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1438 }
1439
1440 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1441 {
1442         unsigned index;
1443         struct hlist_head *bucket;
1444         struct kvm_mmu_page *sp;
1445         struct hlist_node *node, *n;
1446         int r;
1447
1448         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1449         r = 0;
1450         index = kvm_page_table_hashfn(gfn);
1451         bucket = &kvm->arch.mmu_page_hash[index];
1452         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1453                 if (sp->gfn == gfn && !sp->role.direct) {
1454                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1455                                  sp->role.word);
1456                         r = 1;
1457                         if (kvm_mmu_zap_page(kvm, sp))
1458                                 n = bucket->first;
1459                 }
1460         return r;
1461 }
1462
1463 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1464 {
1465         unsigned index;
1466         struct hlist_head *bucket;
1467         struct kvm_mmu_page *sp;
1468         struct hlist_node *node, *nn;
1469
1470         index = kvm_page_table_hashfn(gfn);
1471         bucket = &kvm->arch.mmu_page_hash[index];
1472         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1473                 if (sp->gfn == gfn && !sp->role.direct
1474                     && !sp->role.invalid) {
1475                         pgprintk("%s: zap %lx %x\n",
1476                                  __func__, gfn, sp->role.word);
1477                         kvm_mmu_zap_page(kvm, sp);
1478                 }
1479         }
1480 }
1481
1482 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1483 {
1484         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1485         struct kvm_mmu_page *sp = page_header(__pa(pte));
1486
1487         __set_bit(slot, sp->slot_bitmap);
1488 }
1489
1490 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1491 {
1492         int i;
1493         u64 *pt = sp->spt;
1494
1495         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1496                 return;
1497
1498         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1499                 if (pt[i] == shadow_notrap_nonpresent_pte)
1500                         set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
1501         }
1502 }
1503
1504 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1505 {
1506         struct page *page;
1507
1508         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1509
1510         if (gpa == UNMAPPED_GVA)
1511                 return NULL;
1512
1513         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1514
1515         return page;
1516 }
1517
1518 /*
1519  * The function is based on mtrr_type_lookup() in
1520  * arch/x86/kernel/cpu/mtrr/generic.c
1521  */
1522 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1523                          u64 start, u64 end)
1524 {
1525         int i;
1526         u64 base, mask;
1527         u8 prev_match, curr_match;
1528         int num_var_ranges = KVM_NR_VAR_MTRR;
1529
1530         if (!mtrr_state->enabled)
1531                 return 0xFF;
1532
1533         /* Make end inclusive end, instead of exclusive */
1534         end--;
1535
1536         /* Look in fixed ranges. Just return the type as per start */
1537         if (mtrr_state->have_fixed && (start < 0x100000)) {
1538                 int idx;
1539
1540                 if (start < 0x80000) {
1541                         idx = 0;
1542                         idx += (start >> 16);
1543                         return mtrr_state->fixed_ranges[idx];
1544                 } else if (start < 0xC0000) {
1545                         idx = 1 * 8;
1546                         idx += ((start - 0x80000) >> 14);
1547                         return mtrr_state->fixed_ranges[idx];
1548                 } else if (start < 0x1000000) {
1549                         idx = 3 * 8;
1550                         idx += ((start - 0xC0000) >> 12);
1551                         return mtrr_state->fixed_ranges[idx];
1552                 }
1553         }
1554
1555         /*
1556          * Look in variable ranges
1557          * Look of multiple ranges matching this address and pick type
1558          * as per MTRR precedence
1559          */
1560         if (!(mtrr_state->enabled & 2))
1561                 return mtrr_state->def_type;
1562
1563         prev_match = 0xFF;
1564         for (i = 0; i < num_var_ranges; ++i) {
1565                 unsigned short start_state, end_state;
1566
1567                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1568                         continue;
1569
1570                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1571                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1572                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1573                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1574
1575                 start_state = ((start & mask) == (base & mask));
1576                 end_state = ((end & mask) == (base & mask));
1577                 if (start_state != end_state)
1578                         return 0xFE;
1579
1580                 if ((start & mask) != (base & mask))
1581                         continue;
1582
1583                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1584                 if (prev_match == 0xFF) {
1585                         prev_match = curr_match;
1586                         continue;
1587                 }
1588
1589                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1590                     curr_match == MTRR_TYPE_UNCACHABLE)
1591                         return MTRR_TYPE_UNCACHABLE;
1592
1593                 if ((prev_match == MTRR_TYPE_WRBACK &&
1594                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1595                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1596                      curr_match == MTRR_TYPE_WRBACK)) {
1597                         prev_match = MTRR_TYPE_WRTHROUGH;
1598                         curr_match = MTRR_TYPE_WRTHROUGH;
1599                 }
1600
1601                 if (prev_match != curr_match)
1602                         return MTRR_TYPE_UNCACHABLE;
1603         }
1604
1605         if (prev_match != 0xFF)
1606                 return prev_match;
1607
1608         return mtrr_state->def_type;
1609 }
1610
1611 static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1612 {
1613         u8 mtrr;
1614
1615         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1616                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1617         if (mtrr == 0xfe || mtrr == 0xff)
1618                 mtrr = MTRR_TYPE_WRBACK;
1619         return mtrr;
1620 }
1621
1622 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1623 {
1624         unsigned index;
1625         struct hlist_head *bucket;
1626         struct kvm_mmu_page *s;
1627         struct hlist_node *node, *n;
1628
1629         index = kvm_page_table_hashfn(sp->gfn);
1630         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1631         /* don't unsync if pagetable is shadowed with multiple roles */
1632         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1633                 if (s->gfn != sp->gfn || s->role.direct)
1634                         continue;
1635                 if (s->role.word != sp->role.word)
1636                         return 1;
1637         }
1638         ++vcpu->kvm->stat.mmu_unsync;
1639         sp->unsync = 1;
1640
1641         kvm_mmu_mark_parents_unsync(vcpu, sp);
1642
1643         mmu_convert_notrap(sp);
1644         return 0;
1645 }
1646
1647 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1648                                   bool can_unsync)
1649 {
1650         struct kvm_mmu_page *shadow;
1651
1652         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1653         if (shadow) {
1654                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1655                         return 1;
1656                 if (shadow->unsync)
1657                         return 0;
1658                 if (can_unsync && oos_shadow)
1659                         return kvm_unsync_page(vcpu, shadow);
1660                 return 1;
1661         }
1662         return 0;
1663 }
1664
1665 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1666                     unsigned pte_access, int user_fault,
1667                     int write_fault, int dirty, int largepage,
1668                     gfn_t gfn, pfn_t pfn, bool speculative,
1669                     bool can_unsync)
1670 {
1671         u64 spte;
1672         int ret = 0;
1673         u64 mt_mask = shadow_mt_mask;
1674
1675         /*
1676          * We don't set the accessed bit, since we sometimes want to see
1677          * whether the guest actually used the pte (in order to detect
1678          * demand paging).
1679          */
1680         spte = shadow_base_present_pte | shadow_dirty_mask;
1681         if (!speculative)
1682                 spte |= shadow_accessed_mask;
1683         if (!dirty)
1684                 pte_access &= ~ACC_WRITE_MASK;
1685         if (pte_access & ACC_EXEC_MASK)
1686                 spte |= shadow_x_mask;
1687         else
1688                 spte |= shadow_nx_mask;
1689         if (pte_access & ACC_USER_MASK)
1690                 spte |= shadow_user_mask;
1691         if (largepage)
1692                 spte |= PT_PAGE_SIZE_MASK;
1693         if (mt_mask) {
1694                 if (!kvm_is_mmio_pfn(pfn)) {
1695                         mt_mask = get_memory_type(vcpu, gfn) <<
1696                                 kvm_x86_ops->get_mt_mask_shift();
1697                         mt_mask |= VMX_EPT_IGMT_BIT;
1698                 } else
1699                         mt_mask = MTRR_TYPE_UNCACHABLE <<
1700                                 kvm_x86_ops->get_mt_mask_shift();
1701                 spte |= mt_mask;
1702         }
1703
1704         spte |= (u64)pfn << PAGE_SHIFT;
1705
1706         if ((pte_access & ACC_WRITE_MASK)
1707             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1708
1709                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1710                         ret = 1;
1711                         spte = shadow_trap_nonpresent_pte;
1712                         goto set_pte;
1713                 }
1714
1715                 spte |= PT_WRITABLE_MASK;
1716
1717                 /*
1718                  * Optimization: for pte sync, if spte was writable the hash
1719                  * lookup is unnecessary (and expensive). Write protection
1720                  * is responsibility of mmu_get_page / kvm_sync_page.
1721                  * Same reasoning can be applied to dirty page accounting.
1722                  */
1723                 if (!can_unsync && is_writeble_pte(*shadow_pte))
1724                         goto set_pte;
1725
1726                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1727                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1728                                  __func__, gfn);
1729                         ret = 1;
1730                         pte_access &= ~ACC_WRITE_MASK;
1731                         if (is_writeble_pte(spte))
1732                                 spte &= ~PT_WRITABLE_MASK;
1733                 }
1734         }
1735
1736         if (pte_access & ACC_WRITE_MASK)
1737                 mark_page_dirty(vcpu->kvm, gfn);
1738
1739 set_pte:
1740         set_shadow_pte(shadow_pte, spte);
1741         return ret;
1742 }
1743
1744 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1745                          unsigned pt_access, unsigned pte_access,
1746                          int user_fault, int write_fault, int dirty,
1747                          int *ptwrite, int largepage, gfn_t gfn,
1748                          pfn_t pfn, bool speculative)
1749 {
1750         int was_rmapped = 0;
1751         int was_writeble = is_writeble_pte(*shadow_pte);
1752
1753         pgprintk("%s: spte %llx access %x write_fault %d"
1754                  " user_fault %d gfn %lx\n",
1755                  __func__, *shadow_pte, pt_access,
1756                  write_fault, user_fault, gfn);
1757
1758         if (is_rmap_pte(*shadow_pte)) {
1759                 /*
1760                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1761                  * the parent of the now unreachable PTE.
1762                  */
1763                 if (largepage && !is_large_pte(*shadow_pte)) {
1764                         struct kvm_mmu_page *child;
1765                         u64 pte = *shadow_pte;
1766
1767                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1768                         mmu_page_remove_parent_pte(child, shadow_pte);
1769                 } else if (pfn != spte_to_pfn(*shadow_pte)) {
1770                         pgprintk("hfn old %lx new %lx\n",
1771                                  spte_to_pfn(*shadow_pte), pfn);
1772                         rmap_remove(vcpu->kvm, shadow_pte);
1773                 } else
1774                         was_rmapped = 1;
1775         }
1776         if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
1777                       dirty, largepage, gfn, pfn, speculative, true)) {
1778                 if (write_fault)
1779                         *ptwrite = 1;
1780                 kvm_x86_ops->tlb_flush(vcpu);
1781         }
1782
1783         pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
1784         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1785                  is_large_pte(*shadow_pte)? "2MB" : "4kB",
1786                  is_present_pte(*shadow_pte)?"RW":"R", gfn,
1787                  *shadow_pte, shadow_pte);
1788         if (!was_rmapped && is_large_pte(*shadow_pte))
1789                 ++vcpu->kvm->stat.lpages;
1790
1791         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1792         if (!was_rmapped) {
1793                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1794                 if (!is_rmap_pte(*shadow_pte))
1795                         kvm_release_pfn_clean(pfn);
1796         } else {
1797                 if (was_writeble)
1798                         kvm_release_pfn_dirty(pfn);
1799                 else
1800                         kvm_release_pfn_clean(pfn);
1801         }
1802         if (speculative) {
1803                 vcpu->arch.last_pte_updated = shadow_pte;
1804                 vcpu->arch.last_pte_gfn = gfn;
1805         }
1806 }
1807
1808 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1809 {
1810 }
1811
1812 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1813                         int largepage, gfn_t gfn, pfn_t pfn)
1814 {
1815         struct kvm_shadow_walk_iterator iterator;
1816         struct kvm_mmu_page *sp;
1817         int pt_write = 0;
1818         gfn_t pseudo_gfn;
1819
1820         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1821                 if (iterator.level == PT_PAGE_TABLE_LEVEL
1822                     || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1823                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1824                                      0, write, 1, &pt_write,
1825                                      largepage, gfn, pfn, false);
1826                         ++vcpu->stat.pf_fixed;
1827                         break;
1828                 }
1829
1830                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1831                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1832                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1833                                               iterator.level - 1,
1834                                               1, ACC_ALL, iterator.sptep);
1835                         if (!sp) {
1836                                 pgprintk("nonpaging_map: ENOMEM\n");
1837                                 kvm_release_pfn_clean(pfn);
1838                                 return -ENOMEM;
1839                         }
1840
1841                         set_shadow_pte(iterator.sptep,
1842                                        __pa(sp->spt)
1843                                        | PT_PRESENT_MASK | PT_WRITABLE_MASK
1844                                        | shadow_user_mask | shadow_x_mask);
1845                 }
1846         }
1847         return pt_write;
1848 }
1849
1850 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1851 {
1852         int r;
1853         int largepage = 0;
1854         pfn_t pfn;
1855         unsigned long mmu_seq;
1856
1857         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1858                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1859                 largepage = 1;
1860         }
1861
1862         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1863         smp_rmb();
1864         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1865
1866         /* mmio */
1867         if (is_error_pfn(pfn)) {
1868                 kvm_release_pfn_clean(pfn);
1869                 return 1;
1870         }
1871
1872         spin_lock(&vcpu->kvm->mmu_lock);
1873         if (mmu_notifier_retry(vcpu, mmu_seq))
1874                 goto out_unlock;
1875         kvm_mmu_free_some_pages(vcpu);
1876         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1877         spin_unlock(&vcpu->kvm->mmu_lock);
1878
1879
1880         return r;
1881
1882 out_unlock:
1883         spin_unlock(&vcpu->kvm->mmu_lock);
1884         kvm_release_pfn_clean(pfn);
1885         return 0;
1886 }
1887
1888
1889 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1890 {
1891         int i;
1892         struct kvm_mmu_page *sp;
1893
1894         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1895                 return;
1896         spin_lock(&vcpu->kvm->mmu_lock);
1897         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1898                 hpa_t root = vcpu->arch.mmu.root_hpa;
1899
1900                 sp = page_header(root);
1901                 --sp->root_count;
1902                 if (!sp->root_count && sp->role.invalid)
1903                         kvm_mmu_zap_page(vcpu->kvm, sp);
1904                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1905                 spin_unlock(&vcpu->kvm->mmu_lock);
1906                 return;
1907         }
1908         for (i = 0; i < 4; ++i) {
1909                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1910
1911                 if (root) {
1912                         root &= PT64_BASE_ADDR_MASK;
1913                         sp = page_header(root);
1914                         --sp->root_count;
1915                         if (!sp->root_count && sp->role.invalid)
1916                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1917                 }
1918                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1919         }
1920         spin_unlock(&vcpu->kvm->mmu_lock);
1921         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1922 }
1923
1924 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1925 {
1926         int i;
1927         gfn_t root_gfn;
1928         struct kvm_mmu_page *sp;
1929         int direct = 0;
1930
1931         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1932
1933         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1934                 hpa_t root = vcpu->arch.mmu.root_hpa;
1935
1936                 ASSERT(!VALID_PAGE(root));
1937                 if (tdp_enabled)
1938                         direct = 1;
1939                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1940                                       PT64_ROOT_LEVEL, direct,
1941                                       ACC_ALL, NULL);
1942                 root = __pa(sp->spt);
1943                 ++sp->root_count;
1944                 vcpu->arch.mmu.root_hpa = root;
1945                 return;
1946         }
1947         direct = !is_paging(vcpu);
1948         if (tdp_enabled)
1949                 direct = 1;
1950         for (i = 0; i < 4; ++i) {
1951                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1952
1953                 ASSERT(!VALID_PAGE(root));
1954                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1955                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1956                                 vcpu->arch.mmu.pae_root[i] = 0;
1957                                 continue;
1958                         }
1959                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1960                 } else if (vcpu->arch.mmu.root_level == 0)
1961                         root_gfn = 0;
1962                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1963                                       PT32_ROOT_LEVEL, direct,
1964                                       ACC_ALL, NULL);
1965                 root = __pa(sp->spt);
1966                 ++sp->root_count;
1967                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1968         }
1969         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1970 }
1971
1972 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
1973 {
1974         int i;
1975         struct kvm_mmu_page *sp;
1976
1977         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1978                 return;
1979         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1980                 hpa_t root = vcpu->arch.mmu.root_hpa;
1981                 sp = page_header(root);
1982                 mmu_sync_children(vcpu, sp);
1983                 return;
1984         }
1985         for (i = 0; i < 4; ++i) {
1986                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1987
1988                 if (root) {
1989                         root &= PT64_BASE_ADDR_MASK;
1990                         sp = page_header(root);
1991                         mmu_sync_children(vcpu, sp);
1992                 }
1993         }
1994 }
1995
1996 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
1997 {
1998         spin_lock(&vcpu->kvm->mmu_lock);
1999         mmu_sync_roots(vcpu);
2000         spin_unlock(&vcpu->kvm->mmu_lock);
2001 }
2002
2003 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2004 {
2005         return vaddr;
2006 }
2007
2008 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2009                                 u32 error_code)
2010 {
2011         gfn_t gfn;
2012         int r;
2013
2014         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2015         r = mmu_topup_memory_caches(vcpu);
2016         if (r)
2017                 return r;
2018
2019         ASSERT(vcpu);
2020         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2021
2022         gfn = gva >> PAGE_SHIFT;
2023
2024         return nonpaging_map(vcpu, gva & PAGE_MASK,
2025                              error_code & PFERR_WRITE_MASK, gfn);
2026 }
2027
2028 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2029                                 u32 error_code)
2030 {
2031         pfn_t pfn;
2032         int r;
2033         int largepage = 0;
2034         gfn_t gfn = gpa >> PAGE_SHIFT;
2035         unsigned long mmu_seq;
2036
2037         ASSERT(vcpu);
2038         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2039
2040         r = mmu_topup_memory_caches(vcpu);
2041         if (r)
2042                 return r;
2043
2044         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
2045                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2046                 largepage = 1;
2047         }
2048         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2049         smp_rmb();
2050         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2051         if (is_error_pfn(pfn)) {
2052                 kvm_release_pfn_clean(pfn);
2053                 return 1;
2054         }
2055         spin_lock(&vcpu->kvm->mmu_lock);
2056         if (mmu_notifier_retry(vcpu, mmu_seq))
2057                 goto out_unlock;
2058         kvm_mmu_free_some_pages(vcpu);
2059         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2060                          largepage, gfn, pfn);
2061         spin_unlock(&vcpu->kvm->mmu_lock);
2062
2063         return r;
2064
2065 out_unlock:
2066         spin_unlock(&vcpu->kvm->mmu_lock);
2067         kvm_release_pfn_clean(pfn);
2068         return 0;
2069 }
2070
2071 static void nonpaging_free(struct kvm_vcpu *vcpu)
2072 {
2073         mmu_free_roots(vcpu);
2074 }
2075
2076 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2077 {
2078         struct kvm_mmu *context = &vcpu->arch.mmu;
2079
2080         context->new_cr3 = nonpaging_new_cr3;
2081         context->page_fault = nonpaging_page_fault;
2082         context->gva_to_gpa = nonpaging_gva_to_gpa;
2083         context->free = nonpaging_free;
2084         context->prefetch_page = nonpaging_prefetch_page;
2085         context->sync_page = nonpaging_sync_page;
2086         context->invlpg = nonpaging_invlpg;
2087         context->root_level = 0;
2088         context->shadow_root_level = PT32E_ROOT_LEVEL;
2089         context->root_hpa = INVALID_PAGE;
2090         return 0;
2091 }
2092
2093 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2094 {
2095         ++vcpu->stat.tlb_flush;
2096         kvm_x86_ops->tlb_flush(vcpu);
2097 }
2098
2099 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2100 {
2101         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2102         mmu_free_roots(vcpu);
2103 }
2104
2105 static void inject_page_fault(struct kvm_vcpu *vcpu,
2106                               u64 addr,
2107                               u32 err_code)
2108 {
2109         kvm_inject_page_fault(vcpu, addr, err_code);
2110 }
2111
2112 static void paging_free(struct kvm_vcpu *vcpu)
2113 {
2114         nonpaging_free(vcpu);
2115 }
2116
2117 static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2118 {
2119         int bit7;
2120
2121         bit7 = (gpte >> 7) & 1;
2122         return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2123 }
2124
2125 #define PTTYPE 64
2126 #include "paging_tmpl.h"
2127 #undef PTTYPE
2128
2129 #define PTTYPE 32
2130 #include "paging_tmpl.h"
2131 #undef PTTYPE
2132
2133 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2134 {
2135         struct kvm_mmu *context = &vcpu->arch.mmu;
2136         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2137         u64 exb_bit_rsvd = 0;
2138
2139         if (!is_nx(vcpu))
2140                 exb_bit_rsvd = rsvd_bits(63, 63);
2141         switch (level) {
2142         case PT32_ROOT_LEVEL:
2143                 /* no rsvd bits for 2 level 4K page table entries */
2144                 context->rsvd_bits_mask[0][1] = 0;
2145                 context->rsvd_bits_mask[0][0] = 0;
2146                 if (is_cpuid_PSE36())
2147                         /* 36bits PSE 4MB page */
2148                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2149                 else
2150                         /* 32 bits PSE 4MB page */
2151                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2152                 context->rsvd_bits_mask[1][0] = ~0ull;
2153                 break;
2154         case PT32E_ROOT_LEVEL:
2155                 context->rsvd_bits_mask[0][2] =
2156                         rsvd_bits(maxphyaddr, 63) |
2157                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2158                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2159                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2160                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2161                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2162                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2163                         rsvd_bits(maxphyaddr, 62) |
2164                         rsvd_bits(13, 20);              /* large page */
2165                 context->rsvd_bits_mask[1][0] = ~0ull;
2166                 break;
2167         case PT64_ROOT_LEVEL:
2168                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2169                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2170                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2171                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2172                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2173                         rsvd_bits(maxphyaddr, 51);
2174                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2175                         rsvd_bits(maxphyaddr, 51);
2176                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2177                 context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
2178                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2179                         rsvd_bits(maxphyaddr, 51) |
2180                         rsvd_bits(13, 20);              /* large page */
2181                 context->rsvd_bits_mask[1][0] = ~0ull;
2182                 break;
2183         }
2184 }
2185
2186 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2187 {
2188         struct kvm_mmu *context = &vcpu->arch.mmu;
2189
2190         ASSERT(is_pae(vcpu));
2191         context->new_cr3 = paging_new_cr3;
2192         context->page_fault = paging64_page_fault;
2193         context->gva_to_gpa = paging64_gva_to_gpa;
2194         context->prefetch_page = paging64_prefetch_page;
2195         context->sync_page = paging64_sync_page;
2196         context->invlpg = paging64_invlpg;
2197         context->free = paging_free;
2198         context->root_level = level;
2199         context->shadow_root_level = level;
2200         context->root_hpa = INVALID_PAGE;
2201         return 0;
2202 }
2203
2204 static int paging64_init_context(struct kvm_vcpu *vcpu)
2205 {
2206         reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2207         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2208 }
2209
2210 static int paging32_init_context(struct kvm_vcpu *vcpu)
2211 {
2212         struct kvm_mmu *context = &vcpu->arch.mmu;
2213
2214         reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2215         context->new_cr3 = paging_new_cr3;
2216         context->page_fault = paging32_page_fault;
2217         context->gva_to_gpa = paging32_gva_to_gpa;
2218         context->free = paging_free;
2219         context->prefetch_page = paging32_prefetch_page;
2220         context->sync_page = paging32_sync_page;
2221         context->invlpg = paging32_invlpg;
2222         context->root_level = PT32_ROOT_LEVEL;
2223         context->shadow_root_level = PT32E_ROOT_LEVEL;
2224         context->root_hpa = INVALID_PAGE;
2225         return 0;
2226 }
2227
2228 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2229 {
2230         reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2231         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2232 }
2233
2234 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2235 {
2236         struct kvm_mmu *context = &vcpu->arch.mmu;
2237
2238         context->new_cr3 = nonpaging_new_cr3;
2239         context->page_fault = tdp_page_fault;
2240         context->free = nonpaging_free;
2241         context->prefetch_page = nonpaging_prefetch_page;
2242         context->sync_page = nonpaging_sync_page;
2243         context->invlpg = nonpaging_invlpg;
2244         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2245         context->root_hpa = INVALID_PAGE;
2246
2247         if (!is_paging(vcpu)) {
2248                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2249                 context->root_level = 0;
2250         } else if (is_long_mode(vcpu)) {
2251                 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2252                 context->gva_to_gpa = paging64_gva_to_gpa;
2253                 context->root_level = PT64_ROOT_LEVEL;
2254         } else if (is_pae(vcpu)) {
2255                 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2256                 context->gva_to_gpa = paging64_gva_to_gpa;
2257                 context->root_level = PT32E_ROOT_LEVEL;
2258         } else {
2259                 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2260                 context->gva_to_gpa = paging32_gva_to_gpa;
2261                 context->root_level = PT32_ROOT_LEVEL;
2262         }
2263
2264         return 0;
2265 }
2266
2267 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2268 {
2269         int r;
2270
2271         ASSERT(vcpu);
2272         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2273
2274         if (!is_paging(vcpu))
2275                 r = nonpaging_init_context(vcpu);
2276         else if (is_long_mode(vcpu))
2277                 r = paging64_init_context(vcpu);
2278         else if (is_pae(vcpu))
2279                 r = paging32E_init_context(vcpu);
2280         else
2281                 r = paging32_init_context(vcpu);
2282
2283         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2284
2285         return r;
2286 }
2287
2288 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2289 {
2290         vcpu->arch.update_pte.pfn = bad_pfn;
2291
2292         if (tdp_enabled)
2293                 return init_kvm_tdp_mmu(vcpu);
2294         else
2295                 return init_kvm_softmmu(vcpu);
2296 }
2297
2298 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2299 {
2300         ASSERT(vcpu);
2301         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2302                 vcpu->arch.mmu.free(vcpu);
2303                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2304         }
2305 }
2306
2307 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2308 {
2309         destroy_kvm_mmu(vcpu);
2310         return init_kvm_mmu(vcpu);
2311 }
2312 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2313
2314 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2315 {
2316         int r;
2317
2318         r = mmu_topup_memory_caches(vcpu);
2319         if (r)
2320                 goto out;
2321         spin_lock(&vcpu->kvm->mmu_lock);
2322         kvm_mmu_free_some_pages(vcpu);
2323         mmu_alloc_roots(vcpu);
2324         mmu_sync_roots(vcpu);
2325         spin_unlock(&vcpu->kvm->mmu_lock);
2326         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2327         kvm_mmu_flush_tlb(vcpu);
2328 out:
2329         return r;
2330 }
2331 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2332
2333 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2334 {
2335         mmu_free_roots(vcpu);
2336 }
2337
2338 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2339                                   struct kvm_mmu_page *sp,
2340                                   u64 *spte)
2341 {
2342         u64 pte;
2343         struct kvm_mmu_page *child;
2344
2345         pte = *spte;
2346         if (is_shadow_present_pte(pte)) {
2347                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
2348                     is_large_pte(pte))
2349                         rmap_remove(vcpu->kvm, spte);
2350                 else {
2351                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2352                         mmu_page_remove_parent_pte(child, spte);
2353                 }
2354         }
2355         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
2356         if (is_large_pte(pte))
2357                 --vcpu->kvm->stat.lpages;
2358 }
2359
2360 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2361                                   struct kvm_mmu_page *sp,
2362                                   u64 *spte,
2363                                   const void *new)
2364 {
2365         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2366                 if (!vcpu->arch.update_pte.largepage ||
2367                     sp->role.glevels == PT32_ROOT_LEVEL) {
2368                         ++vcpu->kvm->stat.mmu_pde_zapped;
2369                         return;
2370                 }
2371         }
2372
2373         ++vcpu->kvm->stat.mmu_pte_updated;
2374         if (sp->role.glevels == PT32_ROOT_LEVEL)
2375                 paging32_update_pte(vcpu, sp, spte, new);
2376         else
2377                 paging64_update_pte(vcpu, sp, spte, new);
2378 }
2379
2380 static bool need_remote_flush(u64 old, u64 new)
2381 {
2382         if (!is_shadow_present_pte(old))
2383                 return false;
2384         if (!is_shadow_present_pte(new))
2385                 return true;
2386         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2387                 return true;
2388         old ^= PT64_NX_MASK;
2389         new ^= PT64_NX_MASK;
2390         return (old & ~new & PT64_PERM_MASK) != 0;
2391 }
2392
2393 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2394 {
2395         if (need_remote_flush(old, new))
2396                 kvm_flush_remote_tlbs(vcpu->kvm);
2397         else
2398                 kvm_mmu_flush_tlb(vcpu);
2399 }
2400
2401 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2402 {
2403         u64 *spte = vcpu->arch.last_pte_updated;
2404
2405         return !!(spte && (*spte & shadow_accessed_mask));
2406 }
2407
2408 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2409                                           const u8 *new, int bytes)
2410 {
2411         gfn_t gfn;
2412         int r;
2413         u64 gpte = 0;
2414         pfn_t pfn;
2415
2416         vcpu->arch.update_pte.largepage = 0;
2417
2418         if (bytes != 4 && bytes != 8)
2419                 return;
2420
2421         /*
2422          * Assume that the pte write on a page table of the same type
2423          * as the current vcpu paging mode.  This is nearly always true
2424          * (might be false while changing modes).  Note it is verified later
2425          * by update_pte().
2426          */
2427         if (is_pae(vcpu)) {
2428                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2429                 if ((bytes == 4) && (gpa % 4 == 0)) {
2430                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2431                         if (r)
2432                                 return;
2433                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2434                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2435                         memcpy((void *)&gpte, new, 8);
2436                 }
2437         } else {
2438                 if ((bytes == 4) && (gpa % 4 == 0))
2439                         memcpy((void *)&gpte, new, 4);
2440         }
2441         if (!is_present_pte(gpte))
2442                 return;
2443         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2444
2445         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2446                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2447                 vcpu->arch.update_pte.largepage = 1;
2448         }
2449         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2450         smp_rmb();
2451         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2452
2453         if (is_error_pfn(pfn)) {
2454                 kvm_release_pfn_clean(pfn);
2455                 return;
2456         }
2457         vcpu->arch.update_pte.gfn = gfn;
2458         vcpu->arch.update_pte.pfn = pfn;
2459 }
2460
2461 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2462 {
2463         u64 *spte = vcpu->arch.last_pte_updated;
2464
2465         if (spte
2466             && vcpu->arch.last_pte_gfn == gfn
2467             && shadow_accessed_mask
2468             && !(*spte & shadow_accessed_mask)
2469             && is_shadow_present_pte(*spte))
2470                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2471 }
2472
2473 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2474                        const u8 *new, int bytes,
2475                        bool guest_initiated)
2476 {
2477         gfn_t gfn = gpa >> PAGE_SHIFT;
2478         struct kvm_mmu_page *sp;
2479         struct hlist_node *node, *n;
2480         struct hlist_head *bucket;
2481         unsigned index;
2482         u64 entry, gentry;
2483         u64 *spte;
2484         unsigned offset = offset_in_page(gpa);
2485         unsigned pte_size;
2486         unsigned page_offset;
2487         unsigned misaligned;
2488         unsigned quadrant;
2489         int level;
2490         int flooded = 0;
2491         int npte;
2492         int r;
2493
2494         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2495         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2496         spin_lock(&vcpu->kvm->mmu_lock);
2497         kvm_mmu_access_page(vcpu, gfn);
2498         kvm_mmu_free_some_pages(vcpu);
2499         ++vcpu->kvm->stat.mmu_pte_write;
2500         kvm_mmu_audit(vcpu, "pre pte write");
2501         if (guest_initiated) {
2502                 if (gfn == vcpu->arch.last_pt_write_gfn
2503                     && !last_updated_pte_accessed(vcpu)) {
2504                         ++vcpu->arch.last_pt_write_count;
2505                         if (vcpu->arch.last_pt_write_count >= 3)
2506                                 flooded = 1;
2507                 } else {
2508                         vcpu->arch.last_pt_write_gfn = gfn;
2509                         vcpu->arch.last_pt_write_count = 1;
2510                         vcpu->arch.last_pte_updated = NULL;
2511                 }
2512         }
2513         index = kvm_page_table_hashfn(gfn);
2514         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2515         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2516                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2517                         continue;
2518                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2519                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2520                 misaligned |= bytes < 4;
2521                 if (misaligned || flooded) {
2522                         /*
2523                          * Misaligned accesses are too much trouble to fix
2524                          * up; also, they usually indicate a page is not used
2525                          * as a page table.
2526                          *
2527                          * If we're seeing too many writes to a page,
2528                          * it may no longer be a page table, or we may be
2529                          * forking, in which case it is better to unmap the
2530                          * page.
2531                          */
2532                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2533                                  gpa, bytes, sp->role.word);
2534                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2535                                 n = bucket->first;
2536                         ++vcpu->kvm->stat.mmu_flooded;
2537                         continue;
2538                 }
2539                 page_offset = offset;
2540                 level = sp->role.level;
2541                 npte = 1;
2542                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2543                         page_offset <<= 1;      /* 32->64 */
2544                         /*
2545                          * A 32-bit pde maps 4MB while the shadow pdes map
2546                          * only 2MB.  So we need to double the offset again
2547                          * and zap two pdes instead of one.
2548                          */
2549                         if (level == PT32_ROOT_LEVEL) {
2550                                 page_offset &= ~7; /* kill rounding error */
2551                                 page_offset <<= 1;
2552                                 npte = 2;
2553                         }
2554                         quadrant = page_offset >> PAGE_SHIFT;
2555                         page_offset &= ~PAGE_MASK;
2556                         if (quadrant != sp->role.quadrant)
2557                                 continue;
2558                 }
2559                 spte = &sp->spt[page_offset / sizeof(*spte)];
2560                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2561                         gentry = 0;
2562                         r = kvm_read_guest_atomic(vcpu->kvm,
2563                                                   gpa & ~(u64)(pte_size - 1),
2564                                                   &gentry, pte_size);
2565                         new = (const void *)&gentry;
2566                         if (r < 0)
2567                                 new = NULL;
2568                 }
2569                 while (npte--) {
2570                         entry = *spte;
2571                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2572                         if (new)
2573                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2574                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2575                         ++spte;
2576                 }
2577         }
2578         kvm_mmu_audit(vcpu, "post pte write");
2579         spin_unlock(&vcpu->kvm->mmu_lock);
2580         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2581                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2582                 vcpu->arch.update_pte.pfn = bad_pfn;
2583         }
2584 }
2585
2586 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2587 {
2588         gpa_t gpa;
2589         int r;
2590
2591         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2592
2593         spin_lock(&vcpu->kvm->mmu_lock);
2594         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2595         spin_unlock(&vcpu->kvm->mmu_lock);
2596         return r;
2597 }
2598 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2599
2600 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2601 {
2602         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2603                 struct kvm_mmu_page *sp;
2604
2605                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2606                                   struct kvm_mmu_page, link);
2607                 kvm_mmu_zap_page(vcpu->kvm, sp);
2608                 ++vcpu->kvm->stat.mmu_recycled;
2609         }
2610 }
2611
2612 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2613 {
2614         int r;
2615         enum emulation_result er;
2616
2617         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2618         if (r < 0)
2619                 goto out;
2620
2621         if (!r) {
2622                 r = 1;
2623                 goto out;
2624         }
2625
2626         r = mmu_topup_memory_caches(vcpu);
2627         if (r)
2628                 goto out;
2629
2630         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2631
2632         switch (er) {
2633         case EMULATE_DONE:
2634                 return 1;
2635         case EMULATE_DO_MMIO:
2636                 ++vcpu->stat.mmio_exits;
2637                 return 0;
2638         case EMULATE_FAIL:
2639                 kvm_report_emulation_failure(vcpu, "pagetable");
2640                 return 1;
2641         default:
2642                 BUG();
2643         }
2644 out:
2645         return r;
2646 }
2647 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2648
2649 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2650 {
2651         vcpu->arch.mmu.invlpg(vcpu, gva);
2652         kvm_mmu_flush_tlb(vcpu);
2653         ++vcpu->stat.invlpg;
2654 }
2655 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2656
2657 void kvm_enable_tdp(void)
2658 {
2659         tdp_enabled = true;
2660 }
2661 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2662
2663 void kvm_disable_tdp(void)
2664 {
2665         tdp_enabled = false;
2666 }
2667 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2668
2669 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2670 {
2671         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2672 }
2673
2674 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2675 {
2676         struct page *page;
2677         int i;
2678
2679         ASSERT(vcpu);
2680
2681         if (vcpu->kvm->arch.n_requested_mmu_pages)
2682                 vcpu->kvm->arch.n_free_mmu_pages =
2683                                         vcpu->kvm->arch.n_requested_mmu_pages;
2684         else
2685                 vcpu->kvm->arch.n_free_mmu_pages =
2686                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2687         /*
2688          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2689          * Therefore we need to allocate shadow page tables in the first
2690          * 4GB of memory, which happens to fit the DMA32 zone.
2691          */
2692         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2693         if (!page)
2694                 goto error_1;
2695         vcpu->arch.mmu.pae_root = page_address(page);
2696         for (i = 0; i < 4; ++i)
2697                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2698
2699         return 0;
2700
2701 error_1:
2702         free_mmu_pages(vcpu);
2703         return -ENOMEM;
2704 }
2705
2706 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2707 {
2708         ASSERT(vcpu);
2709         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2710
2711         return alloc_mmu_pages(vcpu);
2712 }
2713
2714 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2715 {
2716         ASSERT(vcpu);
2717         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2718
2719         return init_kvm_mmu(vcpu);
2720 }
2721
2722 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2723 {
2724         ASSERT(vcpu);
2725
2726         destroy_kvm_mmu(vcpu);
2727         free_mmu_pages(vcpu);
2728         mmu_free_memory_caches(vcpu);
2729 }
2730
2731 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2732 {
2733         struct kvm_mmu_page *sp;
2734
2735         spin_lock(&kvm->mmu_lock);
2736         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2737                 int i;
2738                 u64 *pt;
2739
2740                 if (!test_bit(slot, sp->slot_bitmap))
2741                         continue;
2742
2743                 pt = sp->spt;
2744                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2745                         /* avoid RMW */
2746                         if (pt[i] & PT_WRITABLE_MASK)
2747                                 pt[i] &= ~PT_WRITABLE_MASK;
2748         }
2749         kvm_flush_remote_tlbs(kvm);
2750         spin_unlock(&kvm->mmu_lock);
2751 }
2752
2753 void kvm_mmu_zap_all(struct kvm *kvm)
2754 {
2755         struct kvm_mmu_page *sp, *node;
2756
2757         spin_lock(&kvm->mmu_lock);
2758         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2759                 if (kvm_mmu_zap_page(kvm, sp))
2760                         node = container_of(kvm->arch.active_mmu_pages.next,
2761                                             struct kvm_mmu_page, link);
2762         spin_unlock(&kvm->mmu_lock);
2763
2764         kvm_flush_remote_tlbs(kvm);
2765 }
2766
2767 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2768 {
2769         struct kvm_mmu_page *page;
2770
2771         page = container_of(kvm->arch.active_mmu_pages.prev,
2772                             struct kvm_mmu_page, link);
2773         kvm_mmu_zap_page(kvm, page);
2774 }
2775
2776 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2777 {
2778         struct kvm *kvm;
2779         struct kvm *kvm_freed = NULL;
2780         int cache_count = 0;
2781
2782         spin_lock(&kvm_lock);
2783
2784         list_for_each_entry(kvm, &vm_list, vm_list) {
2785                 int npages;
2786
2787                 if (!down_read_trylock(&kvm->slots_lock))
2788                         continue;
2789                 spin_lock(&kvm->mmu_lock);
2790                 npages = kvm->arch.n_alloc_mmu_pages -
2791                          kvm->arch.n_free_mmu_pages;
2792                 cache_count += npages;
2793                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2794                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2795                         cache_count--;
2796                         kvm_freed = kvm;
2797                 }
2798                 nr_to_scan--;
2799
2800                 spin_unlock(&kvm->mmu_lock);
2801                 up_read(&kvm->slots_lock);
2802         }
2803         if (kvm_freed)
2804                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2805
2806         spin_unlock(&kvm_lock);
2807
2808         return cache_count;
2809 }
2810
2811 static struct shrinker mmu_shrinker = {
2812         .shrink = mmu_shrink,
2813         .seeks = DEFAULT_SEEKS * 10,
2814 };
2815
2816 static void mmu_destroy_caches(void)
2817 {
2818         if (pte_chain_cache)
2819                 kmem_cache_destroy(pte_chain_cache);
2820         if (rmap_desc_cache)
2821                 kmem_cache_destroy(rmap_desc_cache);
2822         if (mmu_page_header_cache)
2823                 kmem_cache_destroy(mmu_page_header_cache);
2824 }
2825
2826 void kvm_mmu_module_exit(void)
2827 {
2828         mmu_destroy_caches();
2829         unregister_shrinker(&mmu_shrinker);
2830 }
2831
2832 int kvm_mmu_module_init(void)
2833 {
2834         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2835                                             sizeof(struct kvm_pte_chain),
2836                                             0, 0, NULL);
2837         if (!pte_chain_cache)
2838                 goto nomem;
2839         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2840                                             sizeof(struct kvm_rmap_desc),
2841                                             0, 0, NULL);
2842         if (!rmap_desc_cache)
2843                 goto nomem;
2844
2845         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2846                                                   sizeof(struct kvm_mmu_page),
2847                                                   0, 0, NULL);
2848         if (!mmu_page_header_cache)
2849                 goto nomem;
2850
2851         register_shrinker(&mmu_shrinker);
2852
2853         return 0;
2854
2855 nomem:
2856         mmu_destroy_caches();
2857         return -ENOMEM;
2858 }
2859
2860 /*
2861  * Caculate mmu pages needed for kvm.
2862  */
2863 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2864 {
2865         int i;
2866         unsigned int nr_mmu_pages;
2867         unsigned int  nr_pages = 0;
2868
2869         for (i = 0; i < kvm->nmemslots; i++)
2870                 nr_pages += kvm->memslots[i].npages;
2871
2872         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2873         nr_mmu_pages = max(nr_mmu_pages,
2874                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2875
2876         return nr_mmu_pages;
2877 }
2878
2879 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2880                                 unsigned len)
2881 {
2882         if (len > buffer->len)
2883                 return NULL;
2884         return buffer->ptr;
2885 }
2886
2887 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2888                                 unsigned len)
2889 {
2890         void *ret;
2891
2892         ret = pv_mmu_peek_buffer(buffer, len);
2893         if (!ret)
2894                 return ret;
2895         buffer->ptr += len;
2896         buffer->len -= len;
2897         buffer->processed += len;
2898         return ret;
2899 }
2900
2901 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2902                              gpa_t addr, gpa_t value)
2903 {
2904         int bytes = 8;
2905         int r;
2906
2907         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2908                 bytes = 4;
2909
2910         r = mmu_topup_memory_caches(vcpu);
2911         if (r)
2912                 return r;
2913
2914         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2915                 return -EFAULT;
2916
2917         return 1;
2918 }
2919
2920 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2921 {
2922         kvm_set_cr3(vcpu, vcpu->arch.cr3);
2923         return 1;
2924 }
2925
2926 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2927 {
2928         spin_lock(&vcpu->kvm->mmu_lock);
2929         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2930         spin_unlock(&vcpu->kvm->mmu_lock);
2931         return 1;
2932 }
2933
2934 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2935                              struct kvm_pv_mmu_op_buffer *buffer)
2936 {
2937         struct kvm_mmu_op_header *header;
2938
2939         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2940         if (!header)
2941                 return 0;
2942         switch (header->op) {
2943         case KVM_MMU_OP_WRITE_PTE: {
2944                 struct kvm_mmu_op_write_pte *wpte;
2945
2946                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2947                 if (!wpte)
2948                         return 0;
2949                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2950                                         wpte->pte_val);
2951         }
2952         case KVM_MMU_OP_FLUSH_TLB: {
2953                 struct kvm_mmu_op_flush_tlb *ftlb;
2954
2955                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2956                 if (!ftlb)
2957                         return 0;
2958                 return kvm_pv_mmu_flush_tlb(vcpu);
2959         }
2960         case KVM_MMU_OP_RELEASE_PT: {
2961                 struct kvm_mmu_op_release_pt *rpt;
2962
2963                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2964                 if (!rpt)
2965                         return 0;
2966                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2967         }
2968         default: return 0;
2969         }
2970 }
2971
2972 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2973                   gpa_t addr, unsigned long *ret)
2974 {
2975         int r;
2976         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2977
2978         buffer->ptr = buffer->buf;
2979         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2980         buffer->processed = 0;
2981
2982         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2983         if (r)
2984                 goto out;
2985
2986         while (buffer->len) {
2987                 r = kvm_pv_mmu_op_one(vcpu, buffer);
2988                 if (r < 0)
2989                         goto out;
2990                 if (r == 0)
2991                         break;
2992         }
2993
2994         r = 1;
2995 out:
2996         *ret = buffer->processed;
2997         return r;
2998 }
2999
3000 #ifdef AUDIT
3001
3002 static const char *audit_msg;
3003
3004 static gva_t canonicalize(gva_t gva)
3005 {
3006 #ifdef CONFIG_X86_64
3007         gva = (long long)(gva << 16) >> 16;
3008 #endif
3009         return gva;
3010 }
3011
3012 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3013                                 gva_t va, int level)
3014 {
3015         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3016         int i;
3017         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3018
3019         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3020                 u64 ent = pt[i];
3021
3022                 if (ent == shadow_trap_nonpresent_pte)
3023                         continue;
3024
3025                 va = canonicalize(va);
3026                 if (level > 1) {
3027                         if (ent == shadow_notrap_nonpresent_pte)
3028                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
3029                                        " in nonleaf level: levels %d gva %lx"
3030                                        " level %d pte %llx\n", audit_msg,
3031                                        vcpu->arch.mmu.root_level, va, level, ent);
3032                         else
3033                                 audit_mappings_page(vcpu, ent, va, level - 1);
3034                 } else {
3035                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3036                         gfn_t gfn = gpa >> PAGE_SHIFT;
3037                         pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3038                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3039
3040                         if (is_shadow_present_pte(ent)
3041                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3042                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
3043                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3044                                        audit_msg, vcpu->arch.mmu.root_level,
3045                                        va, gpa, hpa, ent,
3046                                        is_shadow_present_pte(ent));
3047                         else if (ent == shadow_notrap_nonpresent_pte
3048                                  && !is_error_hpa(hpa))
3049                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3050                                        " valid guest gva %lx\n", audit_msg, va);
3051                         kvm_release_pfn_clean(pfn);
3052
3053                 }
3054         }
3055 }
3056
3057 static void audit_mappings(struct kvm_vcpu *vcpu)
3058 {
3059         unsigned i;
3060
3061         if (vcpu->arch.mmu.root_level == 4)
3062                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3063         else
3064                 for (i = 0; i < 4; ++i)
3065                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3066                                 audit_mappings_page(vcpu,
3067                                                     vcpu->arch.mmu.pae_root[i],
3068                                                     i << 30,
3069                                                     2);
3070 }
3071
3072 static int count_rmaps(struct kvm_vcpu *vcpu)
3073 {
3074         int nmaps = 0;
3075         int i, j, k;
3076
3077         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3078                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3079                 struct kvm_rmap_desc *d;
3080
3081                 for (j = 0; j < m->npages; ++j) {
3082                         unsigned long *rmapp = &m->rmap[j];
3083
3084                         if (!*rmapp)
3085                                 continue;
3086                         if (!(*rmapp & 1)) {
3087                                 ++nmaps;
3088                                 continue;
3089                         }
3090                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3091                         while (d) {
3092                                 for (k = 0; k < RMAP_EXT; ++k)
3093                                         if (d->shadow_ptes[k])
3094                                                 ++nmaps;
3095                                         else
3096                                                 break;
3097                                 d = d->more;
3098                         }
3099                 }
3100         }
3101         return nmaps;
3102 }
3103
3104 static int count_writable_mappings(struct kvm_vcpu *vcpu)
3105 {
3106         int nmaps = 0;
3107         struct kvm_mmu_page *sp;
3108         int i;
3109
3110         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3111                 u64 *pt = sp->spt;
3112
3113                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3114                         continue;
3115
3116                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3117                         u64 ent = pt[i];
3118
3119                         if (!(ent & PT_PRESENT_MASK))
3120                                 continue;
3121                         if (!(ent & PT_WRITABLE_MASK))
3122                                 continue;
3123                         ++nmaps;
3124                 }
3125         }
3126         return nmaps;
3127 }
3128
3129 static void audit_rmap(struct kvm_vcpu *vcpu)
3130 {
3131         int n_rmap = count_rmaps(vcpu);
3132         int n_actual = count_writable_mappings(vcpu);
3133
3134         if (n_rmap != n_actual)
3135                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
3136                        __func__, audit_msg, n_rmap, n_actual);
3137 }
3138
3139 static void audit_write_protection(struct kvm_vcpu *vcpu)
3140 {
3141         struct kvm_mmu_page *sp;
3142         struct kvm_memory_slot *slot;
3143         unsigned long *rmapp;
3144         gfn_t gfn;
3145
3146         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3147                 if (sp->role.direct)
3148                         continue;
3149
3150                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3151                 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3152                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3153                 if (*rmapp)
3154                         printk(KERN_ERR "%s: (%s) shadow page has writable"
3155                                " mappings: gfn %lx role %x\n",
3156                                __func__, audit_msg, sp->gfn,
3157                                sp->role.word);
3158         }
3159 }
3160
3161 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3162 {
3163         int olddbg = dbg;
3164
3165         dbg = 0;
3166         audit_msg = msg;
3167         audit_rmap(vcpu);
3168         audit_write_protection(vcpu);
3169         audit_mappings(vcpu);
3170         dbg = olddbg;
3171 }
3172
3173 #endif