KVM: Use macro to iterate over vcpus.
arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36 #include <asm/vmx.h>
37
38 /*
39  * When set to true, this variable enables Two-Dimensional Paging (TDP),
40  * where the hardware walks two page tables:
41  * 1. the guest-virtual to guest-physical translation
42  * 2. while doing 1., the guest-physical to host-physical translation
43  * If the hardware supports that, we don't need to do shadow paging.
44  */
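/*
 * The flag is expected to be turned on by the hardware-specific module
 * (vmx or svm) when usable EPT/NPT support is detected; with TDP the MMU
 * below builds direct gpa->hpa page tables instead of shadowing the
 * guest's own tables.
 */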
45 bool tdp_enabled = false;
46
47 #undef MMU_DEBUG
48
49 #undef AUDIT
50
51 #ifdef AUDIT
52 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
53 #else
54 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
55 #endif
56
57 #ifdef MMU_DEBUG
58
59 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
60 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
61
62 #else
63
64 #define pgprintk(x...) do { } while (0)
65 #define rmap_printk(x...) do { } while (0)
66
67 #endif
68
69 #if defined(MMU_DEBUG) || defined(AUDIT)
70 static int dbg = 0;
71 module_param(dbg, bool, 0644);
72 #endif
73
74 static int oos_shadow = 1;
75 module_param(oos_shadow, bool, 0644);
76
77 #ifndef MMU_DEBUG
78 #define ASSERT(x) do { } while (0)
79 #else
80 #define ASSERT(x)                                                       \
81         if (!(x)) {                                                     \
82                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
83                        __FILE__, __LINE__, #x);                         \
84         }
85 #endif
86
87 #define PT_FIRST_AVAIL_BITS_SHIFT 9
88 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
89
90 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
91
92 #define PT64_LEVEL_BITS 9
93
94 #define PT64_LEVEL_SHIFT(level) \
95                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
96
97 #define PT64_LEVEL_MASK(level) \
98                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
99
100 #define PT64_INDEX(address, level)\
101         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
102
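/*
 * With 4KB pages (PAGE_SHIFT == 12) and 9 bits per level this gives the
 * usual x86-64 decomposition: PT64_LEVEL_SHIFT(1) == 12, (2) == 21,
 * (3) == 30 and (4) == 39, and PT64_INDEX() extracts the 9-bit table
 * index for that level, e.g. PT64_INDEX(addr, 2) == (addr >> 21) & 511.
 */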
103
104 #define PT32_LEVEL_BITS 10
105
106 #define PT32_LEVEL_SHIFT(level) \
107                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
108
109 #define PT32_LEVEL_MASK(level) \
110                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
111
112 #define PT32_INDEX(address, level)\
113         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
114
115
116 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
117 #define PT64_DIR_BASE_ADDR_MASK \
118         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
119
120 #define PT32_BASE_ADDR_MASK PAGE_MASK
121 #define PT32_DIR_BASE_ADDR_MASK \
122         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
123
124 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
125                         | PT64_NX_MASK)
126
127 #define PFERR_PRESENT_MASK (1U << 0)
128 #define PFERR_WRITE_MASK (1U << 1)
129 #define PFERR_USER_MASK (1U << 2)
130 #define PFERR_RSVD_MASK (1U << 3)
131 #define PFERR_FETCH_MASK (1U << 4)
132
133 #define PT_DIRECTORY_LEVEL 2
134 #define PT_PAGE_TABLE_LEVEL 1
135
136 #define RMAP_EXT 4
137
138 #define ACC_EXEC_MASK    1
139 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
140 #define ACC_USER_MASK    PT_USER_MASK
141 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
142
143 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
144
145 struct kvm_rmap_desc {
146         u64 *sptes[RMAP_EXT];
147         struct kvm_rmap_desc *more;
148 };
149
150 struct kvm_shadow_walk_iterator {
151         u64 addr;
152         hpa_t shadow_addr;
153         int level;
154         u64 *sptep;
155         unsigned index;
156 };
157
158 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
159         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
160              shadow_walk_okay(&(_walker));                      \
161              shadow_walk_next(&(_walker)))
162
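/*
 * Illustrative use of the iterator (a sketch, not code taken from this
 * file):
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, gpa, it) {
 *		u64 *sptep = it.sptep;	/- spte for the current it.level -/
 *		...
 *	}
 *
 * shadow_walk_init(), shadow_walk_okay() and shadow_walk_next() further
 * down implement the three pieces of the loop.
 */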
163
164 struct kvm_unsync_walk {
165         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
166 };
167
168 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
169
170 static struct kmem_cache *pte_chain_cache;
171 static struct kmem_cache *rmap_desc_cache;
172 static struct kmem_cache *mmu_page_header_cache;
173
174 static u64 __read_mostly shadow_trap_nonpresent_pte;
175 static u64 __read_mostly shadow_notrap_nonpresent_pte;
176 static u64 __read_mostly shadow_base_present_pte;
177 static u64 __read_mostly shadow_nx_mask;
178 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
179 static u64 __read_mostly shadow_user_mask;
180 static u64 __read_mostly shadow_accessed_mask;
181 static u64 __read_mostly shadow_dirty_mask;
182
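/*
 * rsvd_bits(s, e) builds a mask with bits s..e (inclusive) set;
 * for example rsvd_bits(8, 10) == 0x700.
 */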
183 static inline u64 rsvd_bits(int s, int e)
184 {
185         return ((1ULL << (e - s + 1)) - 1) << s;
186 }
187
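/*
 * The setters below are exported so the hardware-specific module can
 * describe the spte format it needs: which encodings mark a non-present
 * spte and which bits act as user/accessed/dirty/nx/x.  With EPT some of
 * these masks can legitimately be zero (see the shadow_accessed_mask
 * check in kvm_age_rmapp() below).
 */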
188 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
189 {
190         shadow_trap_nonpresent_pte = trap_pte;
191         shadow_notrap_nonpresent_pte = notrap_pte;
192 }
193 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
194
195 void kvm_mmu_set_base_ptes(u64 base_pte)
196 {
197         shadow_base_present_pte = base_pte;
198 }
199 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
200
201 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
202                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
203 {
204         shadow_user_mask = user_mask;
205         shadow_accessed_mask = accessed_mask;
206         shadow_dirty_mask = dirty_mask;
207         shadow_nx_mask = nx_mask;
208         shadow_x_mask = x_mask;
209 }
210 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
211
212 static int is_write_protection(struct kvm_vcpu *vcpu)
213 {
214         return vcpu->arch.cr0 & X86_CR0_WP;
215 }
216
217 static int is_cpuid_PSE36(void)
218 {
219         return 1;
220 }
221
222 static int is_nx(struct kvm_vcpu *vcpu)
223 {
224         return vcpu->arch.shadow_efer & EFER_NX;
225 }
226
227 static int is_shadow_present_pte(u64 pte)
228 {
229         return pte != shadow_trap_nonpresent_pte
230                 && pte != shadow_notrap_nonpresent_pte;
231 }
232
233 static int is_large_pte(u64 pte)
234 {
235         return pte & PT_PAGE_SIZE_MASK;
236 }
237
238 static int is_writeble_pte(unsigned long pte)
239 {
240         return pte & PT_WRITABLE_MASK;
241 }
242
243 static int is_dirty_gpte(unsigned long pte)
244 {
245         return pte & PT_DIRTY_MASK;
246 }
247
248 static int is_rmap_spte(u64 pte)
249 {
250         return is_shadow_present_pte(pte);
251 }
252
253 static pfn_t spte_to_pfn(u64 pte)
254 {
255         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
256 }
257
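/*
 * On PSE-36 capable processors a large guest pde stores additional
 * physical address bits (32 and up) starting at PT32_DIR_PSE36_SHIFT;
 * pse36_gfn_delta() converts those bits into the gfn offset they
 * contribute.
 */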
258 static gfn_t pse36_gfn_delta(u32 gpte)
259 {
260         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
261
262         return (gpte & PT32_DIR_PSE36_MASK) << shift;
263 }
264
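/*
 * sptes are written with a single 64-bit store (set_64bit) so that a
 * concurrent hardware page-table walk never observes a torn spte; the
 * #ifdef only picks the pointer type each architecture's set_64bit()
 * expects.
 */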
265 static void __set_spte(u64 *sptep, u64 spte)
266 {
267 #ifdef CONFIG_X86_64
268         set_64bit((unsigned long *)sptep, spte);
269 #else
270         set_64bit((unsigned long long *)sptep, spte);
271 #endif
272 }
273
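/*
 * The mmu memory caches are topped up with GFP_KERNEL allocations while
 * sleeping is still allowed, so that later spte-manipulation paths can
 * take objects out of them via mmu_memory_cache_alloc() without sleeping
 * under mmu_lock.
 */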
274 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
275                                   struct kmem_cache *base_cache, int min)
276 {
277         void *obj;
278
279         if (cache->nobjs >= min)
280                 return 0;
281         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
282                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
283                 if (!obj)
284                         return -ENOMEM;
285                 cache->objects[cache->nobjs++] = obj;
286         }
287         return 0;
288 }
289
290 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
291 {
292         while (mc->nobjs)
293                 kfree(mc->objects[--mc->nobjs]);
294 }
295
296 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
297                                        int min)
298 {
299         struct page *page;
300
301         if (cache->nobjs >= min)
302                 return 0;
303         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
304                 page = alloc_page(GFP_KERNEL);
305                 if (!page)
306                         return -ENOMEM;
307                 set_page_private(page, 0);
308                 cache->objects[cache->nobjs++] = page_address(page);
309         }
310         return 0;
311 }
312
313 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
314 {
315         while (mc->nobjs)
316                 free_page((unsigned long)mc->objects[--mc->nobjs]);
317 }
318
319 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
320 {
321         int r;
322
323         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
324                                    pte_chain_cache, 4);
325         if (r)
326                 goto out;
327         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
328                                    rmap_desc_cache, 4);
329         if (r)
330                 goto out;
331         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
332         if (r)
333                 goto out;
334         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
335                                    mmu_page_header_cache, 4);
336 out:
337         return r;
338 }
339
340 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
341 {
342         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
343         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
344         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
345         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
346 }
347
348 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
349                                     size_t size)
350 {
351         void *p;
352
353         BUG_ON(!mc->nobjs);
354         p = mc->objects[--mc->nobjs];
355         return p;
356 }
357
358 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
359 {
360         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
361                                       sizeof(struct kvm_pte_chain));
362 }
363
364 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
365 {
366         kfree(pc);
367 }
368
369 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
370 {
371         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
372                                       sizeof(struct kvm_rmap_desc));
373 }
374
375 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
376 {
377         kfree(rd);
378 }
379
380 /*
381  * Return the pointer to the largepage write count for a given
382  * gfn, handling slots that are not large page aligned.
383  */
384 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
385 {
386         unsigned long idx;
387
388         idx = (gfn / KVM_PAGES_PER_HPAGE) -
389               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
390         return &slot->lpage_info[idx].write_count;
391 }
392
393 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
394 {
395         int *write_count;
396
397         gfn = unalias_gfn(kvm, gfn);
398         write_count = slot_largepage_idx(gfn,
399                                          gfn_to_memslot_unaliased(kvm, gfn));
400         *write_count += 1;
401 }
402
403 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
404 {
405         int *write_count;
406
407         gfn = unalias_gfn(kvm, gfn);
408         write_count = slot_largepage_idx(gfn,
409                                          gfn_to_memslot_unaliased(kvm, gfn));
410         *write_count -= 1;
411         WARN_ON(*write_count < 0);
412 }
413
414 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
415 {
416         struct kvm_memory_slot *slot;
417         int *largepage_idx;
418
419         gfn = unalias_gfn(kvm, gfn);
420         slot = gfn_to_memslot_unaliased(kvm, gfn);
421         if (slot) {
422                 largepage_idx = slot_largepage_idx(gfn, slot);
423                 return *largepage_idx;
424         }
425
426         return 1;
427 }
428
429 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
430 {
431         struct vm_area_struct *vma;
432         unsigned long addr;
433         int ret = 0;
434
435         addr = gfn_to_hva(kvm, gfn);
436         if (kvm_is_error_hva(addr))
437                 return ret;
438
439         down_read(&current->mm->mmap_sem);
440         vma = find_vma(current->mm, addr);
441         if (vma && is_vm_hugetlb_page(vma))
442                 ret = 1;
443         up_read(&current->mm->mmap_sem);
444
445         return ret;
446 }
447
448 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
449 {
450         struct kvm_memory_slot *slot;
451
452         if (has_wrprotected_page(vcpu->kvm, large_gfn))
453                 return 0;
454
455         if (!host_largepage_backed(vcpu->kvm, large_gfn))
456                 return 0;
457
458         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
459         if (slot && slot->dirty_bitmap)
460                 return 0;
461
462         return 1;
463 }
464
465 /*
466  * Take a gfn and return the reverse mapping to it.
467  * Note: gfn must be unaliased before this function gets called
468  */
469
470 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
471 {
472         struct kvm_memory_slot *slot;
473         unsigned long idx;
474
475         slot = gfn_to_memslot(kvm, gfn);
476         if (!lpage)
477                 return &slot->rmap[gfn - slot->base_gfn];
478
479         idx = (gfn / KVM_PAGES_PER_HPAGE) -
480               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
481
482         return &slot->lpage_info[idx].rmap_pde;
483 }
484
485 /*
486  * Reverse mapping data structures:
487  *
488  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
489  * that points to page_address(page).
490  *
491  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
492  * containing more mappings.
493  *
494  * Returns the number of rmap entries before the spte was added or zero if
495  * the spte was not added.
496  *
497  */
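/*
 * Example: the first spte mapping a gfn is stored directly in *rmapp;
 * when a second spte is added, rmap_add() below allocates a
 * kvm_rmap_desc, moves the first spte into sptes[0], stores the new one
 * in sptes[1] and sets *rmapp = (unsigned long)desc | 1.
 */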
498 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
499 {
500         struct kvm_mmu_page *sp;
501         struct kvm_rmap_desc *desc;
502         unsigned long *rmapp;
503         int i, count = 0;
504
505         if (!is_rmap_spte(*spte))
506                 return count;
507         gfn = unalias_gfn(vcpu->kvm, gfn);
508         sp = page_header(__pa(spte));
509         sp->gfns[spte - sp->spt] = gfn;
510         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
511         if (!*rmapp) {
512                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
513                 *rmapp = (unsigned long)spte;
514         } else if (!(*rmapp & 1)) {
515                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
516                 desc = mmu_alloc_rmap_desc(vcpu);
517                 desc->sptes[0] = (u64 *)*rmapp;
518                 desc->sptes[1] = spte;
519                 *rmapp = (unsigned long)desc | 1;
520         } else {
521                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
522                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
523                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
524                         desc = desc->more;
525                         count += RMAP_EXT;
526                 }
527                 if (desc->sptes[RMAP_EXT-1]) {
528                         desc->more = mmu_alloc_rmap_desc(vcpu);
529                         desc = desc->more;
530                 }
531                 for (i = 0; desc->sptes[i]; ++i)
532                         ;
533                 desc->sptes[i] = spte;
534         }
535         return count;
536 }
537
538 static void rmap_desc_remove_entry(unsigned long *rmapp,
539                                    struct kvm_rmap_desc *desc,
540                                    int i,
541                                    struct kvm_rmap_desc *prev_desc)
542 {
543         int j;
544
545         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
546                 ;
547         desc->sptes[i] = desc->sptes[j];
548         desc->sptes[j] = NULL;
549         if (j != 0)
550                 return;
551         if (!prev_desc && !desc->more)
552                 *rmapp = (unsigned long)desc->sptes[0];
553         else
554                 if (prev_desc)
555                         prev_desc->more = desc->more;
556                 else
557                         *rmapp = (unsigned long)desc->more | 1;
558         mmu_free_rmap_desc(desc);
559 }
560
561 static void rmap_remove(struct kvm *kvm, u64 *spte)
562 {
563         struct kvm_rmap_desc *desc;
564         struct kvm_rmap_desc *prev_desc;
565         struct kvm_mmu_page *sp;
566         pfn_t pfn;
567         unsigned long *rmapp;
568         int i;
569
570         if (!is_rmap_spte(*spte))
571                 return;
572         sp = page_header(__pa(spte));
573         pfn = spte_to_pfn(*spte);
574         if (*spte & shadow_accessed_mask)
575                 kvm_set_pfn_accessed(pfn);
576         if (is_writeble_pte(*spte))
577                 kvm_release_pfn_dirty(pfn);
578         else
579                 kvm_release_pfn_clean(pfn);
580         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
581         if (!*rmapp) {
582                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
583                 BUG();
584         } else if (!(*rmapp & 1)) {
585                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
586                 if ((u64 *)*rmapp != spte) {
587                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
588                                spte, *spte);
589                         BUG();
590                 }
591                 *rmapp = 0;
592         } else {
593                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
594                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
595                 prev_desc = NULL;
596                 while (desc) {
597                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
598                                 if (desc->sptes[i] == spte) {
599                                         rmap_desc_remove_entry(rmapp,
600                                                                desc, i,
601                                                                prev_desc);
602                                         return;
603                                 }
604                         prev_desc = desc;
605                         desc = desc->more;
606                 }
607                 BUG();
608         }
609 }
610
611 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
612 {
613         struct kvm_rmap_desc *desc;
614         struct kvm_rmap_desc *prev_desc;
615         u64 *prev_spte;
616         int i;
617
618         if (!*rmapp)
619                 return NULL;
620         else if (!(*rmapp & 1)) {
621                 if (!spte)
622                         return (u64 *)*rmapp;
623                 return NULL;
624         }
625         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
626         prev_desc = NULL;
627         prev_spte = NULL;
628         while (desc) {
629                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
630                         if (prev_spte == spte)
631                                 return desc->sptes[i];
632                         prev_spte = desc->sptes[i];
633                 }
634                 desc = desc->more;
635         }
636         return NULL;
637 }
638
639 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
640 {
641         unsigned long *rmapp;
642         u64 *spte;
643         int write_protected = 0;
644
645         gfn = unalias_gfn(kvm, gfn);
646         rmapp = gfn_to_rmap(kvm, gfn, 0);
647
648         spte = rmap_next(kvm, rmapp, NULL);
649         while (spte) {
650                 BUG_ON(!spte);
651                 BUG_ON(!(*spte & PT_PRESENT_MASK));
652                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
653                 if (is_writeble_pte(*spte)) {
654                         __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
655                         write_protected = 1;
656                 }
657                 spte = rmap_next(kvm, rmapp, spte);
658         }
659         if (write_protected) {
660                 pfn_t pfn;
661
662                 spte = rmap_next(kvm, rmapp, NULL);
663                 pfn = spte_to_pfn(*spte);
664                 kvm_set_pfn_dirty(pfn);
665         }
666
667         /* check for huge page mappings */
668         rmapp = gfn_to_rmap(kvm, gfn, 1);
669         spte = rmap_next(kvm, rmapp, NULL);
670         while (spte) {
671                 BUG_ON(!spte);
672                 BUG_ON(!(*spte & PT_PRESENT_MASK));
673                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
674                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
675                 if (is_writeble_pte(*spte)) {
676                         rmap_remove(kvm, spte);
677                         --kvm->stat.lpages;
678                         __set_spte(spte, shadow_trap_nonpresent_pte);
679                         spte = NULL;
680                         write_protected = 1;
681                 }
682                 spte = rmap_next(kvm, rmapp, spte);
683         }
684
685         return write_protected;
686 }
687
688 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
689 {
690         u64 *spte;
691         int need_tlb_flush = 0;
692
693         while ((spte = rmap_next(kvm, rmapp, NULL))) {
694                 BUG_ON(!(*spte & PT_PRESENT_MASK));
695                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
696                 rmap_remove(kvm, spte);
697                 __set_spte(spte, shadow_trap_nonpresent_pte);
698                 need_tlb_flush = 1;
699         }
700         return need_tlb_flush;
701 }
702
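/*
 * kvm_handle_hva() maps a host virtual address back to the memslot that
 * contains it and runs @handler on both the 4K rmap chain and the
 * rmap_pde of the huge page covering that gfn; kvm_unmap_hva() and
 * kvm_age_hva() below are thin wrappers around it.
 */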
703 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
704                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
705 {
706         int i;
707         int retval = 0;
708
709         /*
710          * If mmap_sem isn't taken, we can walk the memslots with only
711          * the mmu_lock held, by skipping over the slots with userspace_addr == 0.
712          */
713         for (i = 0; i < kvm->nmemslots; i++) {
714                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
715                 unsigned long start = memslot->userspace_addr;
716                 unsigned long end;
717
718                 /* mmu_lock protects userspace_addr */
719                 if (!start)
720                         continue;
721
722                 end = start + (memslot->npages << PAGE_SHIFT);
723                 if (hva >= start && hva < end) {
724                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
725                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
726                         retval |= handler(kvm,
727                                           &memslot->lpage_info[
728                                                   gfn_offset /
729                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
730                 }
731         }
732
733         return retval;
734 }
735
736 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
737 {
738         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
739 }
740
741 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
742 {
743         u64 *spte;
744         int young = 0;
745
746         /* always return old for EPT */
747         if (!shadow_accessed_mask)
748                 return 0;
749
750         spte = rmap_next(kvm, rmapp, NULL);
751         while (spte) {
752                 int _young;
753                 u64 _spte = *spte;
754                 BUG_ON(!(_spte & PT_PRESENT_MASK));
755                 _young = _spte & PT_ACCESSED_MASK;
756                 if (_young) {
757                         young = 1;
758                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
759                 }
760                 spte = rmap_next(kvm, rmapp, spte);
761         }
762         return young;
763 }
764
765 #define RMAP_RECYCLE_THRESHOLD 1000
766
767 static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
768 {
769         unsigned long *rmapp;
770
771         gfn = unalias_gfn(vcpu->kvm, gfn);
772         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
773
774         kvm_unmap_rmapp(vcpu->kvm, rmapp);
775         kvm_flush_remote_tlbs(vcpu->kvm);
776 }
777
778 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
779 {
780         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
781 }
782
783 #ifdef MMU_DEBUG
784 static int is_empty_shadow_page(u64 *spt)
785 {
786         u64 *pos;
787         u64 *end;
788
789         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
790                 if (is_shadow_present_pte(*pos)) {
791                         printk(KERN_ERR "%s: %p %llx\n", __func__,
792                                pos, *pos);
793                         return 0;
794                 }
795         return 1;
796 }
797 #endif
798
799 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
800 {
801         ASSERT(is_empty_shadow_page(sp->spt));
802         list_del(&sp->link);
803         __free_page(virt_to_page(sp->spt));
804         __free_page(virt_to_page(sp->gfns));
805         kfree(sp);
806         ++kvm->arch.n_free_mmu_pages;
807 }
808
809 static unsigned kvm_page_table_hashfn(gfn_t gfn)
810 {
811         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
812 }
813
814 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
815                                                u64 *parent_pte)
816 {
817         struct kvm_mmu_page *sp;
818
819         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
820         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
821         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
822         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
823         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
824         INIT_LIST_HEAD(&sp->oos_link);
825         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
826         sp->multimapped = 0;
827         sp->parent_pte = parent_pte;
828         --vcpu->kvm->arch.n_free_mmu_pages;
829         return sp;
830 }
831
832 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
833                                     struct kvm_mmu_page *sp, u64 *parent_pte)
834 {
835         struct kvm_pte_chain *pte_chain;
836         struct hlist_node *node;
837         int i;
838
839         if (!parent_pte)
840                 return;
841         if (!sp->multimapped) {
842                 u64 *old = sp->parent_pte;
843
844                 if (!old) {
845                         sp->parent_pte = parent_pte;
846                         return;
847                 }
848                 sp->multimapped = 1;
849                 pte_chain = mmu_alloc_pte_chain(vcpu);
850                 INIT_HLIST_HEAD(&sp->parent_ptes);
851                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
852                 pte_chain->parent_ptes[0] = old;
853         }
854         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
855                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
856                         continue;
857                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
858                         if (!pte_chain->parent_ptes[i]) {
859                                 pte_chain->parent_ptes[i] = parent_pte;
860                                 return;
861                         }
862         }
863         pte_chain = mmu_alloc_pte_chain(vcpu);
864         BUG_ON(!pte_chain);
865         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
866         pte_chain->parent_ptes[0] = parent_pte;
867 }
868
869 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
870                                        u64 *parent_pte)
871 {
872         struct kvm_pte_chain *pte_chain;
873         struct hlist_node *node;
874         int i;
875
876         if (!sp->multimapped) {
877                 BUG_ON(sp->parent_pte != parent_pte);
878                 sp->parent_pte = NULL;
879                 return;
880         }
881         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
882                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
883                         if (!pte_chain->parent_ptes[i])
884                                 break;
885                         if (pte_chain->parent_ptes[i] != parent_pte)
886                                 continue;
887                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
888                                 && pte_chain->parent_ptes[i + 1]) {
889                                 pte_chain->parent_ptes[i]
890                                         = pte_chain->parent_ptes[i + 1];
891                                 ++i;
892                         }
893                         pte_chain->parent_ptes[i] = NULL;
894                         if (i == 0) {
895                                 hlist_del(&pte_chain->link);
896                                 mmu_free_pte_chain(pte_chain);
897                                 if (hlist_empty(&sp->parent_ptes)) {
898                                         sp->multimapped = 0;
899                                         sp->parent_pte = NULL;
900                                 }
901                         }
902                         return;
903                 }
904         BUG();
905 }
906
907
908 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
909                             mmu_parent_walk_fn fn)
910 {
911         struct kvm_pte_chain *pte_chain;
912         struct hlist_node *node;
913         struct kvm_mmu_page *parent_sp;
914         int i;
915
916         if (!sp->multimapped && sp->parent_pte) {
917                 parent_sp = page_header(__pa(sp->parent_pte));
918                 fn(vcpu, parent_sp);
919                 mmu_parent_walk(vcpu, parent_sp, fn);
920                 return;
921         }
922         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
923                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
924                         if (!pte_chain->parent_ptes[i])
925                                 break;
926                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
927                         fn(vcpu, parent_sp);
928                         mmu_parent_walk(vcpu, parent_sp, fn);
929                 }
930 }
931
932 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
933 {
934         unsigned int index;
935         struct kvm_mmu_page *sp = page_header(__pa(spte));
936
937         index = spte - sp->spt;
938         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
939                 sp->unsync_children++;
940         WARN_ON(!sp->unsync_children);
941 }
942
943 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
944 {
945         struct kvm_pte_chain *pte_chain;
946         struct hlist_node *node;
947         int i;
948
949         if (!sp->parent_pte)
950                 return;
951
952         if (!sp->multimapped) {
953                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
954                 return;
955         }
956
957         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
958                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
959                         if (!pte_chain->parent_ptes[i])
960                                 break;
961                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
962                 }
963 }
964
965 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
966 {
967         kvm_mmu_update_parents_unsync(sp);
968         return 1;
969 }
970
971 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
972                                         struct kvm_mmu_page *sp)
973 {
974         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
975         kvm_mmu_update_parents_unsync(sp);
976 }
977
978 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
979                                     struct kvm_mmu_page *sp)
980 {
981         int i;
982
983         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
984                 sp->spt[i] = shadow_trap_nonpresent_pte;
985 }
986
987 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
988                                struct kvm_mmu_page *sp)
989 {
990         return 1;
991 }
992
993 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
994 {
995 }
996
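/*
 * kvm_mmu_pages is a small, fixed-size batch of unsync shadow pages
 * (plus the parent index each was reached through) gathered by
 * mmu_unsync_walk(); callers keep walking and processing batches until
 * no unsync children remain.
 */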
997 #define KVM_PAGE_ARRAY_NR 16
998
999 struct kvm_mmu_pages {
1000         struct mmu_page_and_offset {
1001                 struct kvm_mmu_page *sp;
1002                 unsigned int idx;
1003         } page[KVM_PAGE_ARRAY_NR];
1004         unsigned int nr;
1005 };
1006
1007 #define for_each_unsync_children(bitmap, idx)           \
1008         for (idx = find_first_bit(bitmap, 512);         \
1009              idx < 512;                                 \
1010              idx = find_next_bit(bitmap, 512, idx+1))
1011
1012 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1013                          int idx)
1014 {
1015         int i;
1016
1017         if (sp->unsync)
1018                 for (i=0; i < pvec->nr; i++)
1019                         if (pvec->page[i].sp == sp)
1020                                 return 0;
1021
1022         pvec->page[pvec->nr].sp = sp;
1023         pvec->page[pvec->nr].idx = idx;
1024         pvec->nr++;
1025         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1026 }
1027
1028 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1029                            struct kvm_mmu_pages *pvec)
1030 {
1031         int i, ret, nr_unsync_leaf = 0;
1032
1033         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1034                 u64 ent = sp->spt[i];
1035
1036                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1037                         struct kvm_mmu_page *child;
1038                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1039
1040                         if (child->unsync_children) {
1041                                 if (mmu_pages_add(pvec, child, i))
1042                                         return -ENOSPC;
1043
1044                                 ret = __mmu_unsync_walk(child, pvec);
1045                                 if (!ret)
1046                                         __clear_bit(i, sp->unsync_child_bitmap);
1047                                 else if (ret > 0)
1048                                         nr_unsync_leaf += ret;
1049                                 else
1050                                         return ret;
1051                         }
1052
1053                         if (child->unsync) {
1054                                 nr_unsync_leaf++;
1055                                 if (mmu_pages_add(pvec, child, i))
1056                                         return -ENOSPC;
1057                         }
1058                 }
1059         }
1060
1061         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1062                 sp->unsync_children = 0;
1063
1064         return nr_unsync_leaf;
1065 }
1066
1067 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1068                            struct kvm_mmu_pages *pvec)
1069 {
1070         if (!sp->unsync_children)
1071                 return 0;
1072
1073         mmu_pages_add(pvec, sp, 0);
1074         return __mmu_unsync_walk(sp, pvec);
1075 }
1076
1077 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1078 {
1079         unsigned index;
1080         struct hlist_head *bucket;
1081         struct kvm_mmu_page *sp;
1082         struct hlist_node *node;
1083
1084         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1085         index = kvm_page_table_hashfn(gfn);
1086         bucket = &kvm->arch.mmu_page_hash[index];
1087         hlist_for_each_entry(sp, node, bucket, hash_link)
1088                 if (sp->gfn == gfn && !sp->role.direct
1089                     && !sp->role.invalid) {
1090                         pgprintk("%s: found role %x\n",
1091                                  __func__, sp->role.word);
1092                         return sp;
1093                 }
1094         return NULL;
1095 }
1096
1097 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1098 {
1099         WARN_ON(!sp->unsync);
1100         sp->unsync = 0;
1101         --kvm->stat.mmu_unsync;
1102 }
1103
1104 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1105
1106 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1107 {
1108         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1109                 kvm_mmu_zap_page(vcpu->kvm, sp);
1110                 return 1;
1111         }
1112
1113         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1114                 kvm_flush_remote_tlbs(vcpu->kvm);
1115         kvm_unlink_unsync_page(vcpu->kvm, sp);
1116         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1117                 kvm_mmu_zap_page(vcpu->kvm, sp);
1118                 return 1;
1119         }
1120
1121         kvm_mmu_flush_tlb(vcpu);
1122         return 0;
1123 }
1124
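/*
 * mmu_page_path remembers, for each level above the leaf, which parent
 * shadow page and which index within it the walk came through, so that
 * mmu_pages_clear_parents() can clear unsync_child_bitmap bits and drop
 * unsync_children counts back up the chain.
 */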
1125 struct mmu_page_path {
1126         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1127         unsigned int idx[PT64_ROOT_LEVEL-1];
1128 };
1129
1130 #define for_each_sp(pvec, sp, parents, i)                       \
1131                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1132                         sp = pvec.page[i].sp;                   \
1133                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1134                         i = mmu_pages_next(&pvec, &parents, i))
1135
1136 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1137                           struct mmu_page_path *parents,
1138                           int i)
1139 {
1140         int n;
1141
1142         for (n = i+1; n < pvec->nr; n++) {
1143                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1144
1145                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1146                         parents->idx[0] = pvec->page[n].idx;
1147                         return n;
1148                 }
1149
1150                 parents->parent[sp->role.level-2] = sp;
1151                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1152         }
1153
1154         return n;
1155 }
1156
1157 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1158 {
1159         struct kvm_mmu_page *sp;
1160         unsigned int level = 0;
1161
1162         do {
1163                 unsigned int idx = parents->idx[level];
1164
1165                 sp = parents->parent[level];
1166                 if (!sp)
1167                         return;
1168
1169                 --sp->unsync_children;
1170                 WARN_ON((int)sp->unsync_children < 0);
1171                 __clear_bit(idx, sp->unsync_child_bitmap);
1172                 level++;
1173         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1174 }
1175
1176 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1177                                struct mmu_page_path *parents,
1178                                struct kvm_mmu_pages *pvec)
1179 {
1180         parents->parent[parent->role.level-1] = NULL;
1181         pvec->nr = 0;
1182 }
1183
1184 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1185                               struct kvm_mmu_page *parent)
1186 {
1187         int i;
1188         struct kvm_mmu_page *sp;
1189         struct mmu_page_path parents;
1190         struct kvm_mmu_pages pages;
1191
1192         kvm_mmu_pages_init(parent, &parents, &pages);
1193         while (mmu_unsync_walk(parent, &pages)) {
1194                 int protected = 0;
1195
1196                 for_each_sp(pages, sp, parents, i)
1197                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1198
1199                 if (protected)
1200                         kvm_flush_remote_tlbs(vcpu->kvm);
1201
1202                 for_each_sp(pages, sp, parents, i) {
1203                         kvm_sync_page(vcpu, sp);
1204                         mmu_pages_clear_parents(&parents);
1205                 }
1206                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1207                 kvm_mmu_pages_init(parent, &parents, &pages);
1208         }
1209 }
1210
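/*
 * kvm_mmu_get_page() looks the (gfn, role) pair up in the mmu page hash
 * and reuses an existing shadow page when possible (syncing it first if
 * it went unsync); otherwise it allocates a fresh page, write-protects
 * the gfn for indirect maps and accounts it as shadowed.
 */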
1211 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1212                                              gfn_t gfn,
1213                                              gva_t gaddr,
1214                                              unsigned level,
1215                                              int direct,
1216                                              unsigned access,
1217                                              u64 *parent_pte)
1218 {
1219         union kvm_mmu_page_role role;
1220         unsigned index;
1221         unsigned quadrant;
1222         struct hlist_head *bucket;
1223         struct kvm_mmu_page *sp;
1224         struct hlist_node *node, *tmp;
1225
1226         role = vcpu->arch.mmu.base_role;
1227         role.level = level;
1228         role.direct = direct;
1229         role.access = access;
1230         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1231                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1232                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1233                 role.quadrant = quadrant;
1234         }
1235         pgprintk("%s: looking gfn %lx role %x\n", __func__,
1236                  gfn, role.word);
1237         index = kvm_page_table_hashfn(gfn);
1238         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1239         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1240                 if (sp->gfn == gfn) {
1241                         if (sp->unsync)
1242                                 if (kvm_sync_page(vcpu, sp))
1243                                         continue;
1244
1245                         if (sp->role.word != role.word)
1246                                 continue;
1247
1248                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1249                         if (sp->unsync_children) {
1250                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1251                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1252                         }
1253                         pgprintk("%s: found\n", __func__);
1254                         return sp;
1255                 }
1256         ++vcpu->kvm->stat.mmu_cache_miss;
1257         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1258         if (!sp)
1259                 return sp;
1260         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1261         sp->gfn = gfn;
1262         sp->role = role;
1263         hlist_add_head(&sp->hash_link, bucket);
1264         if (!direct) {
1265                 if (rmap_write_protect(vcpu->kvm, gfn))
1266                         kvm_flush_remote_tlbs(vcpu->kvm);
1267                 account_shadowed(vcpu->kvm, gfn);
1268         }
1269         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1270                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1271         else
1272                 nonpaging_prefetch_page(vcpu, sp);
1273         return sp;
1274 }
1275
1276 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1277                              struct kvm_vcpu *vcpu, u64 addr)
1278 {
1279         iterator->addr = addr;
1280         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1281         iterator->level = vcpu->arch.mmu.shadow_root_level;
1282         if (iterator->level == PT32E_ROOT_LEVEL) {
1283                 iterator->shadow_addr
1284                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1285                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1286                 --iterator->level;
1287                 if (!iterator->shadow_addr)
1288                         iterator->level = 0;
1289         }
1290 }
1291
1292 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1293 {
1294         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1295                 return false;
1296         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1297         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1298         return true;
1299 }
1300
1301 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1302 {
1303         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1304         --iterator->level;
1305 }
1306
1307 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1308                                          struct kvm_mmu_page *sp)
1309 {
1310         unsigned i;
1311         u64 *pt;
1312         u64 ent;
1313
1314         pt = sp->spt;
1315
1316         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1317                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1318                         if (is_shadow_present_pte(pt[i]))
1319                                 rmap_remove(kvm, &pt[i]);
1320                         pt[i] = shadow_trap_nonpresent_pte;
1321                 }
1322                 return;
1323         }
1324
1325         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1326                 ent = pt[i];
1327
1328                 if (is_shadow_present_pte(ent)) {
1329                         if (!is_large_pte(ent)) {
1330                                 ent &= PT64_BASE_ADDR_MASK;
1331                                 mmu_page_remove_parent_pte(page_header(ent),
1332                                                            &pt[i]);
1333                         } else {
1334                                 --kvm->stat.lpages;
1335                                 rmap_remove(kvm, &pt[i]);
1336                         }
1337                 }
1338                 pt[i] = shadow_trap_nonpresent_pte;
1339         }
1340 }
1341
1342 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1343 {
1344         mmu_page_remove_parent_pte(sp, parent_pte);
1345 }
1346
1347 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1348 {
1349         int i;
1350         struct kvm_vcpu *vcpu;
1351
1352         kvm_for_each_vcpu(i, vcpu, kvm)
1353                 vcpu->arch.last_pte_updated = NULL;
1354 }
1355
1356 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1357 {
1358         u64 *parent_pte;
1359
1360         while (sp->multimapped || sp->parent_pte) {
1361                 if (!sp->multimapped)
1362                         parent_pte = sp->parent_pte;
1363                 else {
1364                         struct kvm_pte_chain *chain;
1365
1366                         chain = container_of(sp->parent_ptes.first,
1367                                              struct kvm_pte_chain, link);
1368                         parent_pte = chain->parent_ptes[0];
1369                 }
1370                 BUG_ON(!parent_pte);
1371                 kvm_mmu_put_page(sp, parent_pte);
1372                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1373         }
1374 }
1375
1376 static int mmu_zap_unsync_children(struct kvm *kvm,
1377                                    struct kvm_mmu_page *parent)
1378 {
1379         int i, zapped = 0;
1380         struct mmu_page_path parents;
1381         struct kvm_mmu_pages pages;
1382
1383         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1384                 return 0;
1385
1386         kvm_mmu_pages_init(parent, &parents, &pages);
1387         while (mmu_unsync_walk(parent, &pages)) {
1388                 struct kvm_mmu_page *sp;
1389
1390                 for_each_sp(pages, sp, parents, i) {
1391                         kvm_mmu_zap_page(kvm, sp);
1392                         mmu_pages_clear_parents(&parents);
1393                 }
1394                 zapped += pages.nr;
1395                 kvm_mmu_pages_init(parent, &parents, &pages);
1396         }
1397
1398         return zapped;
1399 }
1400
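/*
 * kvm_mmu_zap_page() tears a shadow page down: it zaps any unsync
 * children, unlinks the child sptes and the parent ptes, and then either
 * frees the page (root_count == 0) or marks it invalid and leaves it on
 * the active list until the vcpus reload their roots.
 */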
1401 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1402 {
1403         int ret;
1404         ++kvm->stat.mmu_shadow_zapped;
1405         ret = mmu_zap_unsync_children(kvm, sp);
1406         kvm_mmu_page_unlink_children(kvm, sp);
1407         kvm_mmu_unlink_parents(kvm, sp);
1408         kvm_flush_remote_tlbs(kvm);
1409         if (!sp->role.invalid && !sp->role.direct)
1410                 unaccount_shadowed(kvm, sp->gfn);
1411         if (sp->unsync)
1412                 kvm_unlink_unsync_page(kvm, sp);
1413         if (!sp->root_count) {
1414                 hlist_del(&sp->hash_link);
1415                 kvm_mmu_free_page(kvm, sp);
1416         } else {
1417                 sp->role.invalid = 1;
1418                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1419                 kvm_reload_remote_mmus(kvm);
1420         }
1421         kvm_mmu_reset_last_pte_updated(kvm);
1422         return ret;
1423 }
1424
1425 /*
1426  * Change the number of mmu pages allocated to the vm.
1427  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
1428  */
1429 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1430 {
1431         int used_pages;
1432
1433         used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
1434         used_pages = max(0, used_pages);
1435
1436         /*
1437          * If we set the number of mmu pages to be smaller than the
1438          * number of active pages, we must free some mmu pages before we
1439          * change the value.
1440          */
1441
1442         if (used_pages > kvm_nr_mmu_pages) {
1443                 while (used_pages > kvm_nr_mmu_pages) {
1444                         struct kvm_mmu_page *page;
1445
1446                         page = container_of(kvm->arch.active_mmu_pages.prev,
1447                                             struct kvm_mmu_page, link);
1448                         kvm_mmu_zap_page(kvm, page);
1449                         used_pages--;
1450                 }
1451                 kvm->arch.n_free_mmu_pages = 0;
1452         }
1453         else
1454                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1455                                          - kvm->arch.n_alloc_mmu_pages;
1456
1457         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1458 }
1459
1460 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1461 {
1462         unsigned index;
1463         struct hlist_head *bucket;
1464         struct kvm_mmu_page *sp;
1465         struct hlist_node *node, *n;
1466         int r;
1467
1468         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1469         r = 0;
1470         index = kvm_page_table_hashfn(gfn);
1471         bucket = &kvm->arch.mmu_page_hash[index];
1472         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1473                 if (sp->gfn == gfn && !sp->role.direct) {
1474                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1475                                  sp->role.word);
1476                         r = 1;
1477                         if (kvm_mmu_zap_page(kvm, sp))
1478                                 n = bucket->first;
1479                 }
1480         return r;
1481 }
1482
1483 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1484 {
1485         unsigned index;
1486         struct hlist_head *bucket;
1487         struct kvm_mmu_page *sp;
1488         struct hlist_node *node, *nn;
1489
1490         index = kvm_page_table_hashfn(gfn);
1491         bucket = &kvm->arch.mmu_page_hash[index];
1492         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1493                 if (sp->gfn == gfn && !sp->role.direct
1494                     && !sp->role.invalid) {
1495                         pgprintk("%s: zap %lx %x\n",
1496                                  __func__, gfn, sp->role.word);
1497                         kvm_mmu_zap_page(kvm, sp);
1498                 }
1499         }
1500 }
1501
1502 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1503 {
1504         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1505         struct kvm_mmu_page *sp = page_header(__pa(pte));
1506
1507         __set_bit(slot, sp->slot_bitmap);
1508 }
1509
1510 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1511 {
1512         int i;
1513         u64 *pt = sp->spt;
1514
1515         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1516                 return;
1517
1518         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1519                 if (pt[i] == shadow_notrap_nonpresent_pte)
1520                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1521         }
1522 }
1523
1524 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1525 {
1526         struct page *page;
1527
1528         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1529
1530         if (gpa == UNMAPPED_GVA)
1531                 return NULL;
1532
1533         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1534
1535         return page;
1536 }
1537
1538 /*
1539  * The function is based on mtrr_type_lookup() in
1540  * arch/x86/kernel/cpu/mtrr/generic.c
1541  */
1542 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1543                          u64 start, u64 end)
1544 {
1545         int i;
1546         u64 base, mask;
1547         u8 prev_match, curr_match;
1548         int num_var_ranges = KVM_NR_VAR_MTRR;
1549
1550         if (!mtrr_state->enabled)
1551                 return 0xFF;
1552
1553         /* Make end inclusive instead of exclusive */
1554         end--;
1555
1556         /* Look in fixed ranges. Just return the type as per start */
1557         if (mtrr_state->have_fixed && (start < 0x100000)) {
1558                 int idx;
1559
1560                 if (start < 0x80000) {
1561                         idx = 0;
1562                         idx += (start >> 16);
1563                         return mtrr_state->fixed_ranges[idx];
1564                 } else if (start < 0xC0000) {
1565                         idx = 1 * 8;
1566                         idx += ((start - 0x80000) >> 14);
1567                         return mtrr_state->fixed_ranges[idx];
1568                 } else if (start < 0x1000000) {
1569                         idx = 3 * 8;
1570                         idx += ((start - 0xC0000) >> 12);
1571                         return mtrr_state->fixed_ranges[idx];
1572                 }
1573         }
1574
1575         /*
1576          * Look in variable ranges
1577          * Look for multiple ranges matching this address and pick the type
1578          * as per MTRR precedence
1579          */
1580         if (!(mtrr_state->enabled & 2))
1581                 return mtrr_state->def_type;
1582
1583         prev_match = 0xFF;
1584         for (i = 0; i < num_var_ranges; ++i) {
1585                 unsigned short start_state, end_state;
1586
1587                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1588                         continue;
1589
1590                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1591                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1592                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1593                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1594
1595                 start_state = ((start & mask) == (base & mask));
1596                 end_state = ((end & mask) == (base & mask));
1597                 if (start_state != end_state)
1598                         return 0xFE;
1599
1600                 if ((start & mask) != (base & mask))
1601                         continue;
1602
1603                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1604                 if (prev_match == 0xFF) {
1605                         prev_match = curr_match;
1606                         continue;
1607                 }
1608
1609                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1610                     curr_match == MTRR_TYPE_UNCACHABLE)
1611                         return MTRR_TYPE_UNCACHABLE;
1612
1613                 if ((prev_match == MTRR_TYPE_WRBACK &&
1614                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1615                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1616                      curr_match == MTRR_TYPE_WRBACK)) {
1617                         prev_match = MTRR_TYPE_WRTHROUGH;
1618                         curr_match = MTRR_TYPE_WRTHROUGH;
1619                 }
1620
1621                 if (prev_match != curr_match)
1622                         return MTRR_TYPE_UNCACHABLE;
1623         }
1624
1625         if (prev_match != 0xFF)
1626                 return prev_match;
1627
1628         return mtrr_state->def_type;
1629 }
1630
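/*
 * Resolve the guest MTRR memory type for a single guest frame.  Unusable
 * results (MTRRs disabled, or a range straddle) fall back to write-back.
 * A rough usage sketch for a hypothetical caller holding a guest-physical
 * address:
 *
 *	u8 mt = kvm_get_guest_memory_type(vcpu, gpa >> PAGE_SHIFT);
 */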
1631 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1632 {
1633         u8 mtrr;
1634
1635         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1636                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1637         if (mtrr == 0xfe || mtrr == 0xff)
1638                 mtrr = MTRR_TYPE_WRBACK;
1639         return mtrr;
1640 }
1641 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1642
1643 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1644 {
1645         unsigned index;
1646         struct hlist_head *bucket;
1647         struct kvm_mmu_page *s;
1648         struct hlist_node *node, *n;
1649
1650         index = kvm_page_table_hashfn(sp->gfn);
1651         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1652         /* don't unsync if pagetable is shadowed with multiple roles */
1653         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1654                 if (s->gfn != sp->gfn || s->role.direct)
1655                         continue;
1656                 if (s->role.word != sp->role.word)
1657                         return 1;
1658         }
1659         ++vcpu->kvm->stat.mmu_unsync;
1660         sp->unsync = 1;
1661
1662         kvm_mmu_mark_parents_unsync(vcpu, sp);
1663
1664         mmu_convert_notrap(sp);
1665         return 0;
1666 }
1667
1668 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1669                                   bool can_unsync)
1670 {
1671         struct kvm_mmu_page *shadow;
1672
1673         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1674         if (shadow) {
1675                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1676                         return 1;
1677                 if (shadow->unsync)
1678                         return 0;
1679                 if (can_unsync && oos_shadow)
1680                         return kvm_unsync_page(vcpu, shadow);
1681                 return 1;
1682         }
1683         return 0;
1684 }
1685
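/*
 * Build and install a single shadow pte.  Returns nonzero when the entry
 * could not be made writable as requested (a large mapping would cover a
 * write-protected frame, or the gfn is shadowed as a page table and must
 * stay read-only); the caller is then expected to flush the TLB and, on a
 * write fault, handle the access as a guest page-table write.
 */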
1686 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1687                     unsigned pte_access, int user_fault,
1688                     int write_fault, int dirty, int largepage,
1689                     gfn_t gfn, pfn_t pfn, bool speculative,
1690                     bool can_unsync)
1691 {
1692         u64 spte;
1693         int ret = 0;
1694
1695         /*
1696          * For speculative mappings we leave the accessed bit clear, since we
1697          * sometimes want to see whether the guest actually used the pte (in
1698          * order to detect demand paging).
1699          */
1700         spte = shadow_base_present_pte | shadow_dirty_mask;
1701         if (!speculative)
1702                 spte |= shadow_accessed_mask;
1703         if (!dirty)
1704                 pte_access &= ~ACC_WRITE_MASK;
1705         if (pte_access & ACC_EXEC_MASK)
1706                 spte |= shadow_x_mask;
1707         else
1708                 spte |= shadow_nx_mask;
1709         if (pte_access & ACC_USER_MASK)
1710                 spte |= shadow_user_mask;
1711         if (largepage)
1712                 spte |= PT_PAGE_SIZE_MASK;
1713         if (tdp_enabled)
1714                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1715                         kvm_is_mmio_pfn(pfn));
1716
1717         spte |= (u64)pfn << PAGE_SHIFT;
1718
1719         if ((pte_access & ACC_WRITE_MASK)
1720             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1721
1722                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1723                         ret = 1;
1724                         spte = shadow_trap_nonpresent_pte;
1725                         goto set_pte;
1726                 }
1727
1728                 spte |= PT_WRITABLE_MASK;
1729
1730                 /*
1731                  * Optimization: for pte sync, if spte was writable the hash
1732                  * lookup is unnecessary (and expensive). Write protection
1733                  * is responsibility of mmu_get_page / kvm_sync_page.
1734                  * is the responsibility of mmu_get_page / kvm_sync_page.
1735                  */
1736                 if (!can_unsync && is_writeble_pte(*sptep))
1737                         goto set_pte;
1738
1739                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1740                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1741                                  __func__, gfn);
1742                         ret = 1;
1743                         pte_access &= ~ACC_WRITE_MASK;
1744                         if (is_writeble_pte(spte))
1745                                 spte &= ~PT_WRITABLE_MASK;
1746                 }
1747         }
1748
1749         if (pte_access & ACC_WRITE_MASK)
1750                 mark_page_dirty(vcpu->kvm, gfn);
1751
1752 set_pte:
1753         __set_spte(sptep, spte);
1754         return ret;
1755 }
1756
1757 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1758                          unsigned pt_access, unsigned pte_access,
1759                          int user_fault, int write_fault, int dirty,
1760                          int *ptwrite, int largepage, gfn_t gfn,
1761                          pfn_t pfn, bool speculative)
1762 {
1763         int was_rmapped = 0;
1764         int was_writeble = is_writeble_pte(*sptep);
1765         int rmap_count;
1766
1767         pgprintk("%s: spte %llx access %x write_fault %d"
1768                  " user_fault %d gfn %lx\n",
1769                  __func__, *sptep, pt_access,
1770                  write_fault, user_fault, gfn);
1771
1772         if (is_rmap_spte(*sptep)) {
1773                 /*
1774                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1775                  * the parent of the now unreachable PTE.
1776                  */
1777                 if (largepage && !is_large_pte(*sptep)) {
1778                         struct kvm_mmu_page *child;
1779                         u64 pte = *sptep;
1780
1781                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1782                         mmu_page_remove_parent_pte(child, sptep);
1783                 } else if (pfn != spte_to_pfn(*sptep)) {
1784                         pgprintk("hfn old %lx new %lx\n",
1785                                  spte_to_pfn(*sptep), pfn);
1786                         rmap_remove(vcpu->kvm, sptep);
1787                 } else
1788                         was_rmapped = 1;
1789         }
1790         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1791                       dirty, largepage, gfn, pfn, speculative, true)) {
1792                 if (write_fault)
1793                         *ptwrite = 1;
1794                 kvm_x86_ops->tlb_flush(vcpu);
1795         }
1796
1797         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1798         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1799                  is_large_pte(*sptep)? "2MB" : "4kB",
1800                  is_present_pte(*sptep)?"RW":"R", gfn,
1801                  *sptep, sptep);
1802         if (!was_rmapped && is_large_pte(*sptep))
1803                 ++vcpu->kvm->stat.lpages;
1804
1805         page_header_update_slot(vcpu->kvm, sptep, gfn);
1806         if (!was_rmapped) {
1807                 rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
1808                 if (!is_rmap_spte(*sptep))
1809                         kvm_release_pfn_clean(pfn);
1810                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1811                         rmap_recycle(vcpu, gfn, largepage);
1812         } else {
1813                 if (was_writeble)
1814                         kvm_release_pfn_dirty(pfn);
1815                 else
1816                         kvm_release_pfn_clean(pfn);
1817         }
1818         if (speculative) {
1819                 vcpu->arch.last_pte_updated = sptep;
1820                 vcpu->arch.last_pte_gfn = gfn;
1821         }
1822 }
1823
1824 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1825 {
1826 }
1827
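/*
 * Install a direct mapping (the gfn maps straight to the target pfn), as
 * used by the tdp and real-mode/nonpaging fault paths: walk the shadow
 * table from the root, allocate shadow pages for any missing intermediate
 * level, and plant the leaf spte at either the 4K or the 2M level.  The
 * return value is the pt_write indication from mmu_set_spte(), or -ENOMEM.
 */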
1828 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1829                         int largepage, gfn_t gfn, pfn_t pfn)
1830 {
1831         struct kvm_shadow_walk_iterator iterator;
1832         struct kvm_mmu_page *sp;
1833         int pt_write = 0;
1834         gfn_t pseudo_gfn;
1835
1836         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1837                 if (iterator.level == PT_PAGE_TABLE_LEVEL
1838                     || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1839                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1840                                      0, write, 1, &pt_write,
1841                                      largepage, gfn, pfn, false);
1842                         ++vcpu->stat.pf_fixed;
1843                         break;
1844                 }
1845
1846                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1847                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1848                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1849                                               iterator.level - 1,
1850                                               1, ACC_ALL, iterator.sptep);
1851                         if (!sp) {
1852                                 pgprintk("nonpaging_map: ENOMEM\n");
1853                                 kvm_release_pfn_clean(pfn);
1854                                 return -ENOMEM;
1855                         }
1856
1857                         __set_spte(iterator.sptep,
1858                                    __pa(sp->spt)
1859                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
1860                                    | shadow_user_mask | shadow_x_mask);
1861                 }
1862         }
1863         return pt_write;
1864 }
1865
1866 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1867 {
1868         int r;
1869         int largepage = 0;
1870         pfn_t pfn;
1871         unsigned long mmu_seq;
1872
1873         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1874                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1875                 largepage = 1;
1876         }
1877
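        /*
         * Order matters here: sample mmu_notifier_seq before translating
         * gfn to pfn, with smp_rmb() between the two.  If an mmu-notifier
         * invalidation slips in, mmu_notifier_retry() (checked below under
         * mmu_lock) makes us drop the fault and retry instead of mapping a
         * stale pfn.
         */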
1878         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1879         smp_rmb();
1880         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1881
1882         /* mmio */
1883         if (is_error_pfn(pfn)) {
1884                 kvm_release_pfn_clean(pfn);
1885                 return 1;
1886         }
1887
1888         spin_lock(&vcpu->kvm->mmu_lock);
1889         if (mmu_notifier_retry(vcpu, mmu_seq))
1890                 goto out_unlock;
1891         kvm_mmu_free_some_pages(vcpu);
1892         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1893         spin_unlock(&vcpu->kvm->mmu_lock);
1894
1895
1896         return r;
1897
1898 out_unlock:
1899         spin_unlock(&vcpu->kvm->mmu_lock);
1900         kvm_release_pfn_clean(pfn);
1901         return 0;
1902 }
1903
1904
1905 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1906 {
1907         int i;
1908         struct kvm_mmu_page *sp;
1909
1910         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1911                 return;
1912         spin_lock(&vcpu->kvm->mmu_lock);
1913         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1914                 hpa_t root = vcpu->arch.mmu.root_hpa;
1915
1916                 sp = page_header(root);
1917                 --sp->root_count;
1918                 if (!sp->root_count && sp->role.invalid)
1919                         kvm_mmu_zap_page(vcpu->kvm, sp);
1920                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1921                 spin_unlock(&vcpu->kvm->mmu_lock);
1922                 return;
1923         }
1924         for (i = 0; i < 4; ++i) {
1925                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1926
1927                 if (root) {
1928                         root &= PT64_BASE_ADDR_MASK;
1929                         sp = page_header(root);
1930                         --sp->root_count;
1931                         if (!sp->root_count && sp->role.invalid)
1932                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1933                 }
1934                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1935         }
1936         spin_unlock(&vcpu->kvm->mmu_lock);
1937         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1938 }
1939
1940 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
1941 {
1942         int ret = 0;
1943
1944         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
1945                 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1946                 ret = 1;
1947         }
1948
1949         return ret;
1950 }
1951
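/*
 * Allocate shadow roots for the current mode: a single 4-level root when
 * the shadow uses PT64_ROOT_LEVEL, otherwise four PAE roots (one per guest
 * pdptr; non-present entries are left clear) gathered in pae_root[], with
 * root_hpa pointing at that table.  Returns 1 if a root gfn falls outside
 * visible guest memory, in which case a triple fault has been requested.
 */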
1952 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1953 {
1954         int i;
1955         gfn_t root_gfn;
1956         struct kvm_mmu_page *sp;
1957         int direct = 0;
1958         u64 pdptr;
1959
1960         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1961
1962         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1963                 hpa_t root = vcpu->arch.mmu.root_hpa;
1964
1965                 ASSERT(!VALID_PAGE(root));
1966                 if (tdp_enabled)
1967                         direct = 1;
1968                 if (mmu_check_root(vcpu, root_gfn))
1969                         return 1;
1970                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1971                                       PT64_ROOT_LEVEL, direct,
1972                                       ACC_ALL, NULL);
1973                 root = __pa(sp->spt);
1974                 ++sp->root_count;
1975                 vcpu->arch.mmu.root_hpa = root;
1976                 return 0;
1977         }
1978         direct = !is_paging(vcpu);
1979         if (tdp_enabled)
1980                 direct = 1;
1981         for (i = 0; i < 4; ++i) {
1982                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1983
1984                 ASSERT(!VALID_PAGE(root));
1985                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1986                         pdptr = kvm_pdptr_read(vcpu, i);
1987                         if (!is_present_gpte(pdptr)) {
1988                                 vcpu->arch.mmu.pae_root[i] = 0;
1989                                 continue;
1990                         }
1991                         root_gfn = pdptr >> PAGE_SHIFT;
1992                 } else if (vcpu->arch.mmu.root_level == 0)
1993                         root_gfn = 0;
1994                 if (mmu_check_root(vcpu, root_gfn))
1995                         return 1;
1996                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1997                                       PT32_ROOT_LEVEL, direct,
1998                                       ACC_ALL, NULL);
1999                 root = __pa(sp->spt);
2000                 ++sp->root_count;
2001                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2002         }
2003         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2004         return 0;
2005 }
2006
2007 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2008 {
2009         int i;
2010         struct kvm_mmu_page *sp;
2011
2012         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2013                 return;
2014         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2015                 hpa_t root = vcpu->arch.mmu.root_hpa;
2016                 sp = page_header(root);
2017                 mmu_sync_children(vcpu, sp);
2018                 return;
2019         }
2020         for (i = 0; i < 4; ++i) {
2021                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2022
2023                 if (root && VALID_PAGE(root)) {
2024                         root &= PT64_BASE_ADDR_MASK;
2025                         sp = page_header(root);
2026                         mmu_sync_children(vcpu, sp);
2027                 }
2028         }
2029 }
2030
2031 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2032 {
2033         spin_lock(&vcpu->kvm->mmu_lock);
2034         mmu_sync_roots(vcpu);
2035         spin_unlock(&vcpu->kvm->mmu_lock);
2036 }
2037
2038 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2039 {
2040         return vaddr;
2041 }
2042
2043 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2044                                 u32 error_code)
2045 {
2046         gfn_t gfn;
2047         int r;
2048
2049         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2050         r = mmu_topup_memory_caches(vcpu);
2051         if (r)
2052                 return r;
2053
2054         ASSERT(vcpu);
2055         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2056
2057         gfn = gva >> PAGE_SHIFT;
2058
2059         return nonpaging_map(vcpu, gva & PAGE_MASK,
2060                              error_code & PFERR_WRITE_MASK, gfn);
2061 }
2062
2063 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2064                                 u32 error_code)
2065 {
2066         pfn_t pfn;
2067         int r;
2068         int largepage = 0;
2069         gfn_t gfn = gpa >> PAGE_SHIFT;
2070         unsigned long mmu_seq;
2071
2072         ASSERT(vcpu);
2073         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2074
2075         r = mmu_topup_memory_caches(vcpu);
2076         if (r)
2077                 return r;
2078
2079         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
2080                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2081                 largepage = 1;
2082         }
2083         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2084         smp_rmb();
2085         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2086         if (is_error_pfn(pfn)) {
2087                 kvm_release_pfn_clean(pfn);
2088                 return 1;
2089         }
2090         spin_lock(&vcpu->kvm->mmu_lock);
2091         if (mmu_notifier_retry(vcpu, mmu_seq))
2092                 goto out_unlock;
2093         kvm_mmu_free_some_pages(vcpu);
2094         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2095                          largepage, gfn, pfn);
2096         spin_unlock(&vcpu->kvm->mmu_lock);
2097
2098         return r;
2099
2100 out_unlock:
2101         spin_unlock(&vcpu->kvm->mmu_lock);
2102         kvm_release_pfn_clean(pfn);
2103         return 0;
2104 }
2105
2106 static void nonpaging_free(struct kvm_vcpu *vcpu)
2107 {
2108         mmu_free_roots(vcpu);
2109 }
2110
2111 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2112 {
2113         struct kvm_mmu *context = &vcpu->arch.mmu;
2114
2115         context->new_cr3 = nonpaging_new_cr3;
2116         context->page_fault = nonpaging_page_fault;
2117         context->gva_to_gpa = nonpaging_gva_to_gpa;
2118         context->free = nonpaging_free;
2119         context->prefetch_page = nonpaging_prefetch_page;
2120         context->sync_page = nonpaging_sync_page;
2121         context->invlpg = nonpaging_invlpg;
2122         context->root_level = 0;
2123         context->shadow_root_level = PT32E_ROOT_LEVEL;
2124         context->root_hpa = INVALID_PAGE;
2125         return 0;
2126 }
2127
2128 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2129 {
2130         ++vcpu->stat.tlb_flush;
2131         kvm_x86_ops->tlb_flush(vcpu);
2132 }
2133
2134 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2135 {
2136         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2137         mmu_free_roots(vcpu);
2138 }
2139
2140 static void inject_page_fault(struct kvm_vcpu *vcpu,
2141                               u64 addr,
2142                               u32 err_code)
2143 {
2144         kvm_inject_page_fault(vcpu, addr, err_code);
2145 }
2146
2147 static void paging_free(struct kvm_vcpu *vcpu)
2148 {
2149         nonpaging_free(vcpu);
2150 }
2151
2152 static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2153 {
2154         int bit7;
2155
2156         bit7 = (gpte >> 7) & 1;
2157         return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2158 }
2159
2160 #define PTTYPE 64
2161 #include "paging_tmpl.h"
2162 #undef PTTYPE
2163
2164 #define PTTYPE 32
2165 #include "paging_tmpl.h"
2166 #undef PTTYPE
2167
2168 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2169 {
2170         struct kvm_mmu *context = &vcpu->arch.mmu;
2171         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2172         u64 exb_bit_rsvd = 0;
2173
2174         if (!is_nx(vcpu))
2175                 exb_bit_rsvd = rsvd_bits(63, 63);
2176         switch (level) {
2177         case PT32_ROOT_LEVEL:
2178                 /* no rsvd bits for 2 level 4K page table entries */
2179                 context->rsvd_bits_mask[0][1] = 0;
2180                 context->rsvd_bits_mask[0][0] = 0;
2181                 if (is_cpuid_PSE36())
2182                         /* 36bits PSE 4MB page */
2183                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2184                 else
2185                         /* 32 bits PSE 4MB page */
2186                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2187                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2188                 break;
2189         case PT32E_ROOT_LEVEL:
2190                 context->rsvd_bits_mask[0][2] =
2191                         rsvd_bits(maxphyaddr, 63) |
2192                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2193                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2194                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2195                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2196                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2197                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2198                         rsvd_bits(maxphyaddr, 62) |
2199                         rsvd_bits(13, 20);              /* large page */
2200                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2201                 break;
2202         case PT64_ROOT_LEVEL:
2203                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2204                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2205                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2206                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2207                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2208                         rsvd_bits(maxphyaddr, 51);
2209                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2210                         rsvd_bits(maxphyaddr, 51);
2211                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2212                 context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
2213                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2214                         rsvd_bits(maxphyaddr, 51) |
2215                         rsvd_bits(13, 20);              /* large page */
2216                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2217                 break;
2218         }
2219 }
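
/*
 * As a rough example, assuming a vcpu reporting maxphyaddr == 40 with NX
 * disabled: for 4-level paging the PTE mask rsvd_bits_mask[0][0] becomes
 * bit 63 plus bits 51:40, so a guest PTE with (say) bit 45 set is flagged
 * as a reserved-bit violation by is_rsvd_bits_set().
 */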
2220
2221 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2222 {
2223         struct kvm_mmu *context = &vcpu->arch.mmu;
2224
2225         ASSERT(is_pae(vcpu));
2226         context->new_cr3 = paging_new_cr3;
2227         context->page_fault = paging64_page_fault;
2228         context->gva_to_gpa = paging64_gva_to_gpa;
2229         context->prefetch_page = paging64_prefetch_page;
2230         context->sync_page = paging64_sync_page;
2231         context->invlpg = paging64_invlpg;
2232         context->free = paging_free;
2233         context->root_level = level;
2234         context->shadow_root_level = level;
2235         context->root_hpa = INVALID_PAGE;
2236         return 0;
2237 }
2238
2239 static int paging64_init_context(struct kvm_vcpu *vcpu)
2240 {
2241         reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2242         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2243 }
2244
2245 static int paging32_init_context(struct kvm_vcpu *vcpu)
2246 {
2247         struct kvm_mmu *context = &vcpu->arch.mmu;
2248
2249         reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2250         context->new_cr3 = paging_new_cr3;
2251         context->page_fault = paging32_page_fault;
2252         context->gva_to_gpa = paging32_gva_to_gpa;
2253         context->free = paging_free;
2254         context->prefetch_page = paging32_prefetch_page;
2255         context->sync_page = paging32_sync_page;
2256         context->invlpg = paging32_invlpg;
2257         context->root_level = PT32_ROOT_LEVEL;
2258         context->shadow_root_level = PT32E_ROOT_LEVEL;
2259         context->root_hpa = INVALID_PAGE;
2260         return 0;
2261 }
2262
2263 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2264 {
2265         reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2266         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2267 }
2268
2269 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2270 {
2271         struct kvm_mmu *context = &vcpu->arch.mmu;
2272
2273         context->new_cr3 = nonpaging_new_cr3;
2274         context->page_fault = tdp_page_fault;
2275         context->free = nonpaging_free;
2276         context->prefetch_page = nonpaging_prefetch_page;
2277         context->sync_page = nonpaging_sync_page;
2278         context->invlpg = nonpaging_invlpg;
2279         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2280         context->root_hpa = INVALID_PAGE;
2281
2282         if (!is_paging(vcpu)) {
2283                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2284                 context->root_level = 0;
2285         } else if (is_long_mode(vcpu)) {
2286                 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2287                 context->gva_to_gpa = paging64_gva_to_gpa;
2288                 context->root_level = PT64_ROOT_LEVEL;
2289         } else if (is_pae(vcpu)) {
2290                 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2291                 context->gva_to_gpa = paging64_gva_to_gpa;
2292                 context->root_level = PT32E_ROOT_LEVEL;
2293         } else {
2294                 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2295                 context->gva_to_gpa = paging32_gva_to_gpa;
2296                 context->root_level = PT32_ROOT_LEVEL;
2297         }
2298
2299         return 0;
2300 }
2301
2302 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2303 {
2304         int r;
2305
2306         ASSERT(vcpu);
2307         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2308
2309         if (!is_paging(vcpu))
2310                 r = nonpaging_init_context(vcpu);
2311         else if (is_long_mode(vcpu))
2312                 r = paging64_init_context(vcpu);
2313         else if (is_pae(vcpu))
2314                 r = paging32E_init_context(vcpu);
2315         else
2316                 r = paging32_init_context(vcpu);
2317
2318         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2319
2320         return r;
2321 }
2322
2323 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2324 {
2325         vcpu->arch.update_pte.pfn = bad_pfn;
2326
2327         if (tdp_enabled)
2328                 return init_kvm_tdp_mmu(vcpu);
2329         else
2330                 return init_kvm_softmmu(vcpu);
2331 }
2332
2333 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2334 {
2335         ASSERT(vcpu);
2336         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2337                 vcpu->arch.mmu.free(vcpu);
2338                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2339         }
2340 }
2341
2342 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2343 {
2344         destroy_kvm_mmu(vcpu);
2345         return init_kvm_mmu(vcpu);
2346 }
2347 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2348
2349 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2350 {
2351         int r;
2352
2353         r = mmu_topup_memory_caches(vcpu);
2354         if (r)
2355                 goto out;
2356         spin_lock(&vcpu->kvm->mmu_lock);
2357         kvm_mmu_free_some_pages(vcpu);
2358         r = mmu_alloc_roots(vcpu);
2359         mmu_sync_roots(vcpu);
2360         spin_unlock(&vcpu->kvm->mmu_lock);
2361         if (r)
2362                 goto out;
2363         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2364         kvm_mmu_flush_tlb(vcpu);
2365 out:
2366         return r;
2367 }
2368 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2369
2370 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2371 {
2372         mmu_free_roots(vcpu);
2373 }
2374
2375 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2376                                   struct kvm_mmu_page *sp,
2377                                   u64 *spte)
2378 {
2379         u64 pte;
2380         struct kvm_mmu_page *child;
2381
2382         pte = *spte;
2383         if (is_shadow_present_pte(pte)) {
2384                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
2385                     is_large_pte(pte))
2386                         rmap_remove(vcpu->kvm, spte);
2387                 else {
2388                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2389                         mmu_page_remove_parent_pte(child, spte);
2390                 }
2391         }
2392         __set_spte(spte, shadow_trap_nonpresent_pte);
2393         if (is_large_pte(pte))
2394                 --vcpu->kvm->stat.lpages;
2395 }
2396
2397 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2398                                   struct kvm_mmu_page *sp,
2399                                   u64 *spte,
2400                                   const void *new)
2401 {
2402         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2403                 if (!vcpu->arch.update_pte.largepage ||
2404                     sp->role.glevels == PT32_ROOT_LEVEL) {
2405                         ++vcpu->kvm->stat.mmu_pde_zapped;
2406                         return;
2407                 }
2408         }
2409
2410         ++vcpu->kvm->stat.mmu_pte_updated;
2411         if (sp->role.glevels == PT32_ROOT_LEVEL)
2412                 paging32_update_pte(vcpu, sp, spte, new);
2413         else
2414                 paging64_update_pte(vcpu, sp, spte, new);
2415 }
2416
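/*
 * A remote TLB flush is only required when an existing translation is
 * being narrowed: nothing can be cached if the old spte was not present;
 * if the new one drops the mapping, points at a different frame, or
 * removes a permission (write/user/exec), other vcpus may still hold the
 * old, more permissive entry.
 */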
2417 static bool need_remote_flush(u64 old, u64 new)
2418 {
2419         if (!is_shadow_present_pte(old))
2420                 return false;
2421         if (!is_shadow_present_pte(new))
2422                 return true;
2423         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2424                 return true;
2425         old ^= PT64_NX_MASK;
2426         new ^= PT64_NX_MASK;
2427         return (old & ~new & PT64_PERM_MASK) != 0;
2428 }
2429
2430 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2431 {
2432         if (need_remote_flush(old, new))
2433                 kvm_flush_remote_tlbs(vcpu->kvm);
2434         else
2435                 kvm_mmu_flush_tlb(vcpu);
2436 }
2437
2438 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2439 {
2440         u64 *spte = vcpu->arch.last_pte_updated;
2441
2442         return !!(spte && (*spte & shadow_accessed_mask));
2443 }
2444
2445 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2446                                           const u8 *new, int bytes)
2447 {
2448         gfn_t gfn;
2449         int r;
2450         u64 gpte = 0;
2451         pfn_t pfn;
2452
2453         vcpu->arch.update_pte.largepage = 0;
2454
2455         if (bytes != 4 && bytes != 8)
2456                 return;
2457
2458         /*
2459          * Assume that the pte write is on a page table of the same type
2460          * as the current vcpu paging mode.  This is nearly always true
2461          * (might be false while changing modes).  Note it is verified later
2462          * by update_pte().
2463          */
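        /*
         * For instance, a PAE guest updating only the high dword of a pte
         * writes 4 bytes at gpa % 8 == 4; the full 8-byte gpte is read
         * from gpa & ~7 and the new half is spliced in before the entry
         * is inspected below.
         */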
2464         if (is_pae(vcpu)) {
2465                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2466                 if ((bytes == 4) && (gpa % 4 == 0)) {
2467                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2468                         if (r)
2469                                 return;
2470                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2471                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2472                         memcpy((void *)&gpte, new, 8);
2473                 }
2474         } else {
2475                 if ((bytes == 4) && (gpa % 4 == 0))
2476                         memcpy((void *)&gpte, new, 4);
2477         }
2478         if (!is_present_gpte(gpte))
2479                 return;
2480         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2481
2482         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2483                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2484                 vcpu->arch.update_pte.largepage = 1;
2485         }
2486         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2487         smp_rmb();
2488         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2489
2490         if (is_error_pfn(pfn)) {
2491                 kvm_release_pfn_clean(pfn);
2492                 return;
2493         }
2494         vcpu->arch.update_pte.gfn = gfn;
2495         vcpu->arch.update_pte.pfn = pfn;
2496 }
2497
2498 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2499 {
2500         u64 *spte = vcpu->arch.last_pte_updated;
2501
2502         if (spte
2503             && vcpu->arch.last_pte_gfn == gfn
2504             && shadow_accessed_mask
2505             && !(*spte & shadow_accessed_mask)
2506             && is_shadow_present_pte(*spte))
2507                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2508 }
2509
2510 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2511                        const u8 *new, int bytes,
2512                        bool guest_initiated)
2513 {
2514         gfn_t gfn = gpa >> PAGE_SHIFT;
2515         struct kvm_mmu_page *sp;
2516         struct hlist_node *node, *n;
2517         struct hlist_head *bucket;
2518         unsigned index;
2519         u64 entry, gentry;
2520         u64 *spte;
2521         unsigned offset = offset_in_page(gpa);
2522         unsigned pte_size;
2523         unsigned page_offset;
2524         unsigned misaligned;
2525         unsigned quadrant;
2526         int level;
2527         int flooded = 0;
2528         int npte;
2529         int r;
2530
2531         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2532         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2533         spin_lock(&vcpu->kvm->mmu_lock);
2534         kvm_mmu_access_page(vcpu, gfn);
2535         kvm_mmu_free_some_pages(vcpu);
2536         ++vcpu->kvm->stat.mmu_pte_write;
2537         kvm_mmu_audit(vcpu, "pre pte write");
2538         if (guest_initiated) {
2539                 if (gfn == vcpu->arch.last_pt_write_gfn
2540                     && !last_updated_pte_accessed(vcpu)) {
2541                         ++vcpu->arch.last_pt_write_count;
2542                         if (vcpu->arch.last_pt_write_count >= 3)
2543                                 flooded = 1;
2544                 } else {
2545                         vcpu->arch.last_pt_write_gfn = gfn;
2546                         vcpu->arch.last_pt_write_count = 1;
2547                         vcpu->arch.last_pte_updated = NULL;
2548                 }
2549         }
2550         index = kvm_page_table_hashfn(gfn);
2551         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2552         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2553                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2554                         continue;
2555                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2556                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2557                 misaligned |= bytes < 4;
2558                 if (misaligned || flooded) {
2559                         /*
2560                          * Misaligned accesses are too much trouble to fix
2561                          * up; also, they usually indicate a page is not used
2562                          * as a page table.
2563                          *
2564                          * If we're seeing too many writes to a page,
2565                          * it may no longer be a page table, or we may be
2566                          * forking, in which case it is better to unmap the
2567                          * page.
2568                          */
2569                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2570                                  gpa, bytes, sp->role.word);
2571                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2572                                 n = bucket->first;
2573                         ++vcpu->kvm->stat.mmu_flooded;
2574                         continue;
2575                 }
2576                 page_offset = offset;
2577                 level = sp->role.level;
2578                 npte = 1;
2579                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2580                         page_offset <<= 1;      /* 32->64 */
2581                         /*
2582                          * A 32-bit pde maps 4MB while the shadow pdes map
2583                          * only 2MB.  So we need to double the offset again
2584                          * and zap two pdes instead of one.
2585                          */
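                        /*
                         * Rough example: a write to pde 6 of the guest's
                         * 32-bit page directory lands at offset 0x18;
                         * doubling gives 0x30, and the second doubling
                         * below (with npte = 2) gives 0x60, i.e. shadow
                         * pdes 12 and 13, the two 2MB entries covering the
                         * 4MB region mapped by guest pde 6.  quadrant works
                         * out to 0, so only the shadow page with that
                         * quadrant is touched.
                         */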
2586                         if (level == PT32_ROOT_LEVEL) {
2587                                 page_offset &= ~7; /* kill rounding error */
2588                                 page_offset <<= 1;
2589                                 npte = 2;
2590                         }
2591                         quadrant = page_offset >> PAGE_SHIFT;
2592                         page_offset &= ~PAGE_MASK;
2593                         if (quadrant != sp->role.quadrant)
2594                                 continue;
2595                 }
2596                 spte = &sp->spt[page_offset / sizeof(*spte)];
2597                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2598                         gentry = 0;
2599                         r = kvm_read_guest_atomic(vcpu->kvm,
2600                                                   gpa & ~(u64)(pte_size - 1),
2601                                                   &gentry, pte_size);
2602                         new = (const void *)&gentry;
2603                         if (r < 0)
2604                                 new = NULL;
2605                 }
2606                 while (npte--) {
2607                         entry = *spte;
2608                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2609                         if (new)
2610                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2611                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2612                         ++spte;
2613                 }
2614         }
2615         kvm_mmu_audit(vcpu, "post pte write");
2616         spin_unlock(&vcpu->kvm->mmu_lock);
2617         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2618                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2619                 vcpu->arch.update_pte.pfn = bad_pfn;
2620         }
2621 }
2622
2623 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2624 {
2625         gpa_t gpa;
2626         int r;
2627
2628         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2629
2630         spin_lock(&vcpu->kvm->mmu_lock);
2631         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2632         spin_unlock(&vcpu->kvm->mmu_lock);
2633         return r;
2634 }
2635 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2636
2637 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2638 {
2639         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2640                 struct kvm_mmu_page *sp;
2641
2642                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2643                                   struct kvm_mmu_page, link);
2644                 kvm_mmu_zap_page(vcpu->kvm, sp);
2645                 ++vcpu->kvm->stat.mmu_recycled;
2646         }
2647 }
2648
2649 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2650 {
2651         int r;
2652         enum emulation_result er;
2653
2654         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2655         if (r < 0)
2656                 goto out;
2657
2658         if (!r) {
2659                 r = 1;
2660                 goto out;
2661         }
2662
2663         r = mmu_topup_memory_caches(vcpu);
2664         if (r)
2665                 goto out;
2666
2667         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2668
2669         switch (er) {
2670         case EMULATE_DONE:
2671                 return 1;
2672         case EMULATE_DO_MMIO:
2673                 ++vcpu->stat.mmio_exits;
2674                 return 0;
2675         case EMULATE_FAIL:
2676                 kvm_report_emulation_failure(vcpu, "pagetable");
2677                 return 1;
2678         default:
2679                 BUG();
2680         }
2681 out:
2682         return r;
2683 }
2684 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2685
2686 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2687 {
2688         vcpu->arch.mmu.invlpg(vcpu, gva);
2689         kvm_mmu_flush_tlb(vcpu);
2690         ++vcpu->stat.invlpg;
2691 }
2692 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2693
2694 void kvm_enable_tdp(void)
2695 {
2696         tdp_enabled = true;
2697 }
2698 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2699
2700 void kvm_disable_tdp(void)
2701 {
2702         tdp_enabled = false;
2703 }
2704 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2705
2706 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2707 {
2708         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2709 }
2710
2711 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2712 {
2713         struct page *page;
2714         int i;
2715
2716         ASSERT(vcpu);
2717
2718         if (vcpu->kvm->arch.n_requested_mmu_pages)
2719                 vcpu->kvm->arch.n_free_mmu_pages =
2720                                         vcpu->kvm->arch.n_requested_mmu_pages;
2721         else
2722                 vcpu->kvm->arch.n_free_mmu_pages =
2723                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2724         /*
2725          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2726          * Therefore we need to allocate shadow page tables in the first
2727          * 4GB of memory, which happens to fit the DMA32 zone.
2728          */
2729         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2730         if (!page)
2731                 goto error_1;
2732         vcpu->arch.mmu.pae_root = page_address(page);
2733         for (i = 0; i < 4; ++i)
2734                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2735
2736         return 0;
2737
2738 error_1:
2739         free_mmu_pages(vcpu);
2740         return -ENOMEM;
2741 }
2742
2743 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2744 {
2745         ASSERT(vcpu);
2746         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2747
2748         return alloc_mmu_pages(vcpu);
2749 }
2750
2751 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2752 {
2753         ASSERT(vcpu);
2754         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2755
2756         return init_kvm_mmu(vcpu);
2757 }
2758
2759 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2760 {
2761         ASSERT(vcpu);
2762
2763         destroy_kvm_mmu(vcpu);
2764         free_mmu_pages(vcpu);
2765         mmu_free_memory_caches(vcpu);
2766 }
2767
2768 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2769 {
2770         struct kvm_mmu_page *sp;
2771
2772         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2773                 int i;
2774                 u64 *pt;
2775
2776                 if (!test_bit(slot, sp->slot_bitmap))
2777                         continue;
2778
2779                 pt = sp->spt;
2780                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2781                         /* avoid RMW */
2782                         if (pt[i] & PT_WRITABLE_MASK)
2783                                 pt[i] &= ~PT_WRITABLE_MASK;
2784         }
2785         kvm_flush_remote_tlbs(kvm);
2786 }
2787
2788 void kvm_mmu_zap_all(struct kvm *kvm)
2789 {
2790         struct kvm_mmu_page *sp, *node;
2791
2792         spin_lock(&kvm->mmu_lock);
2793         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2794                 if (kvm_mmu_zap_page(kvm, sp))
2795                         node = container_of(kvm->arch.active_mmu_pages.next,
2796                                             struct kvm_mmu_page, link);
2797         spin_unlock(&kvm->mmu_lock);
2798
2799         kvm_flush_remote_tlbs(kvm);
2800 }
2801
2802 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2803 {
2804         struct kvm_mmu_page *page;
2805
2806         page = container_of(kvm->arch.active_mmu_pages.prev,
2807                             struct kvm_mmu_page, link);
2808         kvm_mmu_zap_page(kvm, page);
2809 }
2810
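/*
 * Memory-pressure callback wired up through mmu_shrinker below: walk the
 * VM list, report the total number of in-use shadow pages, and when asked
 * to scan, zap one page from the first VM whose locks we can take,
 * rotating that VM to the tail of vm_list so the pain is spread around.
 */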
2811 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2812 {
2813         struct kvm *kvm;
2814         struct kvm *kvm_freed = NULL;
2815         int cache_count = 0;
2816
2817         spin_lock(&kvm_lock);
2818
2819         list_for_each_entry(kvm, &vm_list, vm_list) {
2820                 int npages;
2821
2822                 if (!down_read_trylock(&kvm->slots_lock))
2823                         continue;
2824                 spin_lock(&kvm->mmu_lock);
2825                 npages = kvm->arch.n_alloc_mmu_pages -
2826                          kvm->arch.n_free_mmu_pages;
2827                 cache_count += npages;
2828                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2829                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2830                         cache_count--;
2831                         kvm_freed = kvm;
2832                 }
2833                 nr_to_scan--;
2834
2835                 spin_unlock(&kvm->mmu_lock);
2836                 up_read(&kvm->slots_lock);
2837         }
2838         if (kvm_freed)
2839                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2840
2841         spin_unlock(&kvm_lock);
2842
2843         return cache_count;
2844 }
2845
2846 static struct shrinker mmu_shrinker = {
2847         .shrink = mmu_shrink,
2848         .seeks = DEFAULT_SEEKS * 10,
2849 };
2850
2851 static void mmu_destroy_caches(void)
2852 {
2853         if (pte_chain_cache)
2854                 kmem_cache_destroy(pte_chain_cache);
2855         if (rmap_desc_cache)
2856                 kmem_cache_destroy(rmap_desc_cache);
2857         if (mmu_page_header_cache)
2858                 kmem_cache_destroy(mmu_page_header_cache);
2859 }
2860
2861 void kvm_mmu_module_exit(void)
2862 {
2863         mmu_destroy_caches();
2864         unregister_shrinker(&mmu_shrinker);
2865 }
2866
2867 int kvm_mmu_module_init(void)
2868 {
2869         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2870                                             sizeof(struct kvm_pte_chain),
2871                                             0, 0, NULL);
2872         if (!pte_chain_cache)
2873                 goto nomem;
2874         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2875                                             sizeof(struct kvm_rmap_desc),
2876                                             0, 0, NULL);
2877         if (!rmap_desc_cache)
2878                 goto nomem;
2879
2880         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2881                                                   sizeof(struct kvm_mmu_page),
2882                                                   0, 0, NULL);
2883         if (!mmu_page_header_cache)
2884                 goto nomem;
2885
2886         register_shrinker(&mmu_shrinker);
2887
2888         return 0;
2889
2890 nomem:
2891         mmu_destroy_caches();
2892         return -ENOMEM;
2893 }
2894
2895 /*
2896  * Calculate mmu pages needed for kvm.
2897  */
2898 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2899 {
2900         int i;
2901         unsigned int nr_mmu_pages;
2902         unsigned int  nr_pages = 0;
2903
2904         for (i = 0; i < kvm->nmemslots; i++)
2905                 nr_pages += kvm->memslots[i].npages;
2906
2907         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2908         nr_mmu_pages = max(nr_mmu_pages,
2909                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2910
2911         return nr_mmu_pages;
2912 }
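
/*
 * As a ballpark, if KVM_PERMILLE_MMU_PAGES is 20, a guest with 4GB of
 * memory (about one million 4K pages) is sized at roughly 21000 shadow
 * pages, comfortably above the KVM_MIN_ALLOC_MMU_PAGES floor.
 */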
2913
2914 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2915                                 unsigned len)
2916 {
2917         if (len > buffer->len)
2918                 return NULL;
2919         return buffer->ptr;
2920 }
2921
2922 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2923                                 unsigned len)
2924 {
2925         void *ret;
2926
2927         ret = pv_mmu_peek_buffer(buffer, len);
2928         if (!ret)
2929                 return ret;
2930         buffer->ptr += len;
2931         buffer->len -= len;
2932         buffer->processed += len;
2933         return ret;
2934 }
2935
2936 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2937                              gpa_t addr, gpa_t value)
2938 {
2939         int bytes = 8;
2940         int r;
2941
2942         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2943                 bytes = 4;
2944
2945         r = mmu_topup_memory_caches(vcpu);
2946         if (r)
2947                 return r;
2948
2949         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2950                 return -EFAULT;
2951
2952         return 1;
2953 }
2954
2955 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2956 {
2957         kvm_set_cr3(vcpu, vcpu->arch.cr3);
2958         return 1;
2959 }
2960
2961 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2962 {
2963         spin_lock(&vcpu->kvm->mmu_lock);
2964         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2965         spin_unlock(&vcpu->kvm->mmu_lock);
2966         return 1;
2967 }
2968
2969 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2970                              struct kvm_pv_mmu_op_buffer *buffer)
2971 {
2972         struct kvm_mmu_op_header *header;
2973
2974         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2975         if (!header)
2976                 return 0;
2977         switch (header->op) {
2978         case KVM_MMU_OP_WRITE_PTE: {
2979                 struct kvm_mmu_op_write_pte *wpte;
2980
2981                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2982                 if (!wpte)
2983                         return 0;
2984                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2985                                         wpte->pte_val);
2986         }
2987         case KVM_MMU_OP_FLUSH_TLB: {
2988                 struct kvm_mmu_op_flush_tlb *ftlb;
2989
2990                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2991                 if (!ftlb)
2992                         return 0;
2993                 return kvm_pv_mmu_flush_tlb(vcpu);
2994         }
2995         case KVM_MMU_OP_RELEASE_PT: {
2996                 struct kvm_mmu_op_release_pt *rpt;
2997
2998                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2999                 if (!rpt)
3000                         return 0;
3001                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3002         }
3003         default: return 0;
3004         }
3005 }
3006
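/*
 * Entry point for the paravirtual MMU batch interface: copy up to
 * sizeof(buffer->buf) bytes of packed kvm_mmu_op_* records from guest
 * memory at 'addr' and apply them one by one until a record fails or the
 * buffer is exhausted.  *ret reports how many bytes were consumed.
 */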
3007 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3008                   gpa_t addr, unsigned long *ret)
3009 {
3010         int r;
3011         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3012
3013         buffer->ptr = buffer->buf;
3014         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3015         buffer->processed = 0;
3016
3017         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3018         if (r)
3019                 goto out;
3020
3021         while (buffer->len) {
3022                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3023                 if (r < 0)
3024                         goto out;
3025                 if (r == 0)
3026                         break;
3027         }
3028
3029         r = 1;
3030 out:
3031         *ret = buffer->processed;
3032         return r;
3033 }
3034
3035 #ifdef AUDIT
3036
3037 static const char *audit_msg;
3038
3039 static gva_t canonicalize(gva_t gva)
3040 {
3041 #ifdef CONFIG_X86_64
3042         gva = (long long)(gva << 16) >> 16;
3043 #endif
3044         return gva;
3045 }
3046
3047 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3048                                 gva_t va, int level)
3049 {
3050         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3051         int i;
3052         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3053
3054         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3055                 u64 ent = pt[i];
3056
3057                 if (ent == shadow_trap_nonpresent_pte)
3058                         continue;
3059
3060                 va = canonicalize(va);
3061                 if (level > 1) {
3062                         if (ent == shadow_notrap_nonpresent_pte)
3063                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
3064                                        " in nonleaf level: levels %d gva %lx"
3065                                        " level %d pte %llx\n", audit_msg,
3066                                        vcpu->arch.mmu.root_level, va, level, ent);
3067                         else
3068                                 audit_mappings_page(vcpu, ent, va, level - 1);
3069                 } else {
3070                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3071                         gfn_t gfn = gpa >> PAGE_SHIFT;
3072                         pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3073                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3074
3075                         if (is_shadow_present_pte(ent)
3076                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3077                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
3078                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3079                                        audit_msg, vcpu->arch.mmu.root_level,
3080                                        va, gpa, hpa, ent,
3081                                        is_shadow_present_pte(ent));
3082                         else if (ent == shadow_notrap_nonpresent_pte
3083                                  && !is_error_hpa(hpa))
3084                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3085                                        " valid guest gva %lx\n", audit_msg, va);
3086                         kvm_release_pfn_clean(pfn);
3087
3088                 }
3089         }
3090 }
3091
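/*
 * Audit every mapping reachable from the current root: a 4-level (long
 * mode) root is walked directly from gva 0, while a PAE root walks each of
 * the four present pae_root entries, entry i covering the 1GB region
 * starting at i << 30.
 */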
3092 static void audit_mappings(struct kvm_vcpu *vcpu)
3093 {
3094         unsigned i;
3095
3096         if (vcpu->arch.mmu.root_level == 4)
3097                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3098         else
3099                 for (i = 0; i < 4; ++i)
3100                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3101                                 audit_mappings_page(vcpu,
3102                                                     vcpu->arch.mmu.pae_root[i],
3103                                                     i << 30,
3104                                                     2);
3105 }
3106
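/*
 * Count every spte recorded in the reverse maps of all memory slots.  An
 * rmap word is either a single spte pointer (low bit clear) or, with the
 * low bit set, a pointer to a chain of kvm_rmap_desc blocks holding up to
 * RMAP_EXT sptes each.
 */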
3107 static int count_rmaps(struct kvm_vcpu *vcpu)
3108 {
3109         int nmaps = 0;
3110         int i, j, k;
3111
3112         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3113                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3114                 struct kvm_rmap_desc *d;
3115
3116                 for (j = 0; j < m->npages; ++j) {
3117                         unsigned long *rmapp = &m->rmap[j];
3118
3119                         if (!*rmapp)
3120                                 continue;
3121                         if (!(*rmapp & 1)) {
3122                                 ++nmaps;
3123                                 continue;
3124                         }
3125                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3126                         while (d) {
3127                                 for (k = 0; k < RMAP_EXT; ++k)
3128                                         if (d->sptes[k])
3129                                                 ++nmaps;
3130                                         else
3131                                                 break;
3132                                 d = d->more;
3133                         }
3134                 }
3135         }
3136         return nmaps;
3137 }
3138
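/*
 * Count the present, writable sptes installed in the last-level shadow
 * pages currently on the active list.
 */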
3139 static int count_writable_mappings(struct kvm_vcpu *vcpu)
3140 {
3141         int nmaps = 0;
3142         struct kvm_mmu_page *sp;
3143         int i;
3144
3145         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3146                 u64 *pt = sp->spt;
3147
3148                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3149                         continue;
3150
3151                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3152                         u64 ent = pt[i];
3153
3154                         if (!(ent & PT_PRESENT_MASK))
3155                                 continue;
3156                         if (!(ent & PT_WRITABLE_MASK))
3157                                 continue;
3158                         ++nmaps;
3159                 }
3160         }
3161         return nmaps;
3162 }
3163
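/*
 * The audit expects the rmap population and the writable shadow mappings
 * to agree; print the two counts if they do not.
 */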
3164 static void audit_rmap(struct kvm_vcpu *vcpu)
3165 {
3166         int n_rmap = count_rmaps(vcpu);
3167         int n_actual = count_writable_mappings(vcpu);
3168
3169         if (n_rmap != n_actual)
3170                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
3171                        __func__, audit_msg, n_rmap, n_actual);
3172 }
3173
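/*
 * Shadowed guest page tables must be write-protected so that guest writes
 * to them are intercepted.  Walk the non-direct shadow pages and report
 * any whose gfn still has rmap entries, which the audit treats as a
 * leftover writable mapping.
 */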
3174 static void audit_write_protection(struct kvm_vcpu *vcpu)
3175 {
3176         struct kvm_mmu_page *sp;
3177         struct kvm_memory_slot *slot;
3178         unsigned long *rmapp;
3179         gfn_t gfn;
3180
3181         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3182                 if (sp->role.direct)
3183                         continue;
3184
3185                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3186                 slot = gfn_to_memslot_unaliased(vcpu->kvm, gfn);
3187                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3188                 if (*rmapp)
3189                         printk(KERN_ERR "%s: (%s) shadow page has writable"
3190                                " mappings: gfn %lx role %x\n",
3191                                __func__, audit_msg, sp->gfn,
3192                                sp->role.word);
3193         }
3194 }
3195
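/*
 * Run all audit passes for one vcpu.  Debug printing is temporarily
 * disabled so the audit's own output is not interleaved with pgprintk
 * noise, and @msg is stashed in audit_msg so every report identifies the
 * call site.
 */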
3196 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3197 {
3198         int olddbg = dbg;
3199
3200         dbg = 0;
3201         audit_msg = msg;
3202         audit_rmap(vcpu);
3203         audit_write_protection(vcpu);
3204         audit_mappings(vcpu);
3205         dbg = olddbg;
3206 }
3207
3208 #endif