KVM: MMU audit: audit_mappings tweaks
arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36 #include <asm/vmx.h>
37
38 /*
39  * Setting this variable to true enables Two-Dimensional Paging, where
40  * the hardware walks two page tables:
41  * 1. the guest-virtual to guest-physical translation
42  * 2. while doing 1., the guest-physical to host-physical translation
43  * If the hardware supports this, we don't need to do shadow paging.
44  */
45 bool tdp_enabled = false;
46
47 #undef MMU_DEBUG
48
49 #undef AUDIT
50
51 #ifdef AUDIT
52 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
53 #else
54 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
55 #endif
56
57 #ifdef MMU_DEBUG
58
59 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
60 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
61
62 #else
63
64 #define pgprintk(x...) do { } while (0)
65 #define rmap_printk(x...) do { } while (0)
66
67 #endif
68
69 #if defined(MMU_DEBUG) || defined(AUDIT)
70 static int dbg = 0;
71 module_param(dbg, bool, 0644);
72 #endif
73
74 static int oos_shadow = 1;
75 module_param(oos_shadow, bool, 0644);
76
77 #ifndef MMU_DEBUG
78 #define ASSERT(x) do { } while (0)
79 #else
80 #define ASSERT(x)                                                       \
81         if (!(x)) {                                                     \
82                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
83                        __FILE__, __LINE__, #x);                         \
84         }
85 #endif
86
87 #define PT_FIRST_AVAIL_BITS_SHIFT 9
88 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
89
90 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
91
92 #define PT64_LEVEL_BITS 9
93
94 #define PT64_LEVEL_SHIFT(level) \
95                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
96
97 #define PT64_LEVEL_MASK(level) \
98                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
99
100 #define PT64_INDEX(address, level)\
101         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
102
103
104 #define PT32_LEVEL_BITS 10
105
106 #define PT32_LEVEL_SHIFT(level) \
107                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
108
109 #define PT32_LEVEL_MASK(level) \
110                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
111
112 #define PT32_INDEX(address, level)\
113         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
114
115
116 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
117 #define PT64_DIR_BASE_ADDR_MASK \
118         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
119
120 #define PT32_BASE_ADDR_MASK PAGE_MASK
121 #define PT32_DIR_BASE_ADDR_MASK \
122         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
123
124 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
125                         | PT64_NX_MASK)
126
127 #define PFERR_PRESENT_MASK (1U << 0)
128 #define PFERR_WRITE_MASK (1U << 1)
129 #define PFERR_USER_MASK (1U << 2)
130 #define PFERR_RSVD_MASK (1U << 3)
131 #define PFERR_FETCH_MASK (1U << 4)
132
133 #define PT_DIRECTORY_LEVEL 2
134 #define PT_PAGE_TABLE_LEVEL 1
135
136 #define RMAP_EXT 4
137
138 #define ACC_EXEC_MASK    1
139 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
140 #define ACC_USER_MASK    PT_USER_MASK
141 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
142
143 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
144
145 struct kvm_rmap_desc {
146         u64 *sptes[RMAP_EXT];
147         struct kvm_rmap_desc *more;
148 };
149
150 struct kvm_shadow_walk_iterator {
151         u64 addr;
152         hpa_t shadow_addr;
153         int level;
154         u64 *sptep;
155         unsigned index;
156 };
157
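/*
 * for_each_shadow_entry() walks the shadow page table from the root
 * towards the leaf for a given address: shadow_walk_init() seeds the
 * iterator from root_hpa, shadow_walk_okay() computes the index and
 * sptep for the current level, and shadow_walk_next() descends one
 * level (see their definitions further below).
 */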
158 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
159         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
160              shadow_walk_okay(&(_walker));                      \
161              shadow_walk_next(&(_walker)))
162
163
164 struct kvm_unsync_walk {
165         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
166 };
167
168 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
169
170 static struct kmem_cache *pte_chain_cache;
171 static struct kmem_cache *rmap_desc_cache;
172 static struct kmem_cache *mmu_page_header_cache;
173
174 static u64 __read_mostly shadow_trap_nonpresent_pte;
175 static u64 __read_mostly shadow_notrap_nonpresent_pte;
176 static u64 __read_mostly shadow_base_present_pte;
177 static u64 __read_mostly shadow_nx_mask;
178 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
179 static u64 __read_mostly shadow_user_mask;
180 static u64 __read_mostly shadow_accessed_mask;
181 static u64 __read_mostly shadow_dirty_mask;
182
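/* Return a mask with bits s through e (inclusive) set. */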
183 static inline u64 rsvd_bits(int s, int e)
184 {
185         return ((1ULL << (e - s + 1)) - 1) << s;
186 }
187
188 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
189 {
190         shadow_trap_nonpresent_pte = trap_pte;
191         shadow_notrap_nonpresent_pte = notrap_pte;
192 }
193 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
194
195 void kvm_mmu_set_base_ptes(u64 base_pte)
196 {
197         shadow_base_present_pte = base_pte;
198 }
199 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
200
201 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
202                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
203 {
204         shadow_user_mask = user_mask;
205         shadow_accessed_mask = accessed_mask;
206         shadow_dirty_mask = dirty_mask;
207         shadow_nx_mask = nx_mask;
208         shadow_x_mask = x_mask;
209 }
210 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
211
212 static int is_write_protection(struct kvm_vcpu *vcpu)
213 {
214         return vcpu->arch.cr0 & X86_CR0_WP;
215 }
216
217 static int is_cpuid_PSE36(void)
218 {
219         return 1;
220 }
221
222 static int is_nx(struct kvm_vcpu *vcpu)
223 {
224         return vcpu->arch.shadow_efer & EFER_NX;
225 }
226
227 static int is_shadow_present_pte(u64 pte)
228 {
229         return pte != shadow_trap_nonpresent_pte
230                 && pte != shadow_notrap_nonpresent_pte;
231 }
232
233 static int is_large_pte(u64 pte)
234 {
235         return pte & PT_PAGE_SIZE_MASK;
236 }
237
238 static int is_writeble_pte(unsigned long pte)
239 {
240         return pte & PT_WRITABLE_MASK;
241 }
242
243 static int is_dirty_gpte(unsigned long pte)
244 {
245         return pte & PT_DIRTY_MASK;
246 }
247
248 static int is_rmap_spte(u64 pte)
249 {
250         return is_shadow_present_pte(pte);
251 }
252
253 static int is_last_spte(u64 pte, int level)
254 {
255         if (level == PT_PAGE_TABLE_LEVEL)
256                 return 1;
257         if (level == PT_DIRECTORY_LEVEL && is_large_pte(pte))
258                 return 1;
259         return 0;
260 }
261
262 static pfn_t spte_to_pfn(u64 pte)
263 {
264         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
265 }
266
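/*
 * PSE-36: the high physical address bits (bit 32 and up) of a large
 * page are stored in the middle of the gpte; shift them back into
 * place as a gfn delta.
 */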
267 static gfn_t pse36_gfn_delta(u32 gpte)
268 {
269         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
270
271         return (gpte & PT32_DIR_PSE36_MASK) << shift;
272 }
273
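/*
 * Write a shadow pte with a single atomic 64-bit store, so that a
 * concurrent hardware walk never sees a torn spte (this is what the
 * set_64bit() cast dance below is for on 32-bit hosts).
 */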
274 static void __set_spte(u64 *sptep, u64 spte)
275 {
276 #ifdef CONFIG_X86_64
277         set_64bit((unsigned long *)sptep, spte);
278 #else
279         set_64bit((unsigned long long *)sptep, spte);
280 #endif
281 }
282
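/*
 * The mmu_memory_cache machinery pre-allocates objects with GFP_KERNEL
 * while sleeping is still allowed, so that later MMU operations running
 * under mmu_lock can take objects via mmu_memory_cache_alloc() without
 * calling into the allocator (note the BUG_ON(!mc->nobjs) there).
 */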
283 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
284                                   struct kmem_cache *base_cache, int min)
285 {
286         void *obj;
287
288         if (cache->nobjs >= min)
289                 return 0;
290         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
291                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
292                 if (!obj)
293                         return -ENOMEM;
294                 cache->objects[cache->nobjs++] = obj;
295         }
296         return 0;
297 }
298
299 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
300 {
301         while (mc->nobjs)
302                 kfree(mc->objects[--mc->nobjs]);
303 }
304
305 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
306                                        int min)
307 {
308         struct page *page;
309
310         if (cache->nobjs >= min)
311                 return 0;
312         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
313                 page = alloc_page(GFP_KERNEL);
314                 if (!page)
315                         return -ENOMEM;
316                 set_page_private(page, 0);
317                 cache->objects[cache->nobjs++] = page_address(page);
318         }
319         return 0;
320 }
321
322 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
323 {
324         while (mc->nobjs)
325                 free_page((unsigned long)mc->objects[--mc->nobjs]);
326 }
327
328 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
329 {
330         int r;
331
332         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
333                                    pte_chain_cache, 4);
334         if (r)
335                 goto out;
336         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
337                                    rmap_desc_cache, 4);
338         if (r)
339                 goto out;
340         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
341         if (r)
342                 goto out;
343         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
344                                    mmu_page_header_cache, 4);
345 out:
346         return r;
347 }
348
349 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
350 {
351         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
352         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
353         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
354         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
355 }
356
357 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
358                                     size_t size)
359 {
360         void *p;
361
362         BUG_ON(!mc->nobjs);
363         p = mc->objects[--mc->nobjs];
364         return p;
365 }
366
367 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
368 {
369         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
370                                       sizeof(struct kvm_pte_chain));
371 }
372
373 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
374 {
375         kfree(pc);
376 }
377
378 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
379 {
380         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
381                                       sizeof(struct kvm_rmap_desc));
382 }
383
384 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
385 {
386         kfree(rd);
387 }
388
389 /*
390  * Return the pointer to the largepage write count for a given
391  * gfn, handling slots that are not large page aligned.
392  */
393 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
394 {
395         unsigned long idx;
396
397         idx = (gfn / KVM_PAGES_PER_HPAGE) -
398               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
399         return &slot->lpage_info[idx].write_count;
400 }
401
402 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
403 {
404         int *write_count;
405
406         gfn = unalias_gfn(kvm, gfn);
407         write_count = slot_largepage_idx(gfn,
408                                          gfn_to_memslot_unaliased(kvm, gfn));
409         *write_count += 1;
410 }
411
412 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
413 {
414         int *write_count;
415
416         gfn = unalias_gfn(kvm, gfn);
417         write_count = slot_largepage_idx(gfn,
418                                          gfn_to_memslot_unaliased(kvm, gfn));
419         *write_count -= 1;
420         WARN_ON(*write_count < 0);
421 }
422
423 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
424 {
425         struct kvm_memory_slot *slot;
426         int *largepage_idx;
427
428         gfn = unalias_gfn(kvm, gfn);
429         slot = gfn_to_memslot_unaliased(kvm, gfn);
430         if (slot) {
431                 largepage_idx = slot_largepage_idx(gfn, slot);
432                 return *largepage_idx;
433         }
434
435         return 1;
436 }
437
438 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
439 {
440         struct vm_area_struct *vma;
441         unsigned long addr;
442         int ret = 0;
443
444         addr = gfn_to_hva(kvm, gfn);
445         if (kvm_is_error_hva(addr))
446                 return ret;
447
448         down_read(&current->mm->mmap_sem);
449         vma = find_vma(current->mm, addr);
450         if (vma && is_vm_hugetlb_page(vma))
451                 ret = 1;
452         up_read(&current->mm->mmap_sem);
453
454         return ret;
455 }
456
457 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
458 {
459         struct kvm_memory_slot *slot;
460
461         if (has_wrprotected_page(vcpu->kvm, large_gfn))
462                 return 0;
463
464         if (!host_largepage_backed(vcpu->kvm, large_gfn))
465                 return 0;
466
467         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
468         if (slot && slot->dirty_bitmap)
469                 return 0;
470
471         return 1;
472 }
473
474 /*
475  * Take gfn and return the reverse mapping to it.
476  * Note: gfn must be unaliased before this function gets called
477  */
478
479 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
480 {
481         struct kvm_memory_slot *slot;
482         unsigned long idx;
483
484         slot = gfn_to_memslot(kvm, gfn);
485         if (!lpage)
486                 return &slot->rmap[gfn - slot->base_gfn];
487
488         idx = (gfn / KVM_PAGES_PER_HPAGE) -
489               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
490
491         return &slot->lpage_info[idx].rmap_pde;
492 }
493
494 /*
495  * Reverse mapping data structures:
496  *
497  * If rmapp bit zero is zero, then rmapp points to the shadow page table
498  * entry that points to page_address(page).
499  *
500  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
501  * containing more mappings.
502  *
503  * Returns the number of rmap entries before the spte was added or zero if
504  * the spte was not added.
505  *
506  */
507 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
508 {
509         struct kvm_mmu_page *sp;
510         struct kvm_rmap_desc *desc;
511         unsigned long *rmapp;
512         int i, count = 0;
513
514         if (!is_rmap_spte(*spte))
515                 return count;
516         gfn = unalias_gfn(vcpu->kvm, gfn);
517         sp = page_header(__pa(spte));
518         sp->gfns[spte - sp->spt] = gfn;
519         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
520         if (!*rmapp) {
521                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
522                 *rmapp = (unsigned long)spte;
523         } else if (!(*rmapp & 1)) {
524                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
525                 desc = mmu_alloc_rmap_desc(vcpu);
526                 desc->sptes[0] = (u64 *)*rmapp;
527                 desc->sptes[1] = spte;
528                 *rmapp = (unsigned long)desc | 1;
529         } else {
530                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
531                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
532                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
533                         desc = desc->more;
534                         count += RMAP_EXT;
535                 }
536                 if (desc->sptes[RMAP_EXT-1]) {
537                         desc->more = mmu_alloc_rmap_desc(vcpu);
538                         desc = desc->more;
539                 }
540                 for (i = 0; desc->sptes[i]; ++i)
541                         ;
542                 desc->sptes[i] = spte;
543         }
544         return count;
545 }
546
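/*
 * Remove entry i from an rmap descriptor: the last used slot is moved
 * into the hole, and the descriptor itself is unlinked and freed (or
 * collapsed back to a single direct spte pointer in *rmapp) once it
 * becomes empty.
 */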
547 static void rmap_desc_remove_entry(unsigned long *rmapp,
548                                    struct kvm_rmap_desc *desc,
549                                    int i,
550                                    struct kvm_rmap_desc *prev_desc)
551 {
552         int j;
553
554         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
555                 ;
556         desc->sptes[i] = desc->sptes[j];
557         desc->sptes[j] = NULL;
558         if (j != 0)
559                 return;
560         if (!prev_desc && !desc->more)
561                 *rmapp = (unsigned long)desc->sptes[0];
562         else
563                 if (prev_desc)
564                         prev_desc->more = desc->more;
565                 else
566                         *rmapp = (unsigned long)desc->more | 1;
567         mmu_free_rmap_desc(desc);
568 }
569
570 static void rmap_remove(struct kvm *kvm, u64 *spte)
571 {
572         struct kvm_rmap_desc *desc;
573         struct kvm_rmap_desc *prev_desc;
574         struct kvm_mmu_page *sp;
575         pfn_t pfn;
576         unsigned long *rmapp;
577         int i;
578
579         if (!is_rmap_spte(*spte))
580                 return;
581         sp = page_header(__pa(spte));
582         pfn = spte_to_pfn(*spte);
583         if (*spte & shadow_accessed_mask)
584                 kvm_set_pfn_accessed(pfn);
585         if (is_writeble_pte(*spte))
586                 kvm_release_pfn_dirty(pfn);
587         else
588                 kvm_release_pfn_clean(pfn);
589         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
590         if (!*rmapp) {
591                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
592                 BUG();
593         } else if (!(*rmapp & 1)) {
594                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
595                 if ((u64 *)*rmapp != spte) {
596                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
597                                spte, *spte);
598                         BUG();
599                 }
600                 *rmapp = 0;
601         } else {
602                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
603                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
604                 prev_desc = NULL;
605                 while (desc) {
606                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
607                                 if (desc->sptes[i] == spte) {
608                                         rmap_desc_remove_entry(rmapp,
609                                                                desc, i,
610                                                                prev_desc);
611                                         return;
612                                 }
613                         prev_desc = desc;
614                         desc = desc->more;
615                 }
616                 BUG();
617         }
618 }
619
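/*
 * Iterate over the sptes in an rmap chain: pass spte == NULL to get the
 * first spte, or the previous return value to get the one after it;
 * returns NULL when the chain is exhausted.
 */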
620 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
621 {
622         struct kvm_rmap_desc *desc;
623         struct kvm_rmap_desc *prev_desc;
624         u64 *prev_spte;
625         int i;
626
627         if (!*rmapp)
628                 return NULL;
629         else if (!(*rmapp & 1)) {
630                 if (!spte)
631                         return (u64 *)*rmapp;
632                 return NULL;
633         }
634         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
635         prev_desc = NULL;
636         prev_spte = NULL;
637         while (desc) {
638                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
639                         if (prev_spte == spte)
640                                 return desc->sptes[i];
641                         prev_spte = desc->sptes[i];
642                 }
643                 desc = desc->more;
644         }
645         return NULL;
646 }
647
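/*
 * Write protect every spte that maps gfn, for both the 4K rmap and the
 * huge page rmap; writable large sptes are zapped outright rather than
 * just write protected. Returns nonzero if anything changed, in which
 * case the caller must flush remote TLBs.
 */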
648 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
649 {
650         unsigned long *rmapp;
651         u64 *spte;
652         int write_protected = 0;
653
654         gfn = unalias_gfn(kvm, gfn);
655         rmapp = gfn_to_rmap(kvm, gfn, 0);
656
657         spte = rmap_next(kvm, rmapp, NULL);
658         while (spte) {
659                 BUG_ON(!spte);
660                 BUG_ON(!(*spte & PT_PRESENT_MASK));
661                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
662                 if (is_writeble_pte(*spte)) {
663                         __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
664                         write_protected = 1;
665                 }
666                 spte = rmap_next(kvm, rmapp, spte);
667         }
668         if (write_protected) {
669                 pfn_t pfn;
670
671                 spte = rmap_next(kvm, rmapp, NULL);
672                 pfn = spte_to_pfn(*spte);
673                 kvm_set_pfn_dirty(pfn);
674         }
675
676         /* check for huge page mappings */
677         rmapp = gfn_to_rmap(kvm, gfn, 1);
678         spte = rmap_next(kvm, rmapp, NULL);
679         while (spte) {
680                 BUG_ON(!spte);
681                 BUG_ON(!(*spte & PT_PRESENT_MASK));
682                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
683                 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
684                 if (is_writeble_pte(*spte)) {
685                         rmap_remove(kvm, spte);
686                         --kvm->stat.lpages;
687                         __set_spte(spte, shadow_trap_nonpresent_pte);
688                         spte = NULL;
689                         write_protected = 1;
690                 }
691                 spte = rmap_next(kvm, rmapp, spte);
692         }
693
694         return write_protected;
695 }
696
697 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
698 {
699         u64 *spte;
700         int need_tlb_flush = 0;
701
702         while ((spte = rmap_next(kvm, rmapp, NULL))) {
703                 BUG_ON(!(*spte & PT_PRESENT_MASK));
704                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
705                 rmap_remove(kvm, spte);
706                 __set_spte(spte, shadow_trap_nonpresent_pte);
707                 need_tlb_flush = 1;
708         }
709         return need_tlb_flush;
710 }
711
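/*
 * Run handler over the 4K and huge-page rmap chains covering hva in
 * each memslot that contains it; kvm_unmap_hva() and kvm_age_hva()
 * below are thin wrappers around this.
 */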
712 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
713                           int (*handler)(struct kvm *kvm, unsigned long *rmapp))
714 {
715         int i;
716         int retval = 0;
717
718         /*
719          * If mmap_sem isn't taken, we can look at the memslots with only
720          * the mmu_lock by skipping over the slots with userspace_addr == 0.
721          */
722         for (i = 0; i < kvm->nmemslots; i++) {
723                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
724                 unsigned long start = memslot->userspace_addr;
725                 unsigned long end;
726
727                 /* mmu_lock protects userspace_addr */
728                 if (!start)
729                         continue;
730
731                 end = start + (memslot->npages << PAGE_SHIFT);
732                 if (hva >= start && hva < end) {
733                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
734                         retval |= handler(kvm, &memslot->rmap[gfn_offset]);
735                         retval |= handler(kvm,
736                                           &memslot->lpage_info[
737                                                   gfn_offset /
738                                                   KVM_PAGES_PER_HPAGE].rmap_pde);
739                 }
740         }
741
742         return retval;
743 }
744
745 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
746 {
747         return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
748 }
749
750 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
751 {
752         u64 *spte;
753         int young = 0;
754
755         /* always return old for EPT */
756         if (!shadow_accessed_mask)
757                 return 0;
758
759         spte = rmap_next(kvm, rmapp, NULL);
760         while (spte) {
761                 int _young;
762                 u64 _spte = *spte;
763                 BUG_ON(!(_spte & PT_PRESENT_MASK));
764                 _young = _spte & PT_ACCESSED_MASK;
765                 if (_young) {
766                         young = 1;
767                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
768                 }
769                 spte = rmap_next(kvm, rmapp, spte);
770         }
771         return young;
772 }
773
774 #define RMAP_RECYCLE_THRESHOLD 1000
775
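/*
 * Zap every spte mapping gfn and flush remote TLBs; paired with
 * RMAP_RECYCLE_THRESHOLD above to keep rmap chains from growing
 * without bound.
 */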
776 static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
777 {
778         unsigned long *rmapp;
779
780         gfn = unalias_gfn(vcpu->kvm, gfn);
781         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
782
783         kvm_unmap_rmapp(vcpu->kvm, rmapp);
784         kvm_flush_remote_tlbs(vcpu->kvm);
785 }
786
787 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
788 {
789         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
790 }
791
792 #ifdef MMU_DEBUG
793 static int is_empty_shadow_page(u64 *spt)
794 {
795         u64 *pos;
796         u64 *end;
797
798         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
799                 if (is_shadow_present_pte(*pos)) {
800                         printk(KERN_ERR "%s: %p %llx\n", __func__,
801                                pos, *pos);
802                         return 0;
803                 }
804         return 1;
805 }
806 #endif
807
808 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
809 {
810         ASSERT(is_empty_shadow_page(sp->spt));
811         list_del(&sp->link);
812         __free_page(virt_to_page(sp->spt));
813         __free_page(virt_to_page(sp->gfns));
814         kfree(sp);
815         ++kvm->arch.n_free_mmu_pages;
816 }
817
818 static unsigned kvm_page_table_hashfn(gfn_t gfn)
819 {
820         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
821 }
822
823 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
824                                                u64 *parent_pte)
825 {
826         struct kvm_mmu_page *sp;
827
828         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
829         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
830         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
831         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
832         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
833         INIT_LIST_HEAD(&sp->oos_link);
834         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
835         sp->multimapped = 0;
836         sp->parent_pte = parent_pte;
837         --vcpu->kvm->arch.n_free_mmu_pages;
838         return sp;
839 }
840
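/*
 * Track reverse mappings from a shadow page to the parent sptes that
 * point at it: a single parent is kept inline in sp->parent_pte, and
 * the page switches to a chain of kvm_pte_chain blocks (multimapped)
 * once a second parent shows up.
 */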
841 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
842                                     struct kvm_mmu_page *sp, u64 *parent_pte)
843 {
844         struct kvm_pte_chain *pte_chain;
845         struct hlist_node *node;
846         int i;
847
848         if (!parent_pte)
849                 return;
850         if (!sp->multimapped) {
851                 u64 *old = sp->parent_pte;
852
853                 if (!old) {
854                         sp->parent_pte = parent_pte;
855                         return;
856                 }
857                 sp->multimapped = 1;
858                 pte_chain = mmu_alloc_pte_chain(vcpu);
859                 INIT_HLIST_HEAD(&sp->parent_ptes);
860                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
861                 pte_chain->parent_ptes[0] = old;
862         }
863         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
864                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
865                         continue;
866                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
867                         if (!pte_chain->parent_ptes[i]) {
868                                 pte_chain->parent_ptes[i] = parent_pte;
869                                 return;
870                         }
871         }
872         pte_chain = mmu_alloc_pte_chain(vcpu);
873         BUG_ON(!pte_chain);
874         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
875         pte_chain->parent_ptes[0] = parent_pte;
876 }
877
878 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
879                                        u64 *parent_pte)
880 {
881         struct kvm_pte_chain *pte_chain;
882         struct hlist_node *node;
883         int i;
884
885         if (!sp->multimapped) {
886                 BUG_ON(sp->parent_pte != parent_pte);
887                 sp->parent_pte = NULL;
888                 return;
889         }
890         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
891                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
892                         if (!pte_chain->parent_ptes[i])
893                                 break;
894                         if (pte_chain->parent_ptes[i] != parent_pte)
895                                 continue;
896                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
897                                 && pte_chain->parent_ptes[i + 1]) {
898                                 pte_chain->parent_ptes[i]
899                                         = pte_chain->parent_ptes[i + 1];
900                                 ++i;
901                         }
902                         pte_chain->parent_ptes[i] = NULL;
903                         if (i == 0) {
904                                 hlist_del(&pte_chain->link);
905                                 mmu_free_pte_chain(pte_chain);
906                                 if (hlist_empty(&sp->parent_ptes)) {
907                                         sp->multimapped = 0;
908                                         sp->parent_pte = NULL;
909                                 }
910                         }
911                         return;
912                 }
913         BUG();
914 }
915
916
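/*
 * Recursively visit every ancestor shadow page of sp, applying fn to
 * each; handles both the single-parent and the multimapped (pte chain)
 * cases.
 */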
917 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
918                             mmu_parent_walk_fn fn)
919 {
920         struct kvm_pte_chain *pte_chain;
921         struct hlist_node *node;
922         struct kvm_mmu_page *parent_sp;
923         int i;
924
925         if (!sp->multimapped && sp->parent_pte) {
926                 parent_sp = page_header(__pa(sp->parent_pte));
927                 fn(vcpu, parent_sp);
928                 mmu_parent_walk(vcpu, parent_sp, fn);
929                 return;
930         }
931         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
932                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
933                         if (!pte_chain->parent_ptes[i])
934                                 break;
935                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
936                         fn(vcpu, parent_sp);
937                         mmu_parent_walk(vcpu, parent_sp, fn);
938                 }
939 }
940
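/*
 * Propagate "this page has unsync children" information up the shadow
 * page hierarchy: set the parent spte's index in unsync_child_bitmap
 * and bump the parent's unsync_children counter.
 */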
941 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
942 {
943         unsigned int index;
944         struct kvm_mmu_page *sp = page_header(__pa(spte));
945
946         index = spte - sp->spt;
947         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
948                 sp->unsync_children++;
949         WARN_ON(!sp->unsync_children);
950 }
951
952 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
953 {
954         struct kvm_pte_chain *pte_chain;
955         struct hlist_node *node;
956         int i;
957
958         if (!sp->parent_pte)
959                 return;
960
961         if (!sp->multimapped) {
962                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
963                 return;
964         }
965
966         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
967                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
968                         if (!pte_chain->parent_ptes[i])
969                                 break;
970                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
971                 }
972 }
973
974 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
975 {
976         kvm_mmu_update_parents_unsync(sp);
977         return 1;
978 }
979
980 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
981                                         struct kvm_mmu_page *sp)
982 {
983         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
984         kvm_mmu_update_parents_unsync(sp);
985 }
986
987 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
988                                     struct kvm_mmu_page *sp)
989 {
990         int i;
991
992         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
993                 sp->spt[i] = shadow_trap_nonpresent_pte;
994 }
995
996 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
997                                struct kvm_mmu_page *sp)
998 {
999         return 1;
1000 }
1001
1002 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1003 {
1004 }
1005
1006 #define KVM_PAGE_ARRAY_NR 16
1007
1008 struct kvm_mmu_pages {
1009         struct mmu_page_and_offset {
1010                 struct kvm_mmu_page *sp;
1011                 unsigned int idx;
1012         } page[KVM_PAGE_ARRAY_NR];
1013         unsigned int nr;
1014 };
1015
1016 #define for_each_unsync_children(bitmap, idx)           \
1017         for (idx = find_first_bit(bitmap, 512);         \
1018              idx < 512;                                 \
1019              idx = find_next_bit(bitmap, 512, idx+1))
1020
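/*
 * Accumulate unsync shadow pages into a kvm_mmu_pages vector. The walk
 * is restartable: __mmu_unsync_walk() returns -ENOSPC when the vector
 * fills up, and callers simply start a fresh walk after processing the
 * pages gathered so far.
 */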
1021 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1022                          int idx)
1023 {
1024         int i;
1025
1026         if (sp->unsync)
1027                 for (i=0; i < pvec->nr; i++)
1028                         if (pvec->page[i].sp == sp)
1029                                 return 0;
1030
1031         pvec->page[pvec->nr].sp = sp;
1032         pvec->page[pvec->nr].idx = idx;
1033         pvec->nr++;
1034         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1035 }
1036
1037 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1038                            struct kvm_mmu_pages *pvec)
1039 {
1040         int i, ret, nr_unsync_leaf = 0;
1041
1042         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1043                 u64 ent = sp->spt[i];
1044
1045                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1046                         struct kvm_mmu_page *child;
1047                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1048
1049                         if (child->unsync_children) {
1050                                 if (mmu_pages_add(pvec, child, i))
1051                                         return -ENOSPC;
1052
1053                                 ret = __mmu_unsync_walk(child, pvec);
1054                                 if (!ret)
1055                                         __clear_bit(i, sp->unsync_child_bitmap);
1056                                 else if (ret > 0)
1057                                         nr_unsync_leaf += ret;
1058                                 else
1059                                         return ret;
1060                         }
1061
1062                         if (child->unsync) {
1063                                 nr_unsync_leaf++;
1064                                 if (mmu_pages_add(pvec, child, i))
1065                                         return -ENOSPC;
1066                         }
1067                 }
1068         }
1069
1070         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1071                 sp->unsync_children = 0;
1072
1073         return nr_unsync_leaf;
1074 }
1075
1076 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1077                            struct kvm_mmu_pages *pvec)
1078 {
1079         if (!sp->unsync_children)
1080                 return 0;
1081
1082         mmu_pages_add(pvec, sp, 0);
1083         return __mmu_unsync_walk(sp, pvec);
1084 }
1085
1086 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1087 {
1088         unsigned index;
1089         struct hlist_head *bucket;
1090         struct kvm_mmu_page *sp;
1091         struct hlist_node *node;
1092
1093         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1094         index = kvm_page_table_hashfn(gfn);
1095         bucket = &kvm->arch.mmu_page_hash[index];
1096         hlist_for_each_entry(sp, node, bucket, hash_link)
1097                 if (sp->gfn == gfn && !sp->role.direct
1098                     && !sp->role.invalid) {
1099                         pgprintk("%s: found role %x\n",
1100                                  __func__, sp->role.word);
1101                         return sp;
1102                 }
1103         return NULL;
1104 }
1105
1106 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1107 {
1108         WARN_ON(!sp->unsync);
1109         sp->unsync = 0;
1110         --kvm->stat.mmu_unsync;
1111 }
1112
1113 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1114
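/*
 * Bring an unsync shadow page back in sync with the guest page table it
 * shadows. The page is zapped instead if its role no longer matches the
 * current paging mode or if sync_page() fails; returns 1 if the page
 * was zapped.
 */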
1115 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1116 {
1117         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1118                 kvm_mmu_zap_page(vcpu->kvm, sp);
1119                 return 1;
1120         }
1121
1122         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1123                 kvm_flush_remote_tlbs(vcpu->kvm);
1124         kvm_unlink_unsync_page(vcpu->kvm, sp);
1125         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1126                 kvm_mmu_zap_page(vcpu->kvm, sp);
1127                 return 1;
1128         }
1129
1130         kvm_mmu_flush_tlb(vcpu);
1131         return 0;
1132 }
1133
1134 struct mmu_page_path {
1135         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1136         unsigned int idx[PT64_ROOT_LEVEL-1];
1137 };
1138
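/*
 * mmu_page_path records, per level, the parent pages and bitmap indices
 * that led to each page produced by an unsync walk; for_each_sp()
 * iterates a kvm_mmu_pages vector while keeping that path up to date so
 * mmu_pages_clear_parents() can clear the unsync_child_bitmap bits on
 * the way back up.
 */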
1139 #define for_each_sp(pvec, sp, parents, i)                       \
1140                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1141                         sp = pvec.page[i].sp;                   \
1142                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1143                         i = mmu_pages_next(&pvec, &parents, i))
1144
1145 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1146                           struct mmu_page_path *parents,
1147                           int i)
1148 {
1149         int n;
1150
1151         for (n = i+1; n < pvec->nr; n++) {
1152                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1153
1154                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1155                         parents->idx[0] = pvec->page[n].idx;
1156                         return n;
1157                 }
1158
1159                 parents->parent[sp->role.level-2] = sp;
1160                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1161         }
1162
1163         return n;
1164 }
1165
1166 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1167 {
1168         struct kvm_mmu_page *sp;
1169         unsigned int level = 0;
1170
1171         do {
1172                 unsigned int idx = parents->idx[level];
1173
1174                 sp = parents->parent[level];
1175                 if (!sp)
1176                         return;
1177
1178                 --sp->unsync_children;
1179                 WARN_ON((int)sp->unsync_children < 0);
1180                 __clear_bit(idx, sp->unsync_child_bitmap);
1181                 level++;
1182         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1183 }
1184
1185 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1186                                struct mmu_page_path *parents,
1187                                struct kvm_mmu_pages *pvec)
1188 {
1189         parents->parent[parent->role.level-1] = NULL;
1190         pvec->nr = 0;
1191 }
1192
1193 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1194                               struct kvm_mmu_page *parent)
1195 {
1196         int i;
1197         struct kvm_mmu_page *sp;
1198         struct mmu_page_path parents;
1199         struct kvm_mmu_pages pages;
1200
1201         kvm_mmu_pages_init(parent, &parents, &pages);
1202         while (mmu_unsync_walk(parent, &pages)) {
1203                 int protected = 0;
1204
1205                 for_each_sp(pages, sp, parents, i)
1206                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1207
1208                 if (protected)
1209                         kvm_flush_remote_tlbs(vcpu->kvm);
1210
1211                 for_each_sp(pages, sp, parents, i) {
1212                         kvm_sync_page(vcpu, sp);
1213                         mmu_pages_clear_parents(&parents);
1214                 }
1215                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1216                 kvm_mmu_pages_init(parent, &parents, &pages);
1217         }
1218 }
1219
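/*
 * Look up a shadow page for (gfn, role) in the hash table, syncing it
 * first if it is unsync; allocate, hash and (for non-direct pages)
 * write protect a new one on a miss. role.quadrant disambiguates the
 * multiple shadow pages a single 32-bit guest page table maps to.
 */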
1220 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1221                                              gfn_t gfn,
1222                                              gva_t gaddr,
1223                                              unsigned level,
1224                                              int direct,
1225                                              unsigned access,
1226                                              u64 *parent_pte)
1227 {
1228         union kvm_mmu_page_role role;
1229         unsigned index;
1230         unsigned quadrant;
1231         struct hlist_head *bucket;
1232         struct kvm_mmu_page *sp;
1233         struct hlist_node *node, *tmp;
1234
1235         role = vcpu->arch.mmu.base_role;
1236         role.level = level;
1237         role.direct = direct;
1238         role.access = access;
1239         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1240                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1241                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1242                 role.quadrant = quadrant;
1243         }
1244         pgprintk("%s: looking gfn %lx role %x\n", __func__,
1245                  gfn, role.word);
1246         index = kvm_page_table_hashfn(gfn);
1247         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1248         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1249                 if (sp->gfn == gfn) {
1250                         if (sp->unsync)
1251                                 if (kvm_sync_page(vcpu, sp))
1252                                         continue;
1253
1254                         if (sp->role.word != role.word)
1255                                 continue;
1256
1257                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1258                         if (sp->unsync_children) {
1259                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1260                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1261                         }
1262                         pgprintk("%s: found\n", __func__);
1263                         return sp;
1264                 }
1265         ++vcpu->kvm->stat.mmu_cache_miss;
1266         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1267         if (!sp)
1268                 return sp;
1269         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1270         sp->gfn = gfn;
1271         sp->role = role;
1272         hlist_add_head(&sp->hash_link, bucket);
1273         if (!direct) {
1274                 if (rmap_write_protect(vcpu->kvm, gfn))
1275                         kvm_flush_remote_tlbs(vcpu->kvm);
1276                 account_shadowed(vcpu->kvm, gfn);
1277         }
1278         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1279                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1280         else
1281                 nonpaging_prefetch_page(vcpu, sp);
1282         return sp;
1283 }
1284
1285 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1286                              struct kvm_vcpu *vcpu, u64 addr)
1287 {
1288         iterator->addr = addr;
1289         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1290         iterator->level = vcpu->arch.mmu.shadow_root_level;
1291         if (iterator->level == PT32E_ROOT_LEVEL) {
1292                 iterator->shadow_addr
1293                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1294                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1295                 --iterator->level;
1296                 if (!iterator->shadow_addr)
1297                         iterator->level = 0;
1298         }
1299 }
1300
1301 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1302 {
1303         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1304                 return false;
1305         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1306         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1307         return true;
1308 }
1309
1310 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1311 {
1312         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1313         --iterator->level;
1314 }
1315
1316 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1317                                          struct kvm_mmu_page *sp)
1318 {
1319         unsigned i;
1320         u64 *pt;
1321         u64 ent;
1322
1323         pt = sp->spt;
1324
1325         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1326                 ent = pt[i];
1327
1328                 if (is_shadow_present_pte(ent)) {
1329                         if (!is_last_spte(ent, sp->role.level)) {
1330                                 ent &= PT64_BASE_ADDR_MASK;
1331                                 mmu_page_remove_parent_pte(page_header(ent),
1332                                                            &pt[i]);
1333                         } else {
1334                                 if (is_large_pte(ent))
1335                                         --kvm->stat.lpages;
1336                                 rmap_remove(kvm, &pt[i]);
1337                         }
1338                 }
1339                 pt[i] = shadow_trap_nonpresent_pte;
1340         }
1341 }
1342
1343 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1344 {
1345         mmu_page_remove_parent_pte(sp, parent_pte);
1346 }
1347
1348 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1349 {
1350         int i;
1351         struct kvm_vcpu *vcpu;
1352
1353         kvm_for_each_vcpu(i, vcpu, kvm)
1354                 vcpu->arch.last_pte_updated = NULL;
1355 }
1356
1357 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1358 {
1359         u64 *parent_pte;
1360
1361         while (sp->multimapped || sp->parent_pte) {
1362                 if (!sp->multimapped)
1363                         parent_pte = sp->parent_pte;
1364                 else {
1365                         struct kvm_pte_chain *chain;
1366
1367                         chain = container_of(sp->parent_ptes.first,
1368                                              struct kvm_pte_chain, link);
1369                         parent_pte = chain->parent_ptes[0];
1370                 }
1371                 BUG_ON(!parent_pte);
1372                 kvm_mmu_put_page(sp, parent_pte);
1373                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1374         }
1375 }
1376
1377 static int mmu_zap_unsync_children(struct kvm *kvm,
1378                                    struct kvm_mmu_page *parent)
1379 {
1380         int i, zapped = 0;
1381         struct mmu_page_path parents;
1382         struct kvm_mmu_pages pages;
1383
1384         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1385                 return 0;
1386
1387         kvm_mmu_pages_init(parent, &parents, &pages);
1388         while (mmu_unsync_walk(parent, &pages)) {
1389                 struct kvm_mmu_page *sp;
1390
1391                 for_each_sp(pages, sp, parents, i) {
1392                         kvm_mmu_zap_page(kvm, sp);
1393                         mmu_pages_clear_parents(&parents);
1394                 }
1395                 zapped += pages.nr;
1396                 kvm_mmu_pages_init(parent, &parents, &pages);
1397         }
1398
1399         return zapped;
1400 }
1401
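/*
 * Tear down a shadow page: zap unsync children, unlink child and parent
 * sptes, and either free the page or, if it is still in use as a root
 * (root_count != 0), mark it invalid and let the vcpus drop it on the
 * next MMU reload. Returns the number of children zapped.
 */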
1402 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1403 {
1404         int ret;
1405         ++kvm->stat.mmu_shadow_zapped;
1406         ret = mmu_zap_unsync_children(kvm, sp);
1407         kvm_mmu_page_unlink_children(kvm, sp);
1408         kvm_mmu_unlink_parents(kvm, sp);
1409         kvm_flush_remote_tlbs(kvm);
1410         if (!sp->role.invalid && !sp->role.direct)
1411                 unaccount_shadowed(kvm, sp->gfn);
1412         if (sp->unsync)
1413                 kvm_unlink_unsync_page(kvm, sp);
1414         if (!sp->root_count) {
1415                 hlist_del(&sp->hash_link);
1416                 kvm_mmu_free_page(kvm, sp);
1417         } else {
1418                 sp->role.invalid = 1;
1419                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1420                 kvm_reload_remote_mmus(kvm);
1421         }
1422         kvm_mmu_reset_last_pte_updated(kvm);
1423         return ret;
1424 }
1425
1426 /*
1427  * Changing the number of mmu pages allocated to the vm.
1428  * Note: if kvm_nr_mmu_pages is too small, you will get deadlocks.
1429  */
1430 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1431 {
1432         int used_pages;
1433
1434         used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
1435         used_pages = max(0, used_pages);
1436
1437         /*
1438          * If we set the number of mmu pages to be smaller than the
1439          * number of active pages, we must free some mmu pages before we
1440          * change the value.
1441          */
1442
1443         if (used_pages > kvm_nr_mmu_pages) {
1444                 while (used_pages > kvm_nr_mmu_pages) {
1445                         struct kvm_mmu_page *page;
1446
1447                         page = container_of(kvm->arch.active_mmu_pages.prev,
1448                                             struct kvm_mmu_page, link);
1449                         kvm_mmu_zap_page(kvm, page);
1450                         used_pages--;
1451                 }
1452                 kvm->arch.n_free_mmu_pages = 0;
1453         }
1454         else
1455                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1456                                          - kvm->arch.n_alloc_mmu_pages;
1457
1458         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1459 }
1460
1461 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1462 {
1463         unsigned index;
1464         struct hlist_head *bucket;
1465         struct kvm_mmu_page *sp;
1466         struct hlist_node *node, *n;
1467         int r;
1468
1469         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1470         r = 0;
1471         index = kvm_page_table_hashfn(gfn);
1472         bucket = &kvm->arch.mmu_page_hash[index];
1473         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1474                 if (sp->gfn == gfn && !sp->role.direct) {
1475                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1476                                  sp->role.word);
1477                         r = 1;
1478                         if (kvm_mmu_zap_page(kvm, sp))
1479                                 n = bucket->first;
1480                 }
1481         return r;
1482 }
1483
1484 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1485 {
1486         unsigned index;
1487         struct hlist_head *bucket;
1488         struct kvm_mmu_page *sp;
1489         struct hlist_node *node, *nn;
1490
1491         index = kvm_page_table_hashfn(gfn);
1492         bucket = &kvm->arch.mmu_page_hash[index];
1493         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1494                 if (sp->gfn == gfn && !sp->role.direct
1495                     && !sp->role.invalid) {
1496                         pgprintk("%s: zap %lx %x\n",
1497                                  __func__, gfn, sp->role.word);
1498                         kvm_mmu_zap_page(kvm, sp);
1499                 }
1500         }
1501 }
1502
1503 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1504 {
1505         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1506         struct kvm_mmu_page *sp = page_header(__pa(pte));
1507
1508         __set_bit(slot, sp->slot_bitmap);
1509 }
1510
1511 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1512 {
1513         int i;
1514         u64 *pt = sp->spt;
1515
1516         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1517                 return;
1518
1519         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1520                 if (pt[i] == shadow_notrap_nonpresent_pte)
1521                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1522         }
1523 }
1524
1525 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1526 {
1527         struct page *page;
1528
1529         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1530
1531         if (gpa == UNMAPPED_GVA)
1532                 return NULL;
1533
1534         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1535
1536         return page;
1537 }
1538
1539 /*
1540  * The function is based on mtrr_type_lookup() in
1541  * arch/x86/kernel/cpu/mtrr/generic.c
1542  */
1543 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1544                          u64 start, u64 end)
1545 {
1546         int i;
1547         u64 base, mask;
1548         u8 prev_match, curr_match;
1549         int num_var_ranges = KVM_NR_VAR_MTRR;
1550
1551         if (!mtrr_state->enabled)
1552                 return 0xFF;
1553
1554         /* Make end inclusive instead of exclusive */
1555         end--;
1556
1557         /* Look in fixed ranges. Just return the type as per start */
1558         if (mtrr_state->have_fixed && (start < 0x100000)) {
1559                 int idx;
1560
1561                 if (start < 0x80000) {
1562                         idx = 0;
1563                         idx += (start >> 16);
1564                         return mtrr_state->fixed_ranges[idx];
1565                 } else if (start < 0xC0000) {
1566                         idx = 1 * 8;
1567                         idx += ((start - 0x80000) >> 14);
1568                         return mtrr_state->fixed_ranges[idx];
1569                 } else if (start < 0x1000000) {
1570                         idx = 3 * 8;
1571                         idx += ((start - 0xC0000) >> 12);
1572                         return mtrr_state->fixed_ranges[idx];
1573                 }
1574         }
1575
1576         /*
1577          * Look in variable ranges.
1578          * Look for multiple ranges matching this address and pick the type
1579          * as per MTRR precedence.
1580          */
1581         if (!(mtrr_state->enabled & 2))
1582                 return mtrr_state->def_type;
1583
1584         prev_match = 0xFF;
1585         for (i = 0; i < num_var_ranges; ++i) {
1586                 unsigned short start_state, end_state;
1587
1588                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1589                         continue;
1590
1591                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1592                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1593                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1594                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1595
1596                 start_state = ((start & mask) == (base & mask));
1597                 end_state = ((end & mask) == (base & mask));
1598                 if (start_state != end_state)
1599                         return 0xFE;
1600
1601                 if ((start & mask) != (base & mask))
1602                         continue;
1603
1604                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1605                 if (prev_match == 0xFF) {
1606                         prev_match = curr_match;
1607                         continue;
1608                 }
1609
1610                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1611                     curr_match == MTRR_TYPE_UNCACHABLE)
1612                         return MTRR_TYPE_UNCACHABLE;
1613
1614                 if ((prev_match == MTRR_TYPE_WRBACK &&
1615                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1616                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1617                      curr_match == MTRR_TYPE_WRBACK)) {
1618                         prev_match = MTRR_TYPE_WRTHROUGH;
1619                         curr_match = MTRR_TYPE_WRTHROUGH;
1620                 }
1621
1622                 if (prev_match != curr_match)
1623                         return MTRR_TYPE_UNCACHABLE;
1624         }
1625
1626         if (prev_match != 0xFF)
1627                 return prev_match;
1628
1629         return mtrr_state->def_type;
1630 }
1631
1632 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1633 {
1634         u8 mtrr;
1635
1636         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1637                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1638         if (mtrr == 0xfe || mtrr == 0xff)
1639                 mtrr = MTRR_TYPE_WRBACK;
1640         return mtrr;
1641 }
1642 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1643
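/*
 * Mark the shadow of a guest page table unsync, allowing the mapping
 * to be made writable so guest pte writes no longer trap.  Returns 1
 * (refusing) if the gfn is shadowed under more than one role, since
 * only a singly-shadowed gfn can be unsynced.
 */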
1644 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1645 {
1646         unsigned index;
1647         struct hlist_head *bucket;
1648         struct kvm_mmu_page *s;
1649         struct hlist_node *node, *n;
1650
1651         index = kvm_page_table_hashfn(sp->gfn);
1652         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1653         /* don't unsync if pagetable is shadowed with multiple roles */
1654         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1655                 if (s->gfn != sp->gfn || s->role.direct)
1656                         continue;
1657                 if (s->role.word != sp->role.word)
1658                         return 1;
1659         }
1660         ++vcpu->kvm->stat.mmu_unsync;
1661         sp->unsync = 1;
1662
1663         kvm_mmu_mark_parents_unsync(vcpu, sp);
1664
1665         mmu_convert_notrap(sp);
1666         return 0;
1667 }
1668
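/*
 * Decide whether a gfn about to be mapped writable must be write
 * protected instead: returns 1 if the gfn is shadowed as an upper
 * level page table or cannot be unsynced, 0 if it is unshadowed,
 * already unsync, or was successfully unsynced just now.
 */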
1669 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1670                                   bool can_unsync)
1671 {
1672         struct kvm_mmu_page *shadow;
1673
1674         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1675         if (shadow) {
1676                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1677                         return 1;
1678                 if (shadow->unsync)
1679                         return 0;
1680                 if (can_unsync && oos_shadow)
1681                         return kvm_unsync_page(vcpu, shadow);
1682                 return 1;
1683         }
1684         return 0;
1685 }
1686
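/*
 * Build and install a shadow pte for the given gfn/pfn.  Returns
 * nonzero if the requested access had to be downgraded (made
 * read-only, or dropped entirely for a write-protected large page),
 * in which case the caller needs to flush the TLB.
 */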
1687 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1688                     unsigned pte_access, int user_fault,
1689                     int write_fault, int dirty, int largepage,
1690                     gfn_t gfn, pfn_t pfn, bool speculative,
1691                     bool can_unsync)
1692 {
1693         u64 spte;
1694         int ret = 0;
1695
1696         /*
1697          * We don't set the accessed bit, since we sometimes want to see
1698          * whether the guest actually used the pte (in order to detect
1699          * demand paging).
1700          */
1701         spte = shadow_base_present_pte | shadow_dirty_mask;
1702         if (!speculative)
1703                 spte |= shadow_accessed_mask;
1704         if (!dirty)
1705                 pte_access &= ~ACC_WRITE_MASK;
1706         if (pte_access & ACC_EXEC_MASK)
1707                 spte |= shadow_x_mask;
1708         else
1709                 spte |= shadow_nx_mask;
1710         if (pte_access & ACC_USER_MASK)
1711                 spte |= shadow_user_mask;
1712         if (largepage)
1713                 spte |= PT_PAGE_SIZE_MASK;
1714         if (tdp_enabled)
1715                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1716                         kvm_is_mmio_pfn(pfn));
1717
1718         spte |= (u64)pfn << PAGE_SHIFT;
1719
1720         if ((pte_access & ACC_WRITE_MASK)
1721             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1722
1723                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1724                         ret = 1;
1725                         spte = shadow_trap_nonpresent_pte;
1726                         goto set_pte;
1727                 }
1728
1729                 spte |= PT_WRITABLE_MASK;
1730
1731                 /*
1732                  * Optimization: for pte sync, if spte was writable the hash
1733                  * lookup is unnecessary (and expensive). Write protection
1734                  * is responsibility of mmu_get_page / kvm_sync_page.
1735                  * is the responsibility of mmu_get_page / kvm_sync_page.
1736                  */
1737                 if (!can_unsync && is_writeble_pte(*sptep))
1738                         goto set_pte;
1739
1740                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1741                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1742                                  __func__, gfn);
1743                         ret = 1;
1744                         pte_access &= ~ACC_WRITE_MASK;
1745                         if (is_writeble_pte(spte))
1746                                 spte &= ~PT_WRITABLE_MASK;
1747                 }
1748         }
1749
1750         if (pte_access & ACC_WRITE_MASK)
1751                 mark_page_dirty(vcpu->kvm, gfn);
1752
1753 set_pte:
1754         __set_spte(sptep, spte);
1755         return ret;
1756 }
1757
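/*
 * set_spte() wrapper that also keeps the rmap, large-page statistics
 * and pfn reference counts consistent, and remembers the last
 * speculatively updated spte for the pte-write flood heuristic.
 */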
1758 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1759                          unsigned pt_access, unsigned pte_access,
1760                          int user_fault, int write_fault, int dirty,
1761                          int *ptwrite, int largepage, gfn_t gfn,
1762                          pfn_t pfn, bool speculative)
1763 {
1764         int was_rmapped = 0;
1765         int was_writeble = is_writeble_pte(*sptep);
1766         int rmap_count;
1767
1768         pgprintk("%s: spte %llx access %x write_fault %d"
1769                  " user_fault %d gfn %lx\n",
1770                  __func__, *sptep, pt_access,
1771                  write_fault, user_fault, gfn);
1772
1773         if (is_rmap_spte(*sptep)) {
1774                 /*
1775                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1776                  * the parent of the now unreachable PTE.
1777                  */
1778                 if (largepage && !is_large_pte(*sptep)) {
1779                         struct kvm_mmu_page *child;
1780                         u64 pte = *sptep;
1781
1782                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1783                         mmu_page_remove_parent_pte(child, sptep);
1784                 } else if (pfn != spte_to_pfn(*sptep)) {
1785                         pgprintk("hfn old %lx new %lx\n",
1786                                  spte_to_pfn(*sptep), pfn);
1787                         rmap_remove(vcpu->kvm, sptep);
1788                 } else
1789                         was_rmapped = 1;
1790         }
1791         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1792                       dirty, largepage, gfn, pfn, speculative, true)) {
1793                 if (write_fault)
1794                         *ptwrite = 1;
1795                 kvm_x86_ops->tlb_flush(vcpu);
1796         }
1797
1798         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1799                 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1800                          is_large_pte(*sptep) ? "2MB" : "4kB",
1801                          is_shadow_present_pte(*sptep) ? "RW" : "R", gfn,
1802                          *sptep, sptep);
1803         if (!was_rmapped && is_large_pte(*sptep))
1804                 ++vcpu->kvm->stat.lpages;
1805
1806         page_header_update_slot(vcpu->kvm, sptep, gfn);
1807         if (!was_rmapped) {
1808                 rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
1809                 if (!is_rmap_spte(*sptep))
1810                         kvm_release_pfn_clean(pfn);
1811                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1812                         rmap_recycle(vcpu, gfn, largepage);
1813         } else {
1814                 if (was_writeble)
1815                         kvm_release_pfn_dirty(pfn);
1816                 else
1817                         kvm_release_pfn_clean(pfn);
1818         }
1819         if (speculative) {
1820                 vcpu->arch.last_pte_updated = sptep;
1821                 vcpu->arch.last_pte_gfn = gfn;
1822         }
1823 }
1824
1825 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1826 {
1827 }
1828
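/*
 * For direct maps (real mode and tdp) the gfn is mapped 1:1, so walk
 * the shadow table down to the 4K (or, for largepage, 2MB) level,
 * allocating intermediate shadow pages as needed, and install the
 * final spte there.
 */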
1829 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1830                         int largepage, gfn_t gfn, pfn_t pfn)
1831 {
1832         struct kvm_shadow_walk_iterator iterator;
1833         struct kvm_mmu_page *sp;
1834         int pt_write = 0;
1835         gfn_t pseudo_gfn;
1836
1837         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1838                 if (iterator.level == PT_PAGE_TABLE_LEVEL
1839                     || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1840                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1841                                      0, write, 1, &pt_write,
1842                                      largepage, gfn, pfn, false);
1843                         ++vcpu->stat.pf_fixed;
1844                         break;
1845                 }
1846
1847                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1848                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1849                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1850                                               iterator.level - 1,
1851                                               1, ACC_ALL, iterator.sptep);
1852                         if (!sp) {
1853                                 pgprintk("nonpaging_map: ENOMEM\n");
1854                                 kvm_release_pfn_clean(pfn);
1855                                 return -ENOMEM;
1856                         }
1857
1858                         __set_spte(iterator.sptep,
1859                                    __pa(sp->spt)
1860                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
1861                                    | shadow_user_mask | shadow_x_mask);
1862                 }
1863         }
1864         return pt_write;
1865 }
1866
1867 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1868 {
1869         int r;
1870         int largepage = 0;
1871         pfn_t pfn;
1872         unsigned long mmu_seq;
1873
1874         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1875                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1876                 largepage = 1;
1877         }
1878
1879         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1880         smp_rmb();
1881         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1882
1883         /* mmio */
1884         if (is_error_pfn(pfn)) {
1885                 kvm_release_pfn_clean(pfn);
1886                 return 1;
1887         }
1888
1889         spin_lock(&vcpu->kvm->mmu_lock);
1890         if (mmu_notifier_retry(vcpu, mmu_seq))
1891                 goto out_unlock;
1892         kvm_mmu_free_some_pages(vcpu);
1893         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1894         spin_unlock(&vcpu->kvm->mmu_lock);
1895
1896
1897         return r;
1898
1899 out_unlock:
1900         spin_unlock(&vcpu->kvm->mmu_lock);
1901         kvm_release_pfn_clean(pfn);
1902         return 0;
1903 }
1904
1905
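/*
 * Drop the reference the current mmu context holds on its shadow
 * root(s) and mark root_hpa invalid; roots already flagged invalid
 * are zapped once their count reaches zero.
 */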
1906 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1907 {
1908         int i;
1909         struct kvm_mmu_page *sp;
1910
1911         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1912                 return;
1913         spin_lock(&vcpu->kvm->mmu_lock);
1914         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1915                 hpa_t root = vcpu->arch.mmu.root_hpa;
1916
1917                 sp = page_header(root);
1918                 --sp->root_count;
1919                 if (!sp->root_count && sp->role.invalid)
1920                         kvm_mmu_zap_page(vcpu->kvm, sp);
1921                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1922                 spin_unlock(&vcpu->kvm->mmu_lock);
1923                 return;
1924         }
1925         for (i = 0; i < 4; ++i) {
1926                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1927
1928                 if (root) {
1929                         root &= PT64_BASE_ADDR_MASK;
1930                         sp = page_header(root);
1931                         --sp->root_count;
1932                         if (!sp->root_count && sp->role.invalid)
1933                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1934                 }
1935                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1936         }
1937         spin_unlock(&vcpu->kvm->mmu_lock);
1938         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1939 }
1940
1941 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
1942 {
1943         int ret = 0;
1944
1945         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
1946                 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1947                 ret = 1;
1948         }
1949
1950         return ret;
1951 }
1952
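/*
 * (Re)build the shadow roots: one PML4-level root when the shadow
 * tree is 4 levels deep, otherwise four PAE roots derived from the
 * guest pdptrs.  Returns 1 (after requesting a triple fault) if a
 * root gfn is not backed by a visible memslot.
 */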
1953 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1954 {
1955         int i;
1956         gfn_t root_gfn;
1957         struct kvm_mmu_page *sp;
1958         int direct = 0;
1959         u64 pdptr;
1960
1961         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1962
1963         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1964                 hpa_t root = vcpu->arch.mmu.root_hpa;
1965
1966                 ASSERT(!VALID_PAGE(root));
1967                 if (tdp_enabled)
1968                         direct = 1;
1969                 if (mmu_check_root(vcpu, root_gfn))
1970                         return 1;
1971                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1972                                       PT64_ROOT_LEVEL, direct,
1973                                       ACC_ALL, NULL);
1974                 root = __pa(sp->spt);
1975                 ++sp->root_count;
1976                 vcpu->arch.mmu.root_hpa = root;
1977                 return 0;
1978         }
1979         direct = !is_paging(vcpu);
1980         if (tdp_enabled)
1981                 direct = 1;
1982         for (i = 0; i < 4; ++i) {
1983                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1984
1985                 ASSERT(!VALID_PAGE(root));
1986                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1987                         pdptr = kvm_pdptr_read(vcpu, i);
1988                         if (!is_present_gpte(pdptr)) {
1989                                 vcpu->arch.mmu.pae_root[i] = 0;
1990                                 continue;
1991                         }
1992                         root_gfn = pdptr >> PAGE_SHIFT;
1993                 } else if (vcpu->arch.mmu.root_level == 0)
1994                         root_gfn = 0;
1995                 if (mmu_check_root(vcpu, root_gfn))
1996                         return 1;
1997                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1998                                       PT32_ROOT_LEVEL, direct,
1999                                       ACC_ALL, NULL);
2000                 root = __pa(sp->spt);
2001                 ++sp->root_count;
2002                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2003         }
2004         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2005         return 0;
2006 }
2007
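/* Resync any unsync children reachable from the valid roots. */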
2008 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2009 {
2010         int i;
2011         struct kvm_mmu_page *sp;
2012
2013         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2014                 return;
2015         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2016                 hpa_t root = vcpu->arch.mmu.root_hpa;
2017                 sp = page_header(root);
2018                 mmu_sync_children(vcpu, sp);
2019                 return;
2020         }
2021         for (i = 0; i < 4; ++i) {
2022                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2023
2024                 if (root && VALID_PAGE(root)) {
2025                         root &= PT64_BASE_ADDR_MASK;
2026                         sp = page_header(root);
2027                         mmu_sync_children(vcpu, sp);
2028                 }
2029         }
2030 }
2031
2032 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2033 {
2034         spin_lock(&vcpu->kvm->mmu_lock);
2035         mmu_sync_roots(vcpu);
2036         spin_unlock(&vcpu->kvm->mmu_lock);
2037 }
2038
2039 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2040 {
2041         return vaddr;
2042 }
2043
2044 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2045                                 u32 error_code)
2046 {
2047         gfn_t gfn;
2048         int r;
2049
2050         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2051         r = mmu_topup_memory_caches(vcpu);
2052         if (r)
2053                 return r;
2054
2055         ASSERT(vcpu);
2056         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2057
2058         gfn = gva >> PAGE_SHIFT;
2059
2060         return nonpaging_map(vcpu, gva & PAGE_MASK,
2061                              error_code & PFERR_WRITE_MASK, gfn);
2062 }
2063
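/*
 * Page fault handler for two-dimensional paging: the faulting
 * address is a guest physical address here, so the gfn is derived
 * directly from it and mapped via __direct_map().
 */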
2064 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2065                                 u32 error_code)
2066 {
2067         pfn_t pfn;
2068         int r;
2069         int largepage = 0;
2070         gfn_t gfn = gpa >> PAGE_SHIFT;
2071         unsigned long mmu_seq;
2072
2073         ASSERT(vcpu);
2074         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2075
2076         r = mmu_topup_memory_caches(vcpu);
2077         if (r)
2078                 return r;
2079
2080         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
2081                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2082                 largepage = 1;
2083         }
2084         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2085         smp_rmb();
2086         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2087         if (is_error_pfn(pfn)) {
2088                 kvm_release_pfn_clean(pfn);
2089                 return 1;
2090         }
2091         spin_lock(&vcpu->kvm->mmu_lock);
2092         if (mmu_notifier_retry(vcpu, mmu_seq))
2093                 goto out_unlock;
2094         kvm_mmu_free_some_pages(vcpu);
2095         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2096                          largepage, gfn, pfn);
2097         spin_unlock(&vcpu->kvm->mmu_lock);
2098
2099         return r;
2100
2101 out_unlock:
2102         spin_unlock(&vcpu->kvm->mmu_lock);
2103         kvm_release_pfn_clean(pfn);
2104         return 0;
2105 }
2106
2107 static void nonpaging_free(struct kvm_vcpu *vcpu)
2108 {
2109         mmu_free_roots(vcpu);
2110 }
2111
2112 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2113 {
2114         struct kvm_mmu *context = &vcpu->arch.mmu;
2115
2116         context->new_cr3 = nonpaging_new_cr3;
2117         context->page_fault = nonpaging_page_fault;
2118         context->gva_to_gpa = nonpaging_gva_to_gpa;
2119         context->free = nonpaging_free;
2120         context->prefetch_page = nonpaging_prefetch_page;
2121         context->sync_page = nonpaging_sync_page;
2122         context->invlpg = nonpaging_invlpg;
2123         context->root_level = 0;
2124         context->shadow_root_level = PT32E_ROOT_LEVEL;
2125         context->root_hpa = INVALID_PAGE;
2126         return 0;
2127 }
2128
2129 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2130 {
2131         ++vcpu->stat.tlb_flush;
2132         kvm_x86_ops->tlb_flush(vcpu);
2133 }
2134
2135 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2136 {
2137         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2138         mmu_free_roots(vcpu);
2139 }
2140
2141 static void inject_page_fault(struct kvm_vcpu *vcpu,
2142                               u64 addr,
2143                               u32 err_code)
2144 {
2145         kvm_inject_page_fault(vcpu, addr, err_code);
2146 }
2147
2148 static void paging_free(struct kvm_vcpu *vcpu)
2149 {
2150         nonpaging_free(vcpu);
2151 }
2152
2153 static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2154 {
2155         int bit7;
2156
2157         bit7 = (gpte >> 7) & 1;
2158         return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2159 }
2160
2161 #define PTTYPE 64
2162 #include "paging_tmpl.h"
2163 #undef PTTYPE
2164
2165 #define PTTYPE 32
2166 #include "paging_tmpl.h"
2167 #undef PTTYPE
2168
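/*
 * Precompute which guest pte bits are reserved for each paging mode
 * and level, for use by is_rsvd_bits_set() above: index [0] covers
 * entries with bit 7 (PSE/large page) clear, [1] entries with it
 * set.
 */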
2169 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2170 {
2171         struct kvm_mmu *context = &vcpu->arch.mmu;
2172         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2173         u64 exb_bit_rsvd = 0;
2174
2175         if (!is_nx(vcpu))
2176                 exb_bit_rsvd = rsvd_bits(63, 63);
2177         switch (level) {
2178         case PT32_ROOT_LEVEL:
2179                 /* no rsvd bits for 2 level 4K page table entries */
2180                 context->rsvd_bits_mask[0][1] = 0;
2181                 context->rsvd_bits_mask[0][0] = 0;
2182                 if (is_cpuid_PSE36())
2183                         /* 36bits PSE 4MB page */
2184                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2185                 else
2186                         /* 32 bits PSE 4MB page */
2187                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2188                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2189                 break;
2190         case PT32E_ROOT_LEVEL:
2191                 context->rsvd_bits_mask[0][2] =
2192                         rsvd_bits(maxphyaddr, 63) |
2193                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2194                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2195                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2196                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2197                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2198                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2199                         rsvd_bits(maxphyaddr, 62) |
2200                         rsvd_bits(13, 20);              /* large page */
2201                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2202                 break;
2203         case PT64_ROOT_LEVEL:
2204                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2205                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2206                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2207                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2208                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2209                         rsvd_bits(maxphyaddr, 51);
2210                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2211                         rsvd_bits(maxphyaddr, 51);
2212                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2213                 context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
2214                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2215                         rsvd_bits(maxphyaddr, 51) |
2216                         rsvd_bits(13, 20);              /* large page */
2217                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2218                 break;
2219         }
2220 }
2221
2222 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2223 {
2224         struct kvm_mmu *context = &vcpu->arch.mmu;
2225
2226         ASSERT(is_pae(vcpu));
2227         context->new_cr3 = paging_new_cr3;
2228         context->page_fault = paging64_page_fault;
2229         context->gva_to_gpa = paging64_gva_to_gpa;
2230         context->prefetch_page = paging64_prefetch_page;
2231         context->sync_page = paging64_sync_page;
2232         context->invlpg = paging64_invlpg;
2233         context->free = paging_free;
2234         context->root_level = level;
2235         context->shadow_root_level = level;
2236         context->root_hpa = INVALID_PAGE;
2237         return 0;
2238 }
2239
2240 static int paging64_init_context(struct kvm_vcpu *vcpu)
2241 {
2242         reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2243         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2244 }
2245
2246 static int paging32_init_context(struct kvm_vcpu *vcpu)
2247 {
2248         struct kvm_mmu *context = &vcpu->arch.mmu;
2249
2250         reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2251         context->new_cr3 = paging_new_cr3;
2252         context->page_fault = paging32_page_fault;
2253         context->gva_to_gpa = paging32_gva_to_gpa;
2254         context->free = paging_free;
2255         context->prefetch_page = paging32_prefetch_page;
2256         context->sync_page = paging32_sync_page;
2257         context->invlpg = paging32_invlpg;
2258         context->root_level = PT32_ROOT_LEVEL;
2259         context->shadow_root_level = PT32E_ROOT_LEVEL;
2260         context->root_hpa = INVALID_PAGE;
2261         return 0;
2262 }
2263
2264 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2265 {
2266         reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2267         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2268 }
2269
2270 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2271 {
2272         struct kvm_mmu *context = &vcpu->arch.mmu;
2273
2274         context->new_cr3 = nonpaging_new_cr3;
2275         context->page_fault = tdp_page_fault;
2276         context->free = nonpaging_free;
2277         context->prefetch_page = nonpaging_prefetch_page;
2278         context->sync_page = nonpaging_sync_page;
2279         context->invlpg = nonpaging_invlpg;
2280         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2281         context->root_hpa = INVALID_PAGE;
2282
2283         if (!is_paging(vcpu)) {
2284                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2285                 context->root_level = 0;
2286         } else if (is_long_mode(vcpu)) {
2287                 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2288                 context->gva_to_gpa = paging64_gva_to_gpa;
2289                 context->root_level = PT64_ROOT_LEVEL;
2290         } else if (is_pae(vcpu)) {
2291                 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2292                 context->gva_to_gpa = paging64_gva_to_gpa;
2293                 context->root_level = PT32E_ROOT_LEVEL;
2294         } else {
2295                 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2296                 context->gva_to_gpa = paging32_gva_to_gpa;
2297                 context->root_level = PT32_ROOT_LEVEL;
2298         }
2299
2300         return 0;
2301 }
2302
2303 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2304 {
2305         int r;
2306
2307         ASSERT(vcpu);
2308         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2309
2310         if (!is_paging(vcpu))
2311                 r = nonpaging_init_context(vcpu);
2312         else if (is_long_mode(vcpu))
2313                 r = paging64_init_context(vcpu);
2314         else if (is_pae(vcpu))
2315                 r = paging32E_init_context(vcpu);
2316         else
2317                 r = paging32_init_context(vcpu);
2318
2319         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2320
2321         return r;
2322 }
2323
2324 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2325 {
2326         vcpu->arch.update_pte.pfn = bad_pfn;
2327
2328         if (tdp_enabled)
2329                 return init_kvm_tdp_mmu(vcpu);
2330         else
2331                 return init_kvm_softmmu(vcpu);
2332 }
2333
2334 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2335 {
2336         ASSERT(vcpu);
2337         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2338                 vcpu->arch.mmu.free(vcpu);
2339                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2340         }
2341 }
2342
2343 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2344 {
2345         destroy_kvm_mmu(vcpu);
2346         return init_kvm_mmu(vcpu);
2347 }
2348 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2349
2350 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2351 {
2352         int r;
2353
2354         r = mmu_topup_memory_caches(vcpu);
2355         if (r)
2356                 goto out;
2357         spin_lock(&vcpu->kvm->mmu_lock);
2358         kvm_mmu_free_some_pages(vcpu);
2359         r = mmu_alloc_roots(vcpu);
2360         mmu_sync_roots(vcpu);
2361         spin_unlock(&vcpu->kvm->mmu_lock);
2362         if (r)
2363                 goto out;
2364         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2365         kvm_mmu_flush_tlb(vcpu);
2366 out:
2367         return r;
2368 }
2369 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2370
2371 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2372 {
2373         mmu_free_roots(vcpu);
2374 }
2375
2376 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2377                                   struct kvm_mmu_page *sp,
2378                                   u64 *spte)
2379 {
2380         u64 pte;
2381         struct kvm_mmu_page *child;
2382
2383         pte = *spte;
2384         if (is_shadow_present_pte(pte)) {
2385                 if (is_last_spte(pte, sp->role.level))
2386                         rmap_remove(vcpu->kvm, spte);
2387                 else {
2388                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2389                         mmu_page_remove_parent_pte(child, spte);
2390                 }
2391         }
2392         __set_spte(spte, shadow_trap_nonpresent_pte);
2393         if (is_large_pte(pte))
2394                 --vcpu->kvm->stat.lpages;
2395 }
2396
2397 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2398                                   struct kvm_mmu_page *sp,
2399                                   u64 *spte,
2400                                   const void *new)
2401 {
2402         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2403                 if (!vcpu->arch.update_pte.largepage ||
2404                     sp->role.glevels == PT32_ROOT_LEVEL) {
2405                         ++vcpu->kvm->stat.mmu_pde_zapped;
2406                         return;
2407                 }
2408         }
2409
2410         ++vcpu->kvm->stat.mmu_pte_updated;
2411         if (sp->role.glevels == PT32_ROOT_LEVEL)
2412                 paging32_update_pte(vcpu, sp, spte, new);
2413         else
2414                 paging64_update_pte(vcpu, sp, spte, new);
2415 }
2416
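/*
 * A remote TLB flush is needed only when the change can revoke
 * access: the spte going away, its target frame changing, or a
 * permission bit being cleared.  Newly added translations and
 * loosened permissions only get a local flush.
 */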
2417 static bool need_remote_flush(u64 old, u64 new)
2418 {
2419         if (!is_shadow_present_pte(old))
2420                 return false;
2421         if (!is_shadow_present_pte(new))
2422                 return true;
2423         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2424                 return true;
2425         old ^= PT64_NX_MASK;
2426         new ^= PT64_NX_MASK;
2427         return (old & ~new & PT64_PERM_MASK) != 0;
2428 }
2429
2430 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2431 {
2432         if (need_remote_flush(old, new))
2433                 kvm_flush_remote_tlbs(vcpu->kvm);
2434         else
2435                 kvm_mmu_flush_tlb(vcpu);
2436 }
2437
2438 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2439 {
2440         u64 *spte = vcpu->arch.last_pte_updated;
2441
2442         return !!(spte && (*spte & shadow_accessed_mask));
2443 }
2444
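/*
 * Peek at the gpte being written and, if it looks like a present
 * pte, translate and pin the target pfn up front: gfn_to_pfn() may
 * sleep, so it cannot be called later under mmu_lock when
 * mmu_pte_write_new_pte() consumes the result.
 */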
2445 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2446                                           const u8 *new, int bytes)
2447 {
2448         gfn_t gfn;
2449         int r;
2450         u64 gpte = 0;
2451         pfn_t pfn;
2452
2453         vcpu->arch.update_pte.largepage = 0;
2454
2455         if (bytes != 4 && bytes != 8)
2456                 return;
2457
2458         /*
2459          * Assume that the pte write is on a page table of the same type
2460          * as the current vcpu paging mode.  This is nearly always true
2461          * (might be false while changing modes).  Note it is verified later
2462          * by update_pte().
2463          */
2464         if (is_pae(vcpu)) {
2465                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2466                 if ((bytes == 4) && (gpa % 4 == 0)) {
2467                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2468                         if (r)
2469                                 return;
2470                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2471                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2472                         memcpy((void *)&gpte, new, 8);
2473                 }
2474         } else {
2475                 if ((bytes == 4) && (gpa % 4 == 0))
2476                         memcpy((void *)&gpte, new, 4);
2477         }
2478         if (!is_present_gpte(gpte))
2479                 return;
2480         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2481
2482         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2483                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2484                 vcpu->arch.update_pte.largepage = 1;
2485         }
2486         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2487         smp_rmb();
2488         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2489
2490         if (is_error_pfn(pfn)) {
2491                 kvm_release_pfn_clean(pfn);
2492                 return;
2493         }
2494         vcpu->arch.update_pte.gfn = gfn;
2495         vcpu->arch.update_pte.pfn = pfn;
2496 }
2497
2498 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2499 {
2500         u64 *spte = vcpu->arch.last_pte_updated;
2501
2502         if (spte
2503             && vcpu->arch.last_pte_gfn == gfn
2504             && shadow_accessed_mask
2505             && !(*spte & shadow_accessed_mask)
2506             && is_shadow_present_pte(*spte))
2507                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2508 }
2509
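/*
 * Called when a (usually emulated) guest write hits a gpa that is
 * shadowed as a page table: zap the sptes covering the written
 * bytes, try to precompute replacements from the new gpte, and
 * unshadow pages that see misaligned or flooding writes, since such
 * pages are probably not page tables anymore.
 */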
2510 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2511                        const u8 *new, int bytes,
2512                        bool guest_initiated)
2513 {
2514         gfn_t gfn = gpa >> PAGE_SHIFT;
2515         struct kvm_mmu_page *sp;
2516         struct hlist_node *node, *n;
2517         struct hlist_head *bucket;
2518         unsigned index;
2519         u64 entry, gentry;
2520         u64 *spte;
2521         unsigned offset = offset_in_page(gpa);
2522         unsigned pte_size;
2523         unsigned page_offset;
2524         unsigned misaligned;
2525         unsigned quadrant;
2526         int level;
2527         int flooded = 0;
2528         int npte;
2529         int r;
2530
2531         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2532         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2533         spin_lock(&vcpu->kvm->mmu_lock);
2534         kvm_mmu_access_page(vcpu, gfn);
2535         kvm_mmu_free_some_pages(vcpu);
2536         ++vcpu->kvm->stat.mmu_pte_write;
2537         kvm_mmu_audit(vcpu, "pre pte write");
2538         if (guest_initiated) {
2539                 if (gfn == vcpu->arch.last_pt_write_gfn
2540                     && !last_updated_pte_accessed(vcpu)) {
2541                         ++vcpu->arch.last_pt_write_count;
2542                         if (vcpu->arch.last_pt_write_count >= 3)
2543                                 flooded = 1;
2544                 } else {
2545                         vcpu->arch.last_pt_write_gfn = gfn;
2546                         vcpu->arch.last_pt_write_count = 1;
2547                         vcpu->arch.last_pte_updated = NULL;
2548                 }
2549         }
2550         index = kvm_page_table_hashfn(gfn);
2551         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2552         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2553                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2554                         continue;
2555                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2556                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2557                 misaligned |= bytes < 4;
2558                 if (misaligned || flooded) {
2559                         /*
2560                          * Misaligned accesses are too much trouble to fix
2561                          * up; also, they usually indicate a page is not used
2562                          * as a page table.
2563                          *
2564                          * If we're seeing too many writes to a page,
2565                          * it may no longer be a page table, or we may be
2566                          * forking, in which case it is better to unmap the
2567                          * page.
2568                          */
2569                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2570                                  gpa, bytes, sp->role.word);
2571                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2572                                 n = bucket->first;
2573                         ++vcpu->kvm->stat.mmu_flooded;
2574                         continue;
2575                 }
2576                 page_offset = offset;
2577                 level = sp->role.level;
2578                 npte = 1;
2579                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2580                         page_offset <<= 1;      /* 32->64 */
2581                         /*
2582                          * A 32-bit pde maps 4MB while the shadow pdes map
2583                          * only 2MB.  So we need to double the offset again
2584                          * and zap two pdes instead of one.
2585                          */
2586                         if (level == PT32_ROOT_LEVEL) {
2587                                 page_offset &= ~7; /* kill rounding error */
2588                                 page_offset <<= 1;
2589                                 npte = 2;
2590                         }
2591                         quadrant = page_offset >> PAGE_SHIFT;
2592                         page_offset &= ~PAGE_MASK;
2593                         if (quadrant != sp->role.quadrant)
2594                                 continue;
2595                 }
2596                 spte = &sp->spt[page_offset / sizeof(*spte)];
2597                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2598                         gentry = 0;
2599                         r = kvm_read_guest_atomic(vcpu->kvm,
2600                                                   gpa & ~(u64)(pte_size - 1),
2601                                                   &gentry, pte_size);
2602                         new = (const void *)&gentry;
2603                         if (r < 0)
2604                                 new = NULL;
2605                 }
2606                 while (npte--) {
2607                         entry = *spte;
2608                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2609                         if (new)
2610                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2611                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2612                         ++spte;
2613                 }
2614         }
2615         kvm_mmu_audit(vcpu, "post pte write");
2616         spin_unlock(&vcpu->kvm->mmu_lock);
2617         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2618                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2619                 vcpu->arch.update_pte.pfn = bad_pfn;
2620         }
2621 }
2622
2623 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2624 {
2625         gpa_t gpa;
2626         int r;
2627
2628         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2629
2630         spin_lock(&vcpu->kvm->mmu_lock);
2631         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2632         spin_unlock(&vcpu->kvm->mmu_lock);
2633         return r;
2634 }
2635 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2636
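/*
 * Recycle shadow pages from the tail of the active list (the least
 * recently allocated ones) until the free pool is back above
 * KVM_REFILL_PAGES.
 */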
2637 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2638 {
2639         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2640                 struct kvm_mmu_page *sp;
2641
2642                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2643                                   struct kvm_mmu_page, link);
2644                 kvm_mmu_zap_page(vcpu->kvm, sp);
2645                 ++vcpu->kvm->stat.mmu_recycled;
2646         }
2647 }
2648
2649 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2650 {
2651         int r;
2652         enum emulation_result er;
2653
2654         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2655         if (r < 0)
2656                 goto out;
2657
2658         if (!r) {
2659                 r = 1;
2660                 goto out;
2661         }
2662
2663         r = mmu_topup_memory_caches(vcpu);
2664         if (r)
2665                 goto out;
2666
2667         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2668
2669         switch (er) {
2670         case EMULATE_DONE:
2671                 return 1;
2672         case EMULATE_DO_MMIO:
2673                 ++vcpu->stat.mmio_exits;
2674                 return 0;
2675         case EMULATE_FAIL:
2676                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2677                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2678                 return 0;
2679         default:
2680                 BUG();
2681         }
2682 out:
2683         return r;
2684 }
2685 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2686
2687 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2688 {
2689         vcpu->arch.mmu.invlpg(vcpu, gva);
2690         kvm_mmu_flush_tlb(vcpu);
2691         ++vcpu->stat.invlpg;
2692 }
2693 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2694
2695 void kvm_enable_tdp(void)
2696 {
2697         tdp_enabled = true;
2698 }
2699 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2700
2701 void kvm_disable_tdp(void)
2702 {
2703         tdp_enabled = false;
2704 }
2705 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2706
2707 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2708 {
2709         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2710 }
2711
2712 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2713 {
2714         struct page *page;
2715         int i;
2716
2717         ASSERT(vcpu);
2718
2719         if (vcpu->kvm->arch.n_requested_mmu_pages)
2720                 vcpu->kvm->arch.n_free_mmu_pages =
2721                                         vcpu->kvm->arch.n_requested_mmu_pages;
2722         else
2723                 vcpu->kvm->arch.n_free_mmu_pages =
2724                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2725         /*
2726          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2727          * Therefore we need to allocate shadow page tables in the first
2728          * 4GB of memory, which happens to fit the DMA32 zone.
2729          */
2730         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2731         if (!page)
2732                 goto error_1;
2733         vcpu->arch.mmu.pae_root = page_address(page);
2734         for (i = 0; i < 4; ++i)
2735                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2736
2737         return 0;
2738
2739 error_1:
2740         free_mmu_pages(vcpu);
2741         return -ENOMEM;
2742 }
2743
2744 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2745 {
2746         ASSERT(vcpu);
2747         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2748
2749         return alloc_mmu_pages(vcpu);
2750 }
2751
2752 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2753 {
2754         ASSERT(vcpu);
2755         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2756
2757         return init_kvm_mmu(vcpu);
2758 }
2759
2760 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2761 {
2762         ASSERT(vcpu);
2763
2764         destroy_kvm_mmu(vcpu);
2765         free_mmu_pages(vcpu);
2766         mmu_free_memory_caches(vcpu);
2767 }
2768
2769 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2770 {
2771         struct kvm_mmu_page *sp;
2772
2773         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2774                 int i;
2775                 u64 *pt;
2776
2777                 if (!test_bit(slot, sp->slot_bitmap))
2778                         continue;
2779
2780                 pt = sp->spt;
2781                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2782                         /* avoid RMW */
2783                         if (pt[i] & PT_WRITABLE_MASK)
2784                                 pt[i] &= ~PT_WRITABLE_MASK;
2785         }
2786         kvm_flush_remote_tlbs(kvm);
2787 }
2788
2789 void kvm_mmu_zap_all(struct kvm *kvm)
2790 {
2791         struct kvm_mmu_page *sp, *node;
2792
2793         spin_lock(&kvm->mmu_lock);
2794         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2795                 if (kvm_mmu_zap_page(kvm, sp))
2796                         node = container_of(kvm->arch.active_mmu_pages.next,
2797                                             struct kvm_mmu_page, link);
2798         spin_unlock(&kvm->mmu_lock);
2799
2800         kvm_flush_remote_tlbs(kvm);
2801 }
2802
2803 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2804 {
2805         struct kvm_mmu_page *page;
2806
2807         page = container_of(kvm->arch.active_mmu_pages.prev,
2808                             struct kvm_mmu_page, link);
2809         kvm_mmu_zap_page(kvm, page);
2810 }
2811
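/*
 * Slab shrinker callback (old API: the return value is the total
 * number of allocated shadow pages across all VMs).  Under pressure
 * it zaps a single shadow page from the first VM it can lock, then
 * rotates that VM to the tail of vm_list so the pain is spread
 * around.
 */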
2812 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2813 {
2814         struct kvm *kvm;
2815         struct kvm *kvm_freed = NULL;
2816         int cache_count = 0;
2817
2818         spin_lock(&kvm_lock);
2819
2820         list_for_each_entry(kvm, &vm_list, vm_list) {
2821                 int npages;
2822
2823                 if (!down_read_trylock(&kvm->slots_lock))
2824                         continue;
2825                 spin_lock(&kvm->mmu_lock);
2826                 npages = kvm->arch.n_alloc_mmu_pages -
2827                          kvm->arch.n_free_mmu_pages;
2828                 cache_count += npages;
2829                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2830                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2831                         cache_count--;
2832                         kvm_freed = kvm;
2833                 }
2834                 nr_to_scan--;
2835
2836                 spin_unlock(&kvm->mmu_lock);
2837                 up_read(&kvm->slots_lock);
2838         }
2839         if (kvm_freed)
2840                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2841
2842         spin_unlock(&kvm_lock);
2843
2844         return cache_count;
2845 }
2846
2847 static struct shrinker mmu_shrinker = {
2848         .shrink = mmu_shrink,
2849         .seeks = DEFAULT_SEEKS * 10,
2850 };
2851
2852 static void mmu_destroy_caches(void)
2853 {
2854         if (pte_chain_cache)
2855                 kmem_cache_destroy(pte_chain_cache);
2856         if (rmap_desc_cache)
2857                 kmem_cache_destroy(rmap_desc_cache);
2858         if (mmu_page_header_cache)
2859                 kmem_cache_destroy(mmu_page_header_cache);
2860 }
2861
2862 void kvm_mmu_module_exit(void)
2863 {
2864         mmu_destroy_caches();
2865         unregister_shrinker(&mmu_shrinker);
2866 }
2867
2868 int kvm_mmu_module_init(void)
2869 {
2870         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2871                                             sizeof(struct kvm_pte_chain),
2872                                             0, 0, NULL);
2873         if (!pte_chain_cache)
2874                 goto nomem;
2875         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2876                                             sizeof(struct kvm_rmap_desc),
2877                                             0, 0, NULL);
2878         if (!rmap_desc_cache)
2879                 goto nomem;
2880
2881         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2882                                                   sizeof(struct kvm_mmu_page),
2883                                                   0, 0, NULL);
2884         if (!mmu_page_header_cache)
2885                 goto nomem;
2886
2887         register_shrinker(&mmu_shrinker);
2888
2889         return 0;
2890
2891 nomem:
2892         mmu_destroy_caches();
2893         return -ENOMEM;
2894 }
2895
2896 /*
2897  * Calculate mmu pages needed for kvm.
2898  */
2899 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2900 {
2901         int i;
2902         unsigned int nr_mmu_pages;
2903         unsigned int  nr_pages = 0;
2904
2905         for (i = 0; i < kvm->nmemslots; i++)
2906                 nr_pages += kvm->memslots[i].npages;
2907
2908         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2909         nr_mmu_pages = max(nr_mmu_pages,
2910                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2911
2912         return nr_mmu_pages;
2913 }
2914
2915 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2916                                 unsigned len)
2917 {
2918         if (len > buffer->len)
2919                 return NULL;
2920         return buffer->ptr;
2921 }
2922
2923 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2924                                 unsigned len)
2925 {
2926         void *ret;
2927
2928         ret = pv_mmu_peek_buffer(buffer, len);
2929         if (!ret)
2930                 return ret;
2931         buffer->ptr += len;
2932         buffer->len -= len;
2933         buffer->processed += len;
2934         return ret;
2935 }
2936
2937 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2938                              gpa_t addr, gpa_t value)
2939 {
2940         int bytes = 8;
2941         int r;
2942
2943         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2944                 bytes = 4;
2945
2946         r = mmu_topup_memory_caches(vcpu);
2947         if (r)
2948                 return r;
2949
2950         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2951                 return -EFAULT;
2952
2953         return 1;
2954 }
2955
2956 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2957 {
2958         kvm_set_cr3(vcpu, vcpu->arch.cr3);
2959         return 1;
2960 }
2961
2962 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2963 {
2964         spin_lock(&vcpu->kvm->mmu_lock);
2965         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2966         spin_unlock(&vcpu->kvm->mmu_lock);
2967         return 1;
2968 }
2969
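/*
 * Decode and execute a single paravirtual mmu op from the buffer.
 * Returns a positive value to continue processing, 0 to stop (bad or
 * truncated op), negative on error.
 */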
2970 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2971                              struct kvm_pv_mmu_op_buffer *buffer)
2972 {
2973         struct kvm_mmu_op_header *header;
2974
2975         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2976         if (!header)
2977                 return 0;
2978         switch (header->op) {
2979         case KVM_MMU_OP_WRITE_PTE: {
2980                 struct kvm_mmu_op_write_pte *wpte;
2981
2982                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2983                 if (!wpte)
2984                         return 0;
2985                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2986                                         wpte->pte_val);
2987         }
2988         case KVM_MMU_OP_FLUSH_TLB: {
2989                 struct kvm_mmu_op_flush_tlb *ftlb;
2990
2991                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2992                 if (!ftlb)
2993                         return 0;
2994                 return kvm_pv_mmu_flush_tlb(vcpu);
2995         }
2996         case KVM_MMU_OP_RELEASE_PT: {
2997                 struct kvm_mmu_op_release_pt *rpt;
2998
2999                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3000                 if (!rpt)
3001                         return 0;
3002                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3003         }
3004         default: return 0;
3005         }
3006 }
3007
3008 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3009                   gpa_t addr, unsigned long *ret)
3010 {
3011         int r;
3012         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3013
3014         buffer->ptr = buffer->buf;
3015         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3016         buffer->processed = 0;
3017
3018         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3019         if (r)
3020                 goto out;
3021
3022         while (buffer->len) {
3023                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3024                 if (r < 0)
3025                         goto out;
3026                 if (r == 0)
3027                         break;
3028         }
3029
3030         r = 1;
3031 out:
3032         *ret = buffer->processed;
3033         return r;
3034 }
3035
3036 #ifdef AUDIT
3037
3038 static const char *audit_msg;
3039
3040 static gva_t canonicalize(gva_t gva)
3041 {
3042 #ifdef CONFIG_X86_64
3043         gva = (long long)(gva << 16) >> 16;
3044 #endif
3045         return gva;
3046 }
3047
3048
3049 typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
3050                                  u64 *sptep);
3051
3052 static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3053                             inspect_spte_fn fn)
3054 {
3055         int i;
3056
3057         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3058                 u64 ent = sp->spt[i];
3059
3060                 if (is_shadow_present_pte(ent)) {
3061                         if (sp->role.level > 1 && !is_large_pte(ent)) {
3062                                 struct kvm_mmu_page *child;
3063                                 child = page_header(ent & PT64_BASE_ADDR_MASK);
3064                                 __mmu_spte_walk(kvm, child, fn);
3065                         }
3066                         if (sp->role.level == 1)
3067                                 fn(kvm, sp, &sp->spt[i]);
3068                 }
3069         }
3070 }
3071
3072 static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3073 {
3074         int i;
3075         struct kvm_mmu_page *sp;
3076
3077         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3078                 return;
3079         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3080                 hpa_t root = vcpu->arch.mmu.root_hpa;
3081                 sp = page_header(root);
3082                 __mmu_spte_walk(vcpu->kvm, sp, fn);
3083                 return;
3084         }
3085         for (i = 0; i < 4; ++i) {
3086                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3087
3088                 if (root && VALID_PAGE(root)) {
3089                         root &= PT64_BASE_ADDR_MASK;
3090                         sp = page_header(root);
3091                         __mmu_spte_walk(vcpu->kvm, sp, fn);
3092                 }
3093         }
3094         return;
3095 }
3096
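/*
 * Recursively check every spte under this shadow page against what
 * gva_to_gpa() says the guest mapping should be, complaining about
 * present sptes that point at the wrong hpa and about "notrap"
 * entries that shadow a valid guest translation.
 */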
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (is_shadow_present_pte(ent))
				audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			gfn_t gfn = gpa >> PAGE_SHIFT;
			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;

			if (is_error_pfn(pfn)) {
				kvm_release_pfn_clean(pfn);
				continue;
			}

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_pfn_clean(pfn);
		}
	}
}

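/* Audit from the 4-level root, or from each present PAE root. */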
static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

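/*
 * Count every spte recorded in the rmaps of all memslots.  An rmap head
 * either points at a single spte directly, or, with its low bit set, at
 * a chain of kvm_rmap_desc blocks holding RMAP_EXT sptes each.
 */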
static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->sptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

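/*
 * inspect_spte_fn callback: a writable spte must be reachable through the
 * rmap of the gfn it maps, so complain (rate-limited) when the gfn has no
 * memslot or the rmap head is empty.
 */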
static void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp,
				  u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	if (*sptep & PT_WRITABLE_MASK) {
		rev_sp = page_header(__pa(sptep));
		gfn = rev_sp->gfns[sptep - rev_sp->spt];

		if (!gfn_to_memslot(kvm, gfn)) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no memslot for gfn %lx\n",
			       audit_msg, gfn);
			printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
			       audit_msg, sptep - rev_sp->spt,
			       rev_sp->gfn);
			dump_stack();
			return;
		}

		rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt], 0);
		if (!*rmapp) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
			       audit_msg, *sptep);
			dump_stack();
		}
	}
}

static void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

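/*
 * Same invariant, checked the other way around: scan every last-level
 * shadow page on the active list and run the rmap check on each spte
 * that is both present and writable.
 */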
static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
		}
	}
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	check_writable_mappings_rmap(vcpu);
	count_rmaps(vcpu);
}

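/*
 * A synced, indirect shadow page implies the guest page table it shadows
 * is write-protected, so its gfn must have no writable mapping left in
 * the rmap.  Direct and unsync pages are exempt from this invariant.
 */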
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.direct)
			continue;
		if (sp->unsync)
			continue;

		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		slot = gfn_to_memslot_unaliased(vcpu->kvm, gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
			if (*spte & PT_WRITABLE_MASK)
				printk(KERN_ERR "%s: (%s) shadow page has "
				       "writable mappings: gfn %lx role %x\n",
				       __func__, audit_msg, sp->gfn,
				       sp->role.word);
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
	}
}

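/*
 * Run all audit passes with debug printing suppressed.  audit_mappings
 * is skipped for the "pre pte write" hook, presumably because the guest
 * pte has already changed in memory while the shadow has not yet caught
 * up, so shadow and guest translations may legitimately disagree there.
 */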
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
	audit_writable_sptes_have_rmaps(vcpu);
	dbg = olddbg;
}

#endif