KVM: add support for change_pte mmu notifiers
arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36 #include <asm/vmx.h>
37
38 /*
39  * When this variable is set to true it enables Two-Dimensional Paging
40  * (TDP), where the hardware walks two page tables:
41  * 1. the guest-virtual to guest-physical translation
42  * 2. while doing 1, the guest-physical to host-physical translation
43  * If the hardware supports that, we don't need to do shadow paging.
44  */
45 bool tdp_enabled = false;
46
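/*
 * Illustrative note (not part of the original file): with TDP a guest
 * access is resolved in two dimensions.  The guest's own page tables,
 * reached through the guest CR3, translate guest-virtual to
 * guest-physical; every guest-physical reference made during that walk,
 * and the final access itself, is then translated guest-physical to
 * host-physical by the second-dimension tables (EPT on VMX, NPT on SVM).
 * Without TDP this file has to maintain the combined guest-virtual ->
 * host-physical mapping in software, i.e. shadow paging.
 */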
47 #undef MMU_DEBUG
48
49 #undef AUDIT
50
51 #ifdef AUDIT
52 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
53 #else
54 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
55 #endif
56
57 #ifdef MMU_DEBUG
58
59 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
60 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
61
62 #else
63
64 #define pgprintk(x...) do { } while (0)
65 #define rmap_printk(x...) do { } while (0)
66
67 #endif
68
69 #if defined(MMU_DEBUG) || defined(AUDIT)
70 static int dbg = 0;
71 module_param(dbg, bool, 0644);
72 #endif
73
74 static int oos_shadow = 1;
75 module_param(oos_shadow, bool, 0644);
76
77 #ifndef MMU_DEBUG
78 #define ASSERT(x) do { } while (0)
79 #else
80 #define ASSERT(x)                                                       \
81         if (!(x)) {                                                     \
82                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
83                        __FILE__, __LINE__, #x);                         \
84         }
85 #endif
86
87 #define PT_FIRST_AVAIL_BITS_SHIFT 9
88 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
89
90 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
91
92 #define PT64_LEVEL_BITS 9
93
94 #define PT64_LEVEL_SHIFT(level) \
95                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
96
97 #define PT64_LEVEL_MASK(level) \
98                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
99
100 #define PT64_INDEX(address, level)\
101         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
102
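/*
 * Worked example (added, assuming 4KB pages so PAGE_SHIFT == 12): each
 * 64-bit paging level indexes 512 entries with 9 bits, so
 * PT64_LEVEL_SHIFT() evaluates to 12, 21, 30 and 39 for levels 1..4, and
 * PT64_INDEX(addr, level) is just (addr >> shift) & 511, the 9-bit slice
 * of the address that selects the entry at that level.
 */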
103
104 #define PT32_LEVEL_BITS 10
105
106 #define PT32_LEVEL_SHIFT(level) \
107                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
108
109 #define PT32_LEVEL_MASK(level) \
110                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
111 #define PT32_LVL_OFFSET_MASK(level) \
112         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
113                                                 * PT32_LEVEL_BITS))) - 1))
114
115 #define PT32_INDEX(address, level)\
116         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
117
118
119 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
120 #define PT64_DIR_BASE_ADDR_MASK \
121         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
122 #define PT64_LVL_ADDR_MASK(level) \
123         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
124                                                 * PT64_LEVEL_BITS))) - 1))
125 #define PT64_LVL_OFFSET_MASK(level) \
126         (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
127                                                 * PT64_LEVEL_BITS))) - 1))
128
129 #define PT32_BASE_ADDR_MASK PAGE_MASK
130 #define PT32_DIR_BASE_ADDR_MASK \
131         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132 #define PT32_LVL_ADDR_MASK(level) \
133         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134                                             * PT32_LEVEL_BITS))) - 1))
135
136 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
137                         | PT64_NX_MASK)
138
139 #define PFERR_PRESENT_MASK (1U << 0)
140 #define PFERR_WRITE_MASK (1U << 1)
141 #define PFERR_USER_MASK (1U << 2)
142 #define PFERR_RSVD_MASK (1U << 3)
143 #define PFERR_FETCH_MASK (1U << 4)
144
145 #define PT_PDPE_LEVEL 3
146 #define PT_DIRECTORY_LEVEL 2
147 #define PT_PAGE_TABLE_LEVEL 1
148
149 #define RMAP_EXT 4
150
151 #define ACC_EXEC_MASK    1
152 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
153 #define ACC_USER_MASK    PT_USER_MASK
154 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
155
156 #define CREATE_TRACE_POINTS
157 #include "mmutrace.h"
158
159 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
160
161 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
162
163 struct kvm_rmap_desc {
164         u64 *sptes[RMAP_EXT];
165         struct kvm_rmap_desc *more;
166 };
167
168 struct kvm_shadow_walk_iterator {
169         u64 addr;
170         hpa_t shadow_addr;
171         int level;
172         u64 *sptep;
173         unsigned index;
174 };
175
176 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
177         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
178              shadow_walk_okay(&(_walker));                      \
179              shadow_walk_next(&(_walker)))
180
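/*
 * Usage sketch for the iterator above (added; mirrors the walkers later
 * in this file):
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, addr, it) {
 *		... it.sptep points at the spte for 'addr' at it.level ...
 *	}
 *
 * shadow_walk_init(), shadow_walk_okay() and shadow_walk_next(), defined
 * further down, provide the actual iteration logic.
 */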
181
182 struct kvm_unsync_walk {
183         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
184 };
185
186 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
187
188 static struct kmem_cache *pte_chain_cache;
189 static struct kmem_cache *rmap_desc_cache;
190 static struct kmem_cache *mmu_page_header_cache;
191
192 static u64 __read_mostly shadow_trap_nonpresent_pte;
193 static u64 __read_mostly shadow_notrap_nonpresent_pte;
194 static u64 __read_mostly shadow_base_present_pte;
195 static u64 __read_mostly shadow_nx_mask;
196 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
197 static u64 __read_mostly shadow_user_mask;
198 static u64 __read_mostly shadow_accessed_mask;
199 static u64 __read_mostly shadow_dirty_mask;
200
201 static inline u64 rsvd_bits(int s, int e)
202 {
203         return ((1ULL << (e - s + 1)) - 1) << s;
204 }
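/*
 * Example (added): rsvd_bits(51, 62) is ((1ULL << 12) - 1) << 51, a mask
 * with bits 51..62 set, the usual building block for checking reserved
 * physical-address bits in guest ptes.
 */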
205
206 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
207 {
208         shadow_trap_nonpresent_pte = trap_pte;
209         shadow_notrap_nonpresent_pte = notrap_pte;
210 }
211 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
212
213 void kvm_mmu_set_base_ptes(u64 base_pte)
214 {
215         shadow_base_present_pte = base_pte;
216 }
217 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
218
219 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
220                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
221 {
222         shadow_user_mask = user_mask;
223         shadow_accessed_mask = accessed_mask;
224         shadow_dirty_mask = dirty_mask;
225         shadow_nx_mask = nx_mask;
226         shadow_x_mask = x_mask;
227 }
228 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
229
230 static int is_write_protection(struct kvm_vcpu *vcpu)
231 {
232         return vcpu->arch.cr0 & X86_CR0_WP;
233 }
234
235 static int is_cpuid_PSE36(void)
236 {
237         return 1;
238 }
239
240 static int is_nx(struct kvm_vcpu *vcpu)
241 {
242         return vcpu->arch.shadow_efer & EFER_NX;
243 }
244
245 static int is_shadow_present_pte(u64 pte)
246 {
247         return pte != shadow_trap_nonpresent_pte
248                 && pte != shadow_notrap_nonpresent_pte;
249 }
250
251 static int is_large_pte(u64 pte)
252 {
253         return pte & PT_PAGE_SIZE_MASK;
254 }
255
256 static int is_writeble_pte(unsigned long pte)
257 {
258         return pte & PT_WRITABLE_MASK;
259 }
260
261 static int is_dirty_gpte(unsigned long pte)
262 {
263         return pte & PT_DIRTY_MASK;
264 }
265
266 static int is_rmap_spte(u64 pte)
267 {
268         return is_shadow_present_pte(pte);
269 }
270
271 static int is_last_spte(u64 pte, int level)
272 {
273         if (level == PT_PAGE_TABLE_LEVEL)
274                 return 1;
275         if (is_large_pte(pte))
276                 return 1;
277         return 0;
278 }
279
280 static pfn_t spte_to_pfn(u64 pte)
281 {
282         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
283 }
284
285 static gfn_t pse36_gfn_delta(u32 gpte)
286 {
287         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
288
289         return (gpte & PT32_DIR_PSE36_MASK) << shift;
290 }
291
292 static void __set_spte(u64 *sptep, u64 spte)
293 {
294 #ifdef CONFIG_X86_64
295         set_64bit((unsigned long *)sptep, spte);
296 #else
297         set_64bit((unsigned long long *)sptep, spte);
298 #endif
299 }
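/*
 * Added note: set_64bit() stores the whole 64-bit spte atomically even on
 * 32-bit PAE hosts, so the hardware walker never sees a torn, half-written
 * entry.
 */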
300
301 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
302                                   struct kmem_cache *base_cache, int min)
303 {
304         void *obj;
305
306         if (cache->nobjs >= min)
307                 return 0;
308         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
309                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
310                 if (!obj)
311                         return -ENOMEM;
312                 cache->objects[cache->nobjs++] = obj;
313         }
314         return 0;
315 }
316
317 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
318 {
319         while (mc->nobjs)
320                 kfree(mc->objects[--mc->nobjs]);
321 }
322
323 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
324                                        int min)
325 {
326         struct page *page;
327
328         if (cache->nobjs >= min)
329                 return 0;
330         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
331                 page = alloc_page(GFP_KERNEL);
332                 if (!page)
333                         return -ENOMEM;
334                 set_page_private(page, 0);
335                 cache->objects[cache->nobjs++] = page_address(page);
336         }
337         return 0;
338 }
339
340 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
341 {
342         while (mc->nobjs)
343                 free_page((unsigned long)mc->objects[--mc->nobjs]);
344 }
345
346 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
347 {
348         int r;
349
350         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
351                                    pte_chain_cache, 4);
352         if (r)
353                 goto out;
354         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
355                                    rmap_desc_cache, 4);
356         if (r)
357                 goto out;
358         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
359         if (r)
360                 goto out;
361         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
362                                    mmu_page_header_cache, 4);
363 out:
364         return r;
365 }
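/*
 * Added note: the caches are topped up here, with sleeping GFP_KERNEL
 * allocations, before the fault paths take mmu_lock; afterwards
 * mmu_memory_cache_alloc() only pops a preallocated object, so nothing
 * needs to allocate while the spinlock is held.
 */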
366
367 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
368 {
369         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
370         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
371         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
372         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
373 }
374
375 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
376                                     size_t size)
377 {
378         void *p;
379
380         BUG_ON(!mc->nobjs);
381         p = mc->objects[--mc->nobjs];
382         return p;
383 }
384
385 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
386 {
387         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
388                                       sizeof(struct kvm_pte_chain));
389 }
390
391 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
392 {
393         kfree(pc);
394 }
395
396 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
397 {
398         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
399                                       sizeof(struct kvm_rmap_desc));
400 }
401
402 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
403 {
404         kfree(rd);
405 }
406
407 /*
408  * Return the pointer to the largepage write count for a given
409  * gfn, handling slots that are not large page aligned.
410  */
411 static int *slot_largepage_idx(gfn_t gfn,
412                                struct kvm_memory_slot *slot,
413                                int level)
414 {
415         unsigned long idx;
416
417         idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
418               (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
419         return &slot->lpage_info[level - 2][idx].write_count;
420 }
421
422 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
423 {
424         struct kvm_memory_slot *slot;
425         int *write_count;
426         int i;
427
428         gfn = unalias_gfn(kvm, gfn);
429
430         slot = gfn_to_memslot_unaliased(kvm, gfn);
431         for (i = PT_DIRECTORY_LEVEL;
432              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
433                 write_count   = slot_largepage_idx(gfn, slot, i);
434                 *write_count += 1;
435         }
436 }
437
438 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
439 {
440         struct kvm_memory_slot *slot;
441         int *write_count;
442         int i;
443
444         gfn = unalias_gfn(kvm, gfn);
445         for (i = PT_DIRECTORY_LEVEL;
446              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
447                 slot          = gfn_to_memslot_unaliased(kvm, gfn);
448                 write_count   = slot_largepage_idx(gfn, slot, i);
449                 *write_count -= 1;
450                 WARN_ON(*write_count < 0);
451         }
452 }
453
454 static int has_wrprotected_page(struct kvm *kvm,
455                                 gfn_t gfn,
456                                 int level)
457 {
458         struct kvm_memory_slot *slot;
459         int *largepage_idx;
460
461         gfn = unalias_gfn(kvm, gfn);
462         slot = gfn_to_memslot_unaliased(kvm, gfn);
463         if (slot) {
464                 largepage_idx = slot_largepage_idx(gfn, slot, level);
465                 return *largepage_idx;
466         }
467
468         return 1;
469 }
470
471 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
472 {
473         unsigned long page_size = PAGE_SIZE;
474         struct vm_area_struct *vma;
475         unsigned long addr;
476         int i, ret = 0;
477
478         addr = gfn_to_hva(kvm, gfn);
479         if (kvm_is_error_hva(addr))
480                 return page_size;
481
482         down_read(&current->mm->mmap_sem);
483         vma = find_vma(current->mm, addr);
484         if (!vma)
485                 goto out;
486
487         page_size = vma_kernel_pagesize(vma);
488
489 out:
490         up_read(&current->mm->mmap_sem);
491
492         for (i = PT_PAGE_TABLE_LEVEL;
493              i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
494                 if (page_size >= KVM_HPAGE_SIZE(i))
495                         ret = i;
496                 else
497                         break;
498         }
499
500         return ret;
501 }
502
503 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
504 {
505         struct kvm_memory_slot *slot;
506         int host_level;
507         int level = PT_PAGE_TABLE_LEVEL;
508
509         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
510         if (slot && slot->dirty_bitmap)
511                 return PT_PAGE_TABLE_LEVEL;
512
513         host_level = host_mapping_level(vcpu->kvm, large_gfn);
514
515         if (host_level == PT_PAGE_TABLE_LEVEL)
516                 return host_level;
517
518         for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
519
520                 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
521                         break;
522         }
523
524         return level - 1;
525 }
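/*
 * Added note: the loop above stops at the first level whose range contains
 * a write-protected (shadowed) gfn, or one past the host mapping level, so
 * level - 1 is the largest level considered safe for a large-page mapping.
 */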
526
527 /*
528  * Take gfn and return the reverse mapping to it.
529  * Note: gfn must be unaliased before this function gets called
530  */
531
532 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
533 {
534         struct kvm_memory_slot *slot;
535         unsigned long idx;
536
537         slot = gfn_to_memslot(kvm, gfn);
538         if (likely(level == PT_PAGE_TABLE_LEVEL))
539                 return &slot->rmap[gfn - slot->base_gfn];
540
541         idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
542                 (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
543
544         return &slot->lpage_info[level - 2][idx].rmap_pde;
545 }
546
547 /*
548  * Reverse mapping data structures:
549  *
550  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
551  * that points to page_address(page).
552  *
553  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
554  * containing more mappings.
555  *
556  * Returns the number of rmap entries before the spte was added or zero if
557  * the spte was not added.
558  *
559  */
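/*
 * Added decode sketch for the encoding described above: with v = *rmapp,
 *
 *	v == 0		no spte currently maps this gfn
 *	(v & 1) == 0	(u64 *)v is the single spte mapping it
 *	(v & 1) == 1	(struct kvm_rmap_desc *)(v & ~1ul) heads a chain of
 *			descriptors, each holding up to RMAP_EXT sptes
 *
 * This is the test order used by rmap_add(), rmap_remove() and rmap_next()
 * below.
 */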
560 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
561 {
562         struct kvm_mmu_page *sp;
563         struct kvm_rmap_desc *desc;
564         unsigned long *rmapp;
565         int i, count = 0;
566
567         if (!is_rmap_spte(*spte))
568                 return count;
569         gfn = unalias_gfn(vcpu->kvm, gfn);
570         sp = page_header(__pa(spte));
571         sp->gfns[spte - sp->spt] = gfn;
572         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
573         if (!*rmapp) {
574                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
575                 *rmapp = (unsigned long)spte;
576         } else if (!(*rmapp & 1)) {
577                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
578                 desc = mmu_alloc_rmap_desc(vcpu);
579                 desc->sptes[0] = (u64 *)*rmapp;
580                 desc->sptes[1] = spte;
581                 *rmapp = (unsigned long)desc | 1;
582         } else {
583                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
584                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
585                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
586                         desc = desc->more;
587                         count += RMAP_EXT;
588                 }
589                 if (desc->sptes[RMAP_EXT-1]) {
590                         desc->more = mmu_alloc_rmap_desc(vcpu);
591                         desc = desc->more;
592                 }
593                 for (i = 0; desc->sptes[i]; ++i)
594                         ;
595                 desc->sptes[i] = spte;
596         }
597         return count;
598 }
599
600 static void rmap_desc_remove_entry(unsigned long *rmapp,
601                                    struct kvm_rmap_desc *desc,
602                                    int i,
603                                    struct kvm_rmap_desc *prev_desc)
604 {
605         int j;
606
607         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
608                 ;
609         desc->sptes[i] = desc->sptes[j];
610         desc->sptes[j] = NULL;
611         if (j != 0)
612                 return;
613         if (!prev_desc && !desc->more)
614                 *rmapp = (unsigned long)desc->sptes[0];
615         else
616                 if (prev_desc)
617                         prev_desc->more = desc->more;
618                 else
619                         *rmapp = (unsigned long)desc->more | 1;
620         mmu_free_rmap_desc(desc);
621 }
622
623 static void rmap_remove(struct kvm *kvm, u64 *spte)
624 {
625         struct kvm_rmap_desc *desc;
626         struct kvm_rmap_desc *prev_desc;
627         struct kvm_mmu_page *sp;
628         pfn_t pfn;
629         unsigned long *rmapp;
630         int i;
631
632         if (!is_rmap_spte(*spte))
633                 return;
634         sp = page_header(__pa(spte));
635         pfn = spte_to_pfn(*spte);
636         if (*spte & shadow_accessed_mask)
637                 kvm_set_pfn_accessed(pfn);
638         if (is_writeble_pte(*spte))
639                 kvm_set_pfn_dirty(pfn);
640         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
641         if (!*rmapp) {
642                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
643                 BUG();
644         } else if (!(*rmapp & 1)) {
645                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
646                 if ((u64 *)*rmapp != spte) {
647                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
648                                spte, *spte);
649                         BUG();
650                 }
651                 *rmapp = 0;
652         } else {
653                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
654                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
655                 prev_desc = NULL;
656                 while (desc) {
657                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
658                                 if (desc->sptes[i] == spte) {
659                                         rmap_desc_remove_entry(rmapp,
660                                                                desc, i,
661                                                                prev_desc);
662                                         return;
663                                 }
664                         prev_desc = desc;
665                         desc = desc->more;
666                 }
667                 BUG();
668         }
669 }
670
671 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
672 {
673         struct kvm_rmap_desc *desc;
674         struct kvm_rmap_desc *prev_desc;
675         u64 *prev_spte;
676         int i;
677
678         if (!*rmapp)
679                 return NULL;
680         else if (!(*rmapp & 1)) {
681                 if (!spte)
682                         return (u64 *)*rmapp;
683                 return NULL;
684         }
685         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
686         prev_desc = NULL;
687         prev_spte = NULL;
688         while (desc) {
689                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
690                         if (prev_spte == spte)
691                                 return desc->sptes[i];
692                         prev_spte = desc->sptes[i];
693                 }
694                 desc = desc->more;
695         }
696         return NULL;
697 }
698
699 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
700 {
701         unsigned long *rmapp;
702         u64 *spte;
703         int i, write_protected = 0;
704
705         gfn = unalias_gfn(kvm, gfn);
706         rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
707
708         spte = rmap_next(kvm, rmapp, NULL);
709         while (spte) {
710                 BUG_ON(!spte);
711                 BUG_ON(!(*spte & PT_PRESENT_MASK));
712                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
713                 if (is_writeble_pte(*spte)) {
714                         __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
715                         write_protected = 1;
716                 }
717                 spte = rmap_next(kvm, rmapp, spte);
718         }
719         if (write_protected) {
720                 pfn_t pfn;
721
722                 spte = rmap_next(kvm, rmapp, NULL);
723                 pfn = spte_to_pfn(*spte);
724                 kvm_set_pfn_dirty(pfn);
725         }
726
727         /* check for huge page mappings */
728         for (i = PT_DIRECTORY_LEVEL;
729              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
730                 rmapp = gfn_to_rmap(kvm, gfn, i);
731                 spte = rmap_next(kvm, rmapp, NULL);
732                 while (spte) {
733                         BUG_ON(!spte);
734                         BUG_ON(!(*spte & PT_PRESENT_MASK));
735                         BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
736                         pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
737                         if (is_writeble_pte(*spte)) {
738                                 rmap_remove(kvm, spte);
739                                 --kvm->stat.lpages;
740                                 __set_spte(spte, shadow_trap_nonpresent_pte);
741                                 spte = NULL;
742                                 write_protected = 1;
743                         }
744                         spte = rmap_next(kvm, rmapp, spte);
745                 }
746         }
747
748         return write_protected;
749 }
750
751 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
752 {
753         u64 *spte;
754         int need_tlb_flush = 0;
755
756         while ((spte = rmap_next(kvm, rmapp, NULL))) {
757                 BUG_ON(!(*spte & PT_PRESENT_MASK));
758                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
759                 rmap_remove(kvm, spte);
760                 __set_spte(spte, shadow_trap_nonpresent_pte);
761                 need_tlb_flush = 1;
762         }
763         return need_tlb_flush;
764 }
765
766 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
767 {
768         int need_flush = 0;
769         u64 *spte, new_spte;
770         pte_t *ptep = (pte_t *)data;
771         pfn_t new_pfn;
772
773         WARN_ON(pte_huge(*ptep));
774         new_pfn = pte_pfn(*ptep);
775         spte = rmap_next(kvm, rmapp, NULL);
776         while (spte) {
777                 BUG_ON(!is_shadow_present_pte(*spte));
778                 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
779                 need_flush = 1;
780                 if (pte_write(*ptep)) {
781                         rmap_remove(kvm, spte);
782                         __set_spte(spte, shadow_trap_nonpresent_pte);
783                         spte = rmap_next(kvm, rmapp, NULL);
784                 } else {
785                         new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
786                         new_spte |= (u64)new_pfn << PAGE_SHIFT;
787
788                         new_spte &= ~PT_WRITABLE_MASK;
789                         new_spte &= ~SPTE_HOST_WRITEABLE;
790                         if (is_writeble_pte(*spte))
791                                 kvm_set_pfn_dirty(spte_to_pfn(*spte));
792                         __set_spte(spte, new_spte);
793                         spte = rmap_next(kvm, rmapp, spte);
794                 }
795         }
796         if (need_flush)
797                 kvm_flush_remote_tlbs(kvm);
798
799         return 0;
800 }
801
802 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
803                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
804                                          u64 data))
805 {
806         int i, j;
807         int retval = 0;
808
809         /*
810          * If mmap_sem isn't taken, we can look at the memslots with only
811          * the mmu_lock by skipping over the slots with userspace_addr == 0.
812          */
813         for (i = 0; i < kvm->nmemslots; i++) {
814                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
815                 unsigned long start = memslot->userspace_addr;
816                 unsigned long end;
817
818                 /* mmu_lock protects userspace_addr */
819                 if (!start)
820                         continue;
821
822                 end = start + (memslot->npages << PAGE_SHIFT);
823                 if (hva >= start && hva < end) {
824                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
825
826                         retval |= handler(kvm, &memslot->rmap[gfn_offset],
827                                           data);
828
829                         for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
830                                 int idx = gfn_offset;
831                                 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
832                                 retval |= handler(kvm,
833                                         &memslot->lpage_info[j][idx].rmap_pde,
834                                         data);
835                         }
836                 }
837         }
838
839         return retval;
840 }
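/*
 * Added note: kvm_handle_hva() is the common walker behind the mmu
 * notifier hooks; each caller supplies a per-rmap handler
 * (kvm_unmap_rmapp, kvm_set_pte_rmapp or kvm_age_rmapp) plus an opaque
 * u64 of handler-specific data.
 */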
841
842 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
843 {
844         return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
845 }
846
847 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
848 {
849         kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
850 }
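/*
 * Added note: kvm_set_spte_hva() is the arch hook for the change_pte mmu
 * notifier this patch introduces; it is presumably called from the generic
 * notifier callback when the host pte backing hva is replaced in place
 * (e.g. by ksm).  kvm_set_pte_rmapp() above then either redirects the
 * matching sptes, read-only, to the new pfn or drops them so they are
 * re-faulted.
 */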
851
852 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
853 {
854         u64 *spte;
855         int young = 0;
856
857         /* always return old for EPT */
858         if (!shadow_accessed_mask)
859                 return 0;
860
861         spte = rmap_next(kvm, rmapp, NULL);
862         while (spte) {
863                 int _young;
864                 u64 _spte = *spte;
865                 BUG_ON(!(_spte & PT_PRESENT_MASK));
866                 _young = _spte & PT_ACCESSED_MASK;
867                 if (_young) {
868                         young = 1;
869                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
870                 }
871                 spte = rmap_next(kvm, rmapp, spte);
872         }
873         return young;
874 }
875
876 #define RMAP_RECYCLE_THRESHOLD 1000
877
878 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
879 {
880         unsigned long *rmapp;
881         struct kvm_mmu_page *sp;
882
883         sp = page_header(__pa(spte));
884
885         gfn = unalias_gfn(vcpu->kvm, gfn);
886         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
887
888         kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
889         kvm_flush_remote_tlbs(vcpu->kvm);
890 }
891
892 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
893 {
894         return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
895 }
896
897 #ifdef MMU_DEBUG
898 static int is_empty_shadow_page(u64 *spt)
899 {
900         u64 *pos;
901         u64 *end;
902
903         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
904                 if (is_shadow_present_pte(*pos)) {
905                         printk(KERN_ERR "%s: %p %llx\n", __func__,
906                                pos, *pos);
907                         return 0;
908                 }
909         return 1;
910 }
911 #endif
912
913 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
914 {
915         ASSERT(is_empty_shadow_page(sp->spt));
916         list_del(&sp->link);
917         __free_page(virt_to_page(sp->spt));
918         __free_page(virt_to_page(sp->gfns));
919         kfree(sp);
920         ++kvm->arch.n_free_mmu_pages;
921 }
922
923 static unsigned kvm_page_table_hashfn(gfn_t gfn)
924 {
925         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
926 }
927
928 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
929                                                u64 *parent_pte)
930 {
931         struct kvm_mmu_page *sp;
932
933         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
934         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
935         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
936         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
937         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
938         INIT_LIST_HEAD(&sp->oos_link);
939         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
940         sp->multimapped = 0;
941         sp->parent_pte = parent_pte;
942         --vcpu->kvm->arch.n_free_mmu_pages;
943         return sp;
944 }
945
946 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
947                                     struct kvm_mmu_page *sp, u64 *parent_pte)
948 {
949         struct kvm_pte_chain *pte_chain;
950         struct hlist_node *node;
951         int i;
952
953         if (!parent_pte)
954                 return;
955         if (!sp->multimapped) {
956                 u64 *old = sp->parent_pte;
957
958                 if (!old) {
959                         sp->parent_pte = parent_pte;
960                         return;
961                 }
962                 sp->multimapped = 1;
963                 pte_chain = mmu_alloc_pte_chain(vcpu);
964                 INIT_HLIST_HEAD(&sp->parent_ptes);
965                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
966                 pte_chain->parent_ptes[0] = old;
967         }
968         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
969                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
970                         continue;
971                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
972                         if (!pte_chain->parent_ptes[i]) {
973                                 pte_chain->parent_ptes[i] = parent_pte;
974                                 return;
975                         }
976         }
977         pte_chain = mmu_alloc_pte_chain(vcpu);
978         BUG_ON(!pte_chain);
979         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
980         pte_chain->parent_ptes[0] = parent_pte;
981 }
982
983 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
984                                        u64 *parent_pte)
985 {
986         struct kvm_pte_chain *pte_chain;
987         struct hlist_node *node;
988         int i;
989
990         if (!sp->multimapped) {
991                 BUG_ON(sp->parent_pte != parent_pte);
992                 sp->parent_pte = NULL;
993                 return;
994         }
995         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
996                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
997                         if (!pte_chain->parent_ptes[i])
998                                 break;
999                         if (pte_chain->parent_ptes[i] != parent_pte)
1000                                 continue;
1001                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
1002                                 && pte_chain->parent_ptes[i + 1]) {
1003                                 pte_chain->parent_ptes[i]
1004                                         = pte_chain->parent_ptes[i + 1];
1005                                 ++i;
1006                         }
1007                         pte_chain->parent_ptes[i] = NULL;
1008                         if (i == 0) {
1009                                 hlist_del(&pte_chain->link);
1010                                 mmu_free_pte_chain(pte_chain);
1011                                 if (hlist_empty(&sp->parent_ptes)) {
1012                                         sp->multimapped = 0;
1013                                         sp->parent_pte = NULL;
1014                                 }
1015                         }
1016                         return;
1017                 }
1018         BUG();
1019 }
1020
1021
1022 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1023                             mmu_parent_walk_fn fn)
1024 {
1025         struct kvm_pte_chain *pte_chain;
1026         struct hlist_node *node;
1027         struct kvm_mmu_page *parent_sp;
1028         int i;
1029
1030         if (!sp->multimapped && sp->parent_pte) {
1031                 parent_sp = page_header(__pa(sp->parent_pte));
1032                 fn(vcpu, parent_sp);
1033                 mmu_parent_walk(vcpu, parent_sp, fn);
1034                 return;
1035         }
1036         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1037                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1038                         if (!pte_chain->parent_ptes[i])
1039                                 break;
1040                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
1041                         fn(vcpu, parent_sp);
1042                         mmu_parent_walk(vcpu, parent_sp, fn);
1043                 }
1044 }
1045
1046 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
1047 {
1048         unsigned int index;
1049         struct kvm_mmu_page *sp = page_header(__pa(spte));
1050
1051         index = spte - sp->spt;
1052         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
1053                 sp->unsync_children++;
1054         WARN_ON(!sp->unsync_children);
1055 }
1056
1057 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
1058 {
1059         struct kvm_pte_chain *pte_chain;
1060         struct hlist_node *node;
1061         int i;
1062
1063         if (!sp->parent_pte)
1064                 return;
1065
1066         if (!sp->multimapped) {
1067                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
1068                 return;
1069         }
1070
1071         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1072                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1073                         if (!pte_chain->parent_ptes[i])
1074                                 break;
1075                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
1076                 }
1077 }
1078
1079 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1080 {
1081         kvm_mmu_update_parents_unsync(sp);
1082         return 1;
1083 }
1084
1085 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
1086                                         struct kvm_mmu_page *sp)
1087 {
1088         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
1089         kvm_mmu_update_parents_unsync(sp);
1090 }
1091
1092 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1093                                     struct kvm_mmu_page *sp)
1094 {
1095         int i;
1096
1097         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1098                 sp->spt[i] = shadow_trap_nonpresent_pte;
1099 }
1100
1101 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1102                                struct kvm_mmu_page *sp)
1103 {
1104         return 1;
1105 }
1106
1107 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1108 {
1109 }
1110
1111 #define KVM_PAGE_ARRAY_NR 16
1112
1113 struct kvm_mmu_pages {
1114         struct mmu_page_and_offset {
1115                 struct kvm_mmu_page *sp;
1116                 unsigned int idx;
1117         } page[KVM_PAGE_ARRAY_NR];
1118         unsigned int nr;
1119 };
1120
1121 #define for_each_unsync_children(bitmap, idx)           \
1122         for (idx = find_first_bit(bitmap, 512);         \
1123              idx < 512;                                 \
1124              idx = find_next_bit(bitmap, 512, idx+1))
1125
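/*
 * Added note: the hard-coded 512 is PT64_ENT_PER_PAGE, the number of
 * entries in a shadow page and hence the width of sp->unsync_child_bitmap.
 */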
1126 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1127                          int idx)
1128 {
1129         int i;
1130
1131         if (sp->unsync)
1132                 for (i=0; i < pvec->nr; i++)
1133                         if (pvec->page[i].sp == sp)
1134                                 return 0;
1135
1136         pvec->page[pvec->nr].sp = sp;
1137         pvec->page[pvec->nr].idx = idx;
1138         pvec->nr++;
1139         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1140 }
1141
1142 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1143                            struct kvm_mmu_pages *pvec)
1144 {
1145         int i, ret, nr_unsync_leaf = 0;
1146
1147         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1148                 u64 ent = sp->spt[i];
1149
1150                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1151                         struct kvm_mmu_page *child;
1152                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1153
1154                         if (child->unsync_children) {
1155                                 if (mmu_pages_add(pvec, child, i))
1156                                         return -ENOSPC;
1157
1158                                 ret = __mmu_unsync_walk(child, pvec);
1159                                 if (!ret)
1160                                         __clear_bit(i, sp->unsync_child_bitmap);
1161                                 else if (ret > 0)
1162                                         nr_unsync_leaf += ret;
1163                                 else
1164                                         return ret;
1165                         }
1166
1167                         if (child->unsync) {
1168                                 nr_unsync_leaf++;
1169                                 if (mmu_pages_add(pvec, child, i))
1170                                         return -ENOSPC;
1171                         }
1172                 }
1173         }
1174
1175         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1176                 sp->unsync_children = 0;
1177
1178         return nr_unsync_leaf;
1179 }
1180
1181 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1182                            struct kvm_mmu_pages *pvec)
1183 {
1184         if (!sp->unsync_children)
1185                 return 0;
1186
1187         mmu_pages_add(pvec, sp, 0);
1188         return __mmu_unsync_walk(sp, pvec);
1189 }
1190
1191 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1192 {
1193         unsigned index;
1194         struct hlist_head *bucket;
1195         struct kvm_mmu_page *sp;
1196         struct hlist_node *node;
1197
1198         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1199         index = kvm_page_table_hashfn(gfn);
1200         bucket = &kvm->arch.mmu_page_hash[index];
1201         hlist_for_each_entry(sp, node, bucket, hash_link)
1202                 if (sp->gfn == gfn && !sp->role.direct
1203                     && !sp->role.invalid) {
1204                         pgprintk("%s: found role %x\n",
1205                                  __func__, sp->role.word);
1206                         return sp;
1207                 }
1208         return NULL;
1209 }
1210
1211 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1212 {
1213         WARN_ON(!sp->unsync);
1214         sp->unsync = 0;
1215         --kvm->stat.mmu_unsync;
1216 }
1217
1218 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1219
1220 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1221 {
1222         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1223                 kvm_mmu_zap_page(vcpu->kvm, sp);
1224                 return 1;
1225         }
1226
1227         trace_kvm_mmu_sync_page(sp);
1228         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1229                 kvm_flush_remote_tlbs(vcpu->kvm);
1230         kvm_unlink_unsync_page(vcpu->kvm, sp);
1231         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1232                 kvm_mmu_zap_page(vcpu->kvm, sp);
1233                 return 1;
1234         }
1235
1236         kvm_mmu_flush_tlb(vcpu);
1237         return 0;
1238 }
1239
1240 struct mmu_page_path {
1241         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1242         unsigned int idx[PT64_ROOT_LEVEL-1];
1243 };
1244
1245 #define for_each_sp(pvec, sp, parents, i)                       \
1246                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1247                         sp = pvec.page[i].sp;                   \
1248                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1249                         i = mmu_pages_next(&pvec, &parents, i))
1250
1251 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1252                           struct mmu_page_path *parents,
1253                           int i)
1254 {
1255         int n;
1256
1257         for (n = i+1; n < pvec->nr; n++) {
1258                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1259
1260                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1261                         parents->idx[0] = pvec->page[n].idx;
1262                         return n;
1263                 }
1264
1265                 parents->parent[sp->role.level-2] = sp;
1266                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1267         }
1268
1269         return n;
1270 }
1271
1272 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1273 {
1274         struct kvm_mmu_page *sp;
1275         unsigned int level = 0;
1276
1277         do {
1278                 unsigned int idx = parents->idx[level];
1279
1280                 sp = parents->parent[level];
1281                 if (!sp)
1282                         return;
1283
1284                 --sp->unsync_children;
1285                 WARN_ON((int)sp->unsync_children < 0);
1286                 __clear_bit(idx, sp->unsync_child_bitmap);
1287                 level++;
1288         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1289 }
1290
1291 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1292                                struct mmu_page_path *parents,
1293                                struct kvm_mmu_pages *pvec)
1294 {
1295         parents->parent[parent->role.level-1] = NULL;
1296         pvec->nr = 0;
1297 }
1298
1299 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1300                               struct kvm_mmu_page *parent)
1301 {
1302         int i;
1303         struct kvm_mmu_page *sp;
1304         struct mmu_page_path parents;
1305         struct kvm_mmu_pages pages;
1306
1307         kvm_mmu_pages_init(parent, &parents, &pages);
1308         while (mmu_unsync_walk(parent, &pages)) {
1309                 int protected = 0;
1310
1311                 for_each_sp(pages, sp, parents, i)
1312                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1313
1314                 if (protected)
1315                         kvm_flush_remote_tlbs(vcpu->kvm);
1316
1317                 for_each_sp(pages, sp, parents, i) {
1318                         kvm_sync_page(vcpu, sp);
1319                         mmu_pages_clear_parents(&parents);
1320                 }
1321                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1322                 kvm_mmu_pages_init(parent, &parents, &pages);
1323         }
1324 }
1325
1326 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1327                                              gfn_t gfn,
1328                                              gva_t gaddr,
1329                                              unsigned level,
1330                                              int direct,
1331                                              unsigned access,
1332                                              u64 *parent_pte)
1333 {
1334         union kvm_mmu_page_role role;
1335         unsigned index;
1336         unsigned quadrant;
1337         struct hlist_head *bucket;
1338         struct kvm_mmu_page *sp;
1339         struct hlist_node *node, *tmp;
1340
1341         role = vcpu->arch.mmu.base_role;
1342         role.level = level;
1343         role.direct = direct;
1344         role.access = access;
1345         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1346                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1347                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1348                 role.quadrant = quadrant;
1349         }
1350         index = kvm_page_table_hashfn(gfn);
1351         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1352         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1353                 if (sp->gfn == gfn) {
1354                         if (sp->unsync)
1355                                 if (kvm_sync_page(vcpu, sp))
1356                                         continue;
1357
1358                         if (sp->role.word != role.word)
1359                                 continue;
1360
1361                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1362                         if (sp->unsync_children) {
1363                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1364                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1365                         }
1366                         trace_kvm_mmu_get_page(sp, false);
1367                         return sp;
1368                 }
1369         ++vcpu->kvm->stat.mmu_cache_miss;
1370         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1371         if (!sp)
1372                 return sp;
1373         sp->gfn = gfn;
1374         sp->role = role;
1375         hlist_add_head(&sp->hash_link, bucket);
1376         if (!direct) {
1377                 if (rmap_write_protect(vcpu->kvm, gfn))
1378                         kvm_flush_remote_tlbs(vcpu->kvm);
1379                 account_shadowed(vcpu->kvm, gfn);
1380         }
1381         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1382                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1383         else
1384                 nonpaging_prefetch_page(vcpu, sp);
1385         trace_kvm_mmu_get_page(sp, true);
1386         return sp;
1387 }
1388
1389 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1390                              struct kvm_vcpu *vcpu, u64 addr)
1391 {
1392         iterator->addr = addr;
1393         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1394         iterator->level = vcpu->arch.mmu.shadow_root_level;
1395         if (iterator->level == PT32E_ROOT_LEVEL) {
1396                 iterator->shadow_addr
1397                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1398                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1399                 --iterator->level;
1400                 if (!iterator->shadow_addr)
1401                         iterator->level = 0;
1402         }
1403 }
1404
1405 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1406 {
1407         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1408                 return false;
1409
1410         if (iterator->level == PT_PAGE_TABLE_LEVEL)
1411                 if (is_large_pte(*iterator->sptep))
1412                         return false;
1413
1414         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1415         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1416         return true;
1417 }
1418
1419 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1420 {
1421         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1422         --iterator->level;
1423 }
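/*
 * Added note: shadow_walk_init/okay/next() above implement the
 * for_each_shadow_entry() iterator declared near the top of the file:
 * init starts at the shadow root (treating the three-level PAE root
 * specially), okay computes index/sptep for the current level and stops
 * once the walk runs out of levels, and next descends one level through
 * the current spte.
 */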
1424
1425 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1426                                          struct kvm_mmu_page *sp)
1427 {
1428         unsigned i;
1429         u64 *pt;
1430         u64 ent;
1431
1432         pt = sp->spt;
1433
1434         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1435                 ent = pt[i];
1436
1437                 if (is_shadow_present_pte(ent)) {
1438                         if (!is_last_spte(ent, sp->role.level)) {
1439                                 ent &= PT64_BASE_ADDR_MASK;
1440                                 mmu_page_remove_parent_pte(page_header(ent),
1441                                                            &pt[i]);
1442                         } else {
1443                                 if (is_large_pte(ent))
1444                                         --kvm->stat.lpages;
1445                                 rmap_remove(kvm, &pt[i]);
1446                         }
1447                 }
1448                 pt[i] = shadow_trap_nonpresent_pte;
1449         }
1450 }
1451
1452 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1453 {
1454         mmu_page_remove_parent_pte(sp, parent_pte);
1455 }
1456
1457 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1458 {
1459         int i;
1460         struct kvm_vcpu *vcpu;
1461
1462         kvm_for_each_vcpu(i, vcpu, kvm)
1463                 vcpu->arch.last_pte_updated = NULL;
1464 }
1465
1466 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1467 {
1468         u64 *parent_pte;
1469
1470         while (sp->multimapped || sp->parent_pte) {
1471                 if (!sp->multimapped)
1472                         parent_pte = sp->parent_pte;
1473                 else {
1474                         struct kvm_pte_chain *chain;
1475
1476                         chain = container_of(sp->parent_ptes.first,
1477                                              struct kvm_pte_chain, link);
1478                         parent_pte = chain->parent_ptes[0];
1479                 }
1480                 BUG_ON(!parent_pte);
1481                 kvm_mmu_put_page(sp, parent_pte);
1482                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1483         }
1484 }
1485
1486 static int mmu_zap_unsync_children(struct kvm *kvm,
1487                                    struct kvm_mmu_page *parent)
1488 {
1489         int i, zapped = 0;
1490         struct mmu_page_path parents;
1491         struct kvm_mmu_pages pages;
1492
1493         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1494                 return 0;
1495
1496         kvm_mmu_pages_init(parent, &parents, &pages);
1497         while (mmu_unsync_walk(parent, &pages)) {
1498                 struct kvm_mmu_page *sp;
1499
1500                 for_each_sp(pages, sp, parents, i) {
1501                         kvm_mmu_zap_page(kvm, sp);
1502                         mmu_pages_clear_parents(&parents);
1503                 }
1504                 zapped += pages.nr;
1505                 kvm_mmu_pages_init(parent, &parents, &pages);
1506         }
1507
1508         return zapped;
1509 }
1510
1511 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1512 {
1513         int ret;
1514
1515         trace_kvm_mmu_zap_page(sp);
1516         ++kvm->stat.mmu_shadow_zapped;
1517         ret = mmu_zap_unsync_children(kvm, sp);
1518         kvm_mmu_page_unlink_children(kvm, sp);
1519         kvm_mmu_unlink_parents(kvm, sp);
1520         kvm_flush_remote_tlbs(kvm);
1521         if (!sp->role.invalid && !sp->role.direct)
1522                 unaccount_shadowed(kvm, sp->gfn);
1523         if (sp->unsync)
1524                 kvm_unlink_unsync_page(kvm, sp);
1525         if (!sp->root_count) {
1526                 hlist_del(&sp->hash_link);
1527                 kvm_mmu_free_page(kvm, sp);
1528         } else {
1529                 sp->role.invalid = 1;
1530                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1531                 kvm_reload_remote_mmus(kvm);
1532         }
1533         kvm_mmu_reset_last_pte_updated(kvm);
1534         return ret;
1535 }
1536
1537 /*
1538  * Changing the number of mmu pages allocated to the vm.
1539  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
1540  */
1541 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1542 {
1543         int used_pages;
1544
1545         used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
1546         used_pages = max(0, used_pages);
1547
1548         /*
1549          * If we set the number of mmu pages to be smaller than the
1550          * number of active pages, we must free some mmu pages before we
1551          * change the value.
1552          */
1553
1554         if (used_pages > kvm_nr_mmu_pages) {
1555                 while (used_pages > kvm_nr_mmu_pages) {
1556                         struct kvm_mmu_page *page;
1557
1558                         page = container_of(kvm->arch.active_mmu_pages.prev,
1559                                             struct kvm_mmu_page, link);
1560                         kvm_mmu_zap_page(kvm, page);
1561                         used_pages--;
1562                 }
1563                 kvm->arch.n_free_mmu_pages = 0;
1564         }
1565         else
1566                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1567                                          - kvm->arch.n_alloc_mmu_pages;
1568
1569         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1570 }
1571
1572 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1573 {
1574         unsigned index;
1575         struct hlist_head *bucket;
1576         struct kvm_mmu_page *sp;
1577         struct hlist_node *node, *n;
1578         int r;
1579
1580         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1581         r = 0;
1582         index = kvm_page_table_hashfn(gfn);
1583         bucket = &kvm->arch.mmu_page_hash[index];
1584         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1585                 if (sp->gfn == gfn && !sp->role.direct) {
1586                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1587                                  sp->role.word);
1588                         r = 1;
1589                         if (kvm_mmu_zap_page(kvm, sp))
1590                                 n = bucket->first;
1591                 }
1592         return r;
1593 }
1594
1595 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1596 {
1597         unsigned index;
1598         struct hlist_head *bucket;
1599         struct kvm_mmu_page *sp;
1600         struct hlist_node *node, *nn;
1601
1602         index = kvm_page_table_hashfn(gfn);
1603         bucket = &kvm->arch.mmu_page_hash[index];
1604         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1605                 if (sp->gfn == gfn && !sp->role.direct
1606                     && !sp->role.invalid) {
1607                         pgprintk("%s: zap %lx %x\n",
1608                                  __func__, gfn, sp->role.word);
1609                         kvm_mmu_zap_page(kvm, sp);
1610                 }
1611         }
1612 }
1613
1614 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1615 {
1616         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1617         struct kvm_mmu_page *sp = page_header(__pa(pte));
1618
1619         __set_bit(slot, sp->slot_bitmap);
1620 }
1621
1622 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1623 {
1624         int i;
1625         u64 *pt = sp->spt;
1626
1627         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1628                 return;
1629
1630         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1631                 if (pt[i] == shadow_notrap_nonpresent_pte)
1632                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1633         }
1634 }
1635
1636 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1637 {
1638         struct page *page;
1639
1640         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1641
1642         if (gpa == UNMAPPED_GVA)
1643                 return NULL;
1644
1645         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1646
1647         return page;
1648 }
1649
1650 /*
1651  * The function is based on mtrr_type_lookup() in
1652  * arch/x86/kernel/cpu/mtrr/generic.c
1653  */
1654 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1655                          u64 start, u64 end)
1656 {
1657         int i;
1658         u64 base, mask;
1659         u8 prev_match, curr_match;
1660         int num_var_ranges = KVM_NR_VAR_MTRR;
1661
1662         if (!mtrr_state->enabled)
1663                 return 0xFF;
1664
1665         /* Make end inclusive instead of exclusive */
1666         end--;
1667
1668         /* Look in fixed ranges. Just return the type as per start */
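        /*
         * Index layout of fixed_ranges[]: eight 64K ranges cover
         * 0x00000-0x7FFFF, sixteen 16K ranges cover 0x80000-0xBFFFF
         * starting at index 8, and sixty-four 4K ranges cover
         * 0xC0000-0xFFFFF starting at index 24.  E.g. for
         * start = 0xB8000: idx = 8 + ((0xB8000 - 0x80000) >> 14) = 22.
         */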
1669         if (mtrr_state->have_fixed && (start < 0x100000)) {
1670                 int idx;
1671
1672                 if (start < 0x80000) {
1673                         idx = 0;
1674                         idx += (start >> 16);
1675                         return mtrr_state->fixed_ranges[idx];
1676                 } else if (start < 0xC0000) {
1677                         idx = 1 * 8;
1678                         idx += ((start - 0x80000) >> 14);
1679                         return mtrr_state->fixed_ranges[idx];
1680                 } else if (start < 0x1000000) {
1681                         idx = 3 * 8;
1682                         idx += ((start - 0xC0000) >> 12);
1683                         return mtrr_state->fixed_ranges[idx];
1684                 }
1685         }
1686
1687         /*
1688          * Look in variable ranges.
1689          * Look for multiple ranges matching this address and pick the type
1690          * as per MTRR precedence.
1691          */
1692         if (!(mtrr_state->enabled & 2))
1693                 return mtrr_state->def_type;
1694
1695         prev_match = 0xFF;
1696         for (i = 0; i < num_var_ranges; ++i) {
1697                 unsigned short start_state, end_state;
1698
1699                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1700                         continue;
1701
1702                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1703                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1704                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1705                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1706
1707                 start_state = ((start & mask) == (base & mask));
1708                 end_state = ((end & mask) == (base & mask));
1709                 if (start_state != end_state)
1710                         return 0xFE;
1711
1712                 if ((start & mask) != (base & mask))
1713                         continue;
1714
1715                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1716                 if (prev_match == 0xFF) {
1717                         prev_match = curr_match;
1718                         continue;
1719                 }
1720
1721                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1722                     curr_match == MTRR_TYPE_UNCACHABLE)
1723                         return MTRR_TYPE_UNCACHABLE;
1724
1725                 if ((prev_match == MTRR_TYPE_WRBACK &&
1726                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1727                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1728                      curr_match == MTRR_TYPE_WRBACK)) {
1729                         prev_match = MTRR_TYPE_WRTHROUGH;
1730                         curr_match = MTRR_TYPE_WRTHROUGH;
1731                 }
1732
1733                 if (prev_match != curr_match)
1734                         return MTRR_TYPE_UNCACHABLE;
1735         }
1736
1737         if (prev_match != 0xFF)
1738                 return prev_match;
1739
1740         return mtrr_state->def_type;
1741 }
1742
1743 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1744 {
1745         u8 mtrr;
1746
1747         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1748                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1749         if (mtrr == 0xfe || mtrr == 0xff)
1750                 mtrr = MTRR_TYPE_WRBACK;
1751         return mtrr;
1752 }
1753 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1754
1755 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1756 {
1757         unsigned index;
1758         struct hlist_head *bucket;
1759         struct kvm_mmu_page *s;
1760         struct hlist_node *node, *n;
1761
1762         trace_kvm_mmu_unsync_page(sp);
1763         index = kvm_page_table_hashfn(sp->gfn);
1764         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1765         /* don't unsync if pagetable is shadowed with multiple roles */
1766         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1767                 if (s->gfn != sp->gfn || s->role.direct)
1768                         continue;
1769                 if (s->role.word != sp->role.word)
1770                         return 1;
1771         }
1772         ++vcpu->kvm->stat.mmu_unsync;
1773         sp->unsync = 1;
1774
1775         kvm_mmu_mark_parents_unsync(vcpu, sp);
1776
1777         mmu_convert_notrap(sp);
1778         return 0;
1779 }
1780
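/*
 * Decide whether writes to 'gfn' must be trapped: return 1 if the gfn
 * is shadowed by a non-leaf page table (or could not be unsynced), and
 * 0 if it is not shadowed, already unsynced, or was just unsynced.
 */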
1781 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1782                                   bool can_unsync)
1783 {
1784         struct kvm_mmu_page *shadow;
1785
1786         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1787         if (shadow) {
1788                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1789                         return 1;
1790                 if (shadow->unsync)
1791                         return 0;
1792                 if (can_unsync && oos_shadow)
1793                         return kvm_unsync_page(vcpu, shadow);
1794                 return 1;
1795         }
1796         return 0;
1797 }
1798
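/*
 * Build and install a shadow pte.  Returns 1 when the mapping could not
 * be made writable (e.g. the gfn shadows a guest page table and must
 * stay write-protected), so a write to it still needs emulation;
 * returns 0 otherwise.
 */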
1799 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1800                     unsigned pte_access, int user_fault,
1801                     int write_fault, int dirty, int level,
1802                     gfn_t gfn, pfn_t pfn, bool speculative,
1803                     bool can_unsync, bool reset_host_protection)
1804 {
1805         u64 spte;
1806         int ret = 0;
1807
1808         /*
1809          * We don't set the accessed bit, since we sometimes want to see
1810          * whether the guest actually used the pte (in order to detect
1811          * demand paging).
1812          */
1813         spte = shadow_base_present_pte | shadow_dirty_mask;
1814         if (!speculative)
1815                 spte |= shadow_accessed_mask;
1816         if (!dirty)
1817                 pte_access &= ~ACC_WRITE_MASK;
1818         if (pte_access & ACC_EXEC_MASK)
1819                 spte |= shadow_x_mask;
1820         else
1821                 spte |= shadow_nx_mask;
1822         if (pte_access & ACC_USER_MASK)
1823                 spte |= shadow_user_mask;
1824         if (level > PT_PAGE_TABLE_LEVEL)
1825                 spte |= PT_PAGE_SIZE_MASK;
1826         if (tdp_enabled)
1827                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1828                         kvm_is_mmio_pfn(pfn));
1829
1830         if (reset_host_protection)
1831                 spte |= SPTE_HOST_WRITEABLE;
1832
1833         spte |= (u64)pfn << PAGE_SHIFT;
1834
1835         if ((pte_access & ACC_WRITE_MASK)
1836             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1837
1838                 if (level > PT_PAGE_TABLE_LEVEL &&
1839                     has_wrprotected_page(vcpu->kvm, gfn, level)) {
1840                         ret = 1;
1841                         spte = shadow_trap_nonpresent_pte;
1842                         goto set_pte;
1843                 }
1844
1845                 spte |= PT_WRITABLE_MASK;
1846
1847                 /*
1848                  * Optimization: for pte sync, if spte was writable the hash
1849                  * lookup is unnecessary (and expensive). Write protection
1850                  * is the responsibility of mmu_get_page / kvm_sync_page.
1851                  * Same reasoning can be applied to dirty page accounting.
1852                  */
1853                 if (!can_unsync && is_writeble_pte(*sptep))
1854                         goto set_pte;
1855
1856                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1857                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1858                                  __func__, gfn);
1859                         ret = 1;
1860                         pte_access &= ~ACC_WRITE_MASK;
1861                         if (is_writeble_pte(spte))
1862                                 spte &= ~PT_WRITABLE_MASK;
1863                 }
1864         }
1865
1866         if (pte_access & ACC_WRITE_MASK)
1867                 mark_page_dirty(vcpu->kvm, gfn);
1868
1869 set_pte:
1870         __set_spte(sptep, spte);
1871         return ret;
1872 }
1873
1874 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1875                          unsigned pt_access, unsigned pte_access,
1876                          int user_fault, int write_fault, int dirty,
1877                          int *ptwrite, int level, gfn_t gfn,
1878                          pfn_t pfn, bool speculative,
1879                          bool reset_host_protection)
1880 {
1881         int was_rmapped = 0;
1882         int was_writeble = is_writeble_pte(*sptep);
1883         int rmap_count;
1884
1885         pgprintk("%s: spte %llx access %x write_fault %d"
1886                  " user_fault %d gfn %lx\n",
1887                  __func__, *sptep, pt_access,
1888                  write_fault, user_fault, gfn);
1889
1890         if (is_rmap_spte(*sptep)) {
1891                 /*
1892                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1893                  * the parent of the now unreachable PTE.
1894                  */
1895                 if (level > PT_PAGE_TABLE_LEVEL &&
1896                     !is_large_pte(*sptep)) {
1897                         struct kvm_mmu_page *child;
1898                         u64 pte = *sptep;
1899
1900                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1901                         mmu_page_remove_parent_pte(child, sptep);
1902                 } else if (pfn != spte_to_pfn(*sptep)) {
1903                         pgprintk("hfn old %lx new %lx\n",
1904                                  spte_to_pfn(*sptep), pfn);
1905                         rmap_remove(vcpu->kvm, sptep);
1906                 } else
1907                         was_rmapped = 1;
1908         }
1909
1910         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1911                       dirty, level, gfn, pfn, speculative, true,
1912                       reset_host_protection)) {
1913                 if (write_fault)
1914                         *ptwrite = 1;
1915                 kvm_x86_ops->tlb_flush(vcpu);
1916         }
1917
1918         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1919         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1920                  is_large_pte(*sptep) ? "2MB" : "4kB",
1921                  *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
1922                  *sptep, sptep);
1923         if (!was_rmapped && is_large_pte(*sptep))
1924                 ++vcpu->kvm->stat.lpages;
1925
1926         page_header_update_slot(vcpu->kvm, sptep, gfn);
1927         if (!was_rmapped) {
1928                 rmap_count = rmap_add(vcpu, sptep, gfn);
1929                 kvm_release_pfn_clean(pfn);
1930                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1931                         rmap_recycle(vcpu, sptep, gfn);
1932         } else {
1933                 if (was_writeble)
1934                         kvm_release_pfn_dirty(pfn);
1935                 else
1936                         kvm_release_pfn_clean(pfn);
1937         }
1938         if (speculative) {
1939                 vcpu->arch.last_pte_updated = sptep;
1940                 vcpu->arch.last_pte_gfn = gfn;
1941         }
1942 }
1943
1944 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1945 {
1946 }
1947
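/*
 * Install a direct (nonpaging or tdp) mapping for gfn at 'level',
 * allocating intermediate shadow pages along the walk as needed.
 * Returns nonzero (via mmu_set_spte()'s ptwrite) when the write could
 * not simply be mapped and should be emulated, or -ENOMEM.
 */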
1948 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1949                         int level, gfn_t gfn, pfn_t pfn)
1950 {
1951         struct kvm_shadow_walk_iterator iterator;
1952         struct kvm_mmu_page *sp;
1953         int pt_write = 0;
1954         gfn_t pseudo_gfn;
1955
1956         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1957                 if (iterator.level == level) {
1958                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1959                                      0, write, 1, &pt_write,
1960                                      level, gfn, pfn, false, true);
1961                         ++vcpu->stat.pf_fixed;
1962                         break;
1963                 }
1964
1965                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1966                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1967                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1968                                               iterator.level - 1,
1969                                               1, ACC_ALL, iterator.sptep);
1970                         if (!sp) {
1971                                 pgprintk("nonpaging_map: ENOMEM\n");
1972                                 kvm_release_pfn_clean(pfn);
1973                                 return -ENOMEM;
1974                         }
1975
1976                         __set_spte(iterator.sptep,
1977                                    __pa(sp->spt)
1978                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
1979                                    | shadow_user_mask | shadow_x_mask);
1980                 }
1981         }
1982         return pt_write;
1983 }
1984
1985 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1986 {
1987         int r;
1988         int level;
1989         pfn_t pfn;
1990         unsigned long mmu_seq;
1991
1992         level = mapping_level(vcpu, gfn);
1993
1994         /*
1995          * This path builds a PAE pagetable - so we can map 2mb pages at
1996          * maximum. Therefore check if the level is larger than that.
1997          */
1998         if (level > PT_DIRECTORY_LEVEL)
1999                 level = PT_DIRECTORY_LEVEL;
2000
2001         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2002
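        /*
         * Sample mmu_notifier_seq before the gfn->pfn translation so that
         * mmu_notifier_retry(), called under mmu_lock below, can detect an
         * invalidation that raced with gfn_to_pfn() and force a retry.
         */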
2003         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2004         smp_rmb();
2005         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2006
2007         /* mmio */
2008         if (is_error_pfn(pfn)) {
2009                 kvm_release_pfn_clean(pfn);
2010                 return 1;
2011         }
2012
2013         spin_lock(&vcpu->kvm->mmu_lock);
2014         if (mmu_notifier_retry(vcpu, mmu_seq))
2015                 goto out_unlock;
2016         kvm_mmu_free_some_pages(vcpu);
2017         r = __direct_map(vcpu, v, write, level, gfn, pfn);
2018         spin_unlock(&vcpu->kvm->mmu_lock);
2019
2020
2021         return r;
2022
2023 out_unlock:
2024         spin_unlock(&vcpu->kvm->mmu_lock);
2025         kvm_release_pfn_clean(pfn);
2026         return 0;
2027 }
2028
2029
2030 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2031 {
2032         int i;
2033         struct kvm_mmu_page *sp;
2034
2035         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2036                 return;
2037         spin_lock(&vcpu->kvm->mmu_lock);
2038         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2039                 hpa_t root = vcpu->arch.mmu.root_hpa;
2040
2041                 sp = page_header(root);
2042                 --sp->root_count;
2043                 if (!sp->root_count && sp->role.invalid)
2044                         kvm_mmu_zap_page(vcpu->kvm, sp);
2045                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2046                 spin_unlock(&vcpu->kvm->mmu_lock);
2047                 return;
2048         }
2049         for (i = 0; i < 4; ++i) {
2050                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2051
2052                 if (root) {
2053                         root &= PT64_BASE_ADDR_MASK;
2054                         sp = page_header(root);
2055                         --sp->root_count;
2056                         if (!sp->root_count && sp->role.invalid)
2057                                 kvm_mmu_zap_page(vcpu->kvm, sp);
2058                 }
2059                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2060         }
2061         spin_unlock(&vcpu->kvm->mmu_lock);
2062         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2063 }
2064
2065 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2066 {
2067         int ret = 0;
2068
2069         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2070                 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2071                 ret = 1;
2072         }
2073
2074         return ret;
2075 }
2076
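/*
 * Allocate shadow roots for the current mode: a single root when the
 * shadow hierarchy is 4 levels deep, otherwise one shadow root per PAE
 * PDPTE in pae_root[].  Returns 1 (after requesting a triple fault) if
 * a root gfn has no backing memslot, 0 on success.
 */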
2077 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2078 {
2079         int i;
2080         gfn_t root_gfn;
2081         struct kvm_mmu_page *sp;
2082         int direct = 0;
2083         u64 pdptr;
2084
2085         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
2086
2087         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2088                 hpa_t root = vcpu->arch.mmu.root_hpa;
2089
2090                 ASSERT(!VALID_PAGE(root));
2091                 if (tdp_enabled)
2092                         direct = 1;
2093                 if (mmu_check_root(vcpu, root_gfn))
2094                         return 1;
2095                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
2096                                       PT64_ROOT_LEVEL, direct,
2097                                       ACC_ALL, NULL);
2098                 root = __pa(sp->spt);
2099                 ++sp->root_count;
2100                 vcpu->arch.mmu.root_hpa = root;
2101                 return 0;
2102         }
2103         direct = !is_paging(vcpu);
2104         if (tdp_enabled)
2105                 direct = 1;
2106         for (i = 0; i < 4; ++i) {
2107                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2108
2109                 ASSERT(!VALID_PAGE(root));
2110                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2111                         pdptr = kvm_pdptr_read(vcpu, i);
2112                         if (!is_present_gpte(pdptr)) {
2113                                 vcpu->arch.mmu.pae_root[i] = 0;
2114                                 continue;
2115                         }
2116                         root_gfn = pdptr >> PAGE_SHIFT;
2117                 } else if (vcpu->arch.mmu.root_level == 0)
2118                         root_gfn = 0;
2119                 if (mmu_check_root(vcpu, root_gfn))
2120                         return 1;
2121                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2122                                       PT32_ROOT_LEVEL, direct,
2123                                       ACC_ALL, NULL);
2124                 root = __pa(sp->spt);
2125                 ++sp->root_count;
2126                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2127         }
2128         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2129         return 0;
2130 }
2131
2132 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2133 {
2134         int i;
2135         struct kvm_mmu_page *sp;
2136
2137         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2138                 return;
2139         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2140                 hpa_t root = vcpu->arch.mmu.root_hpa;
2141                 sp = page_header(root);
2142                 mmu_sync_children(vcpu, sp);
2143                 return;
2144         }
2145         for (i = 0; i < 4; ++i) {
2146                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2147
2148                 if (root && VALID_PAGE(root)) {
2149                         root &= PT64_BASE_ADDR_MASK;
2150                         sp = page_header(root);
2151                         mmu_sync_children(vcpu, sp);
2152                 }
2153         }
2154 }
2155
2156 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2157 {
2158         spin_lock(&vcpu->kvm->mmu_lock);
2159         mmu_sync_roots(vcpu);
2160         spin_unlock(&vcpu->kvm->mmu_lock);
2161 }
2162
2163 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2164 {
2165         return vaddr;
2166 }
2167
2168 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2169                                 u32 error_code)
2170 {
2171         gfn_t gfn;
2172         int r;
2173
2174         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2175         r = mmu_topup_memory_caches(vcpu);
2176         if (r)
2177                 return r;
2178
2179         ASSERT(vcpu);
2180         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2181
2182         gfn = gva >> PAGE_SHIFT;
2183
2184         return nonpaging_map(vcpu, gva & PAGE_MASK,
2185                              error_code & PFERR_WRITE_MASK, gfn);
2186 }
2187
2188 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2189                                 u32 error_code)
2190 {
2191         pfn_t pfn;
2192         int r;
2193         int level;
2194         gfn_t gfn = gpa >> PAGE_SHIFT;
2195         unsigned long mmu_seq;
2196
2197         ASSERT(vcpu);
2198         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2199
2200         r = mmu_topup_memory_caches(vcpu);
2201         if (r)
2202                 return r;
2203
2204         level = mapping_level(vcpu, gfn);
2205
2206         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2207
2208         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2209         smp_rmb();
2210         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2211         if (is_error_pfn(pfn)) {
2212                 kvm_release_pfn_clean(pfn);
2213                 return 1;
2214         }
2215         spin_lock(&vcpu->kvm->mmu_lock);
2216         if (mmu_notifier_retry(vcpu, mmu_seq))
2217                 goto out_unlock;
2218         kvm_mmu_free_some_pages(vcpu);
2219         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2220                          level, gfn, pfn);
2221         spin_unlock(&vcpu->kvm->mmu_lock);
2222
2223         return r;
2224
2225 out_unlock:
2226         spin_unlock(&vcpu->kvm->mmu_lock);
2227         kvm_release_pfn_clean(pfn);
2228         return 0;
2229 }
2230
2231 static void nonpaging_free(struct kvm_vcpu *vcpu)
2232 {
2233         mmu_free_roots(vcpu);
2234 }
2235
2236 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2237 {
2238         struct kvm_mmu *context = &vcpu->arch.mmu;
2239
2240         context->new_cr3 = nonpaging_new_cr3;
2241         context->page_fault = nonpaging_page_fault;
2242         context->gva_to_gpa = nonpaging_gva_to_gpa;
2243         context->free = nonpaging_free;
2244         context->prefetch_page = nonpaging_prefetch_page;
2245         context->sync_page = nonpaging_sync_page;
2246         context->invlpg = nonpaging_invlpg;
2247         context->root_level = 0;
2248         context->shadow_root_level = PT32E_ROOT_LEVEL;
2249         context->root_hpa = INVALID_PAGE;
2250         return 0;
2251 }
2252
2253 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2254 {
2255         ++vcpu->stat.tlb_flush;
2256         kvm_x86_ops->tlb_flush(vcpu);
2257 }
2258
2259 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2260 {
2261         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2262         mmu_free_roots(vcpu);
2263 }
2264
2265 static void inject_page_fault(struct kvm_vcpu *vcpu,
2266                               u64 addr,
2267                               u32 err_code)
2268 {
2269         kvm_inject_page_fault(vcpu, addr, err_code);
2270 }
2271
2272 static void paging_free(struct kvm_vcpu *vcpu)
2273 {
2274         nonpaging_free(vcpu);
2275 }
2276
2277 static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2278 {
2279         int bit7;
2280
2281         bit7 = (gpte >> 7) & 1;
2282         return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2283 }
2284
2285 #define PTTYPE 64
2286 #include "paging_tmpl.h"
2287 #undef PTTYPE
2288
2289 #define PTTYPE 32
2290 #include "paging_tmpl.h"
2291 #undef PTTYPE
2292
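/*
 * Precompute the masks consulted by is_rsvd_bits_set():
 * rsvd_bits_mask[i][j] applies to a guest pte at level j+1 whose bit 7
 * (PSE/PAT) equals i, so large-page and 4K entries at the same level
 * can use different masks.
 */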
2293 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2294 {
2295         struct kvm_mmu *context = &vcpu->arch.mmu;
2296         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2297         u64 exb_bit_rsvd = 0;
2298
2299         if (!is_nx(vcpu))
2300                 exb_bit_rsvd = rsvd_bits(63, 63);
2301         switch (level) {
2302         case PT32_ROOT_LEVEL:
2303                 /* no rsvd bits for 2 level 4K page table entries */
2304                 context->rsvd_bits_mask[0][1] = 0;
2305                 context->rsvd_bits_mask[0][0] = 0;
2306                 if (is_cpuid_PSE36())
2307                         /* 36-bit PSE 4MB page */
2308                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2309                 else
2310                         /* 32-bit PSE 4MB page */
2311                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2312                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2313                 break;
2314         case PT32E_ROOT_LEVEL:
2315                 context->rsvd_bits_mask[0][2] =
2316                         rsvd_bits(maxphyaddr, 63) |
2317                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2318                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2319                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2320                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2321                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2322                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2323                         rsvd_bits(maxphyaddr, 62) |
2324                         rsvd_bits(13, 20);              /* large page */
2325                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2326                 break;
2327         case PT64_ROOT_LEVEL:
2328                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2329                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2330                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2331                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2332                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2333                         rsvd_bits(maxphyaddr, 51);
2334                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2335                         rsvd_bits(maxphyaddr, 51);
2336                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2337                 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2338                         rsvd_bits(maxphyaddr, 51) |
2339                         rsvd_bits(13, 29);
2340                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2341                         rsvd_bits(maxphyaddr, 51) |
2342                         rsvd_bits(13, 20);              /* large page */
2343                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2344                 break;
2345         }
2346 }
2347
2348 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2349 {
2350         struct kvm_mmu *context = &vcpu->arch.mmu;
2351
2352         ASSERT(is_pae(vcpu));
2353         context->new_cr3 = paging_new_cr3;
2354         context->page_fault = paging64_page_fault;
2355         context->gva_to_gpa = paging64_gva_to_gpa;
2356         context->prefetch_page = paging64_prefetch_page;
2357         context->sync_page = paging64_sync_page;
2358         context->invlpg = paging64_invlpg;
2359         context->free = paging_free;
2360         context->root_level = level;
2361         context->shadow_root_level = level;
2362         context->root_hpa = INVALID_PAGE;
2363         return 0;
2364 }
2365
2366 static int paging64_init_context(struct kvm_vcpu *vcpu)
2367 {
2368         reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2369         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2370 }
2371
2372 static int paging32_init_context(struct kvm_vcpu *vcpu)
2373 {
2374         struct kvm_mmu *context = &vcpu->arch.mmu;
2375
2376         reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2377         context->new_cr3 = paging_new_cr3;
2378         context->page_fault = paging32_page_fault;
2379         context->gva_to_gpa = paging32_gva_to_gpa;
2380         context->free = paging_free;
2381         context->prefetch_page = paging32_prefetch_page;
2382         context->sync_page = paging32_sync_page;
2383         context->invlpg = paging32_invlpg;
2384         context->root_level = PT32_ROOT_LEVEL;
2385         context->shadow_root_level = PT32E_ROOT_LEVEL;
2386         context->root_hpa = INVALID_PAGE;
2387         return 0;
2388 }
2389
2390 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2391 {
2392         reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2393         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2394 }
2395
2396 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2397 {
2398         struct kvm_mmu *context = &vcpu->arch.mmu;
2399
2400         context->new_cr3 = nonpaging_new_cr3;
2401         context->page_fault = tdp_page_fault;
2402         context->free = nonpaging_free;
2403         context->prefetch_page = nonpaging_prefetch_page;
2404         context->sync_page = nonpaging_sync_page;
2405         context->invlpg = nonpaging_invlpg;
2406         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2407         context->root_hpa = INVALID_PAGE;
2408
2409         if (!is_paging(vcpu)) {
2410                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2411                 context->root_level = 0;
2412         } else if (is_long_mode(vcpu)) {
2413                 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2414                 context->gva_to_gpa = paging64_gva_to_gpa;
2415                 context->root_level = PT64_ROOT_LEVEL;
2416         } else if (is_pae(vcpu)) {
2417                 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2418                 context->gva_to_gpa = paging64_gva_to_gpa;
2419                 context->root_level = PT32E_ROOT_LEVEL;
2420         } else {
2421                 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2422                 context->gva_to_gpa = paging32_gva_to_gpa;
2423                 context->root_level = PT32_ROOT_LEVEL;
2424         }
2425
2426         return 0;
2427 }
2428
2429 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2430 {
2431         int r;
2432
2433         ASSERT(vcpu);
2434         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2435
2436         if (!is_paging(vcpu))
2437                 r = nonpaging_init_context(vcpu);
2438         else if (is_long_mode(vcpu))
2439                 r = paging64_init_context(vcpu);
2440         else if (is_pae(vcpu))
2441                 r = paging32E_init_context(vcpu);
2442         else
2443                 r = paging32_init_context(vcpu);
2444
2445         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2446
2447         return r;
2448 }
2449
2450 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2451 {
2452         vcpu->arch.update_pte.pfn = bad_pfn;
2453
2454         if (tdp_enabled)
2455                 return init_kvm_tdp_mmu(vcpu);
2456         else
2457                 return init_kvm_softmmu(vcpu);
2458 }
2459
2460 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2461 {
2462         ASSERT(vcpu);
2463         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2464                 vcpu->arch.mmu.free(vcpu);
2465                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2466         }
2467 }
2468
2469 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2470 {
2471         destroy_kvm_mmu(vcpu);
2472         return init_kvm_mmu(vcpu);
2473 }
2474 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2475
2476 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2477 {
2478         int r;
2479
2480         r = mmu_topup_memory_caches(vcpu);
2481         if (r)
2482                 goto out;
2483         spin_lock(&vcpu->kvm->mmu_lock);
2484         kvm_mmu_free_some_pages(vcpu);
2485         r = mmu_alloc_roots(vcpu);
2486         mmu_sync_roots(vcpu);
2487         spin_unlock(&vcpu->kvm->mmu_lock);
2488         if (r)
2489                 goto out;
2490         /* set_cr3() should ensure TLB has been flushed */
2491         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2492 out:
2493         return r;
2494 }
2495 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2496
2497 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2498 {
2499         mmu_free_roots(vcpu);
2500 }
2501
2502 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2503                                   struct kvm_mmu_page *sp,
2504                                   u64 *spte)
2505 {
2506         u64 pte;
2507         struct kvm_mmu_page *child;
2508
2509         pte = *spte;
2510         if (is_shadow_present_pte(pte)) {
2511                 if (is_last_spte(pte, sp->role.level))
2512                         rmap_remove(vcpu->kvm, spte);
2513                 else {
2514                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2515                         mmu_page_remove_parent_pte(child, spte);
2516                 }
2517         }
2518         __set_spte(spte, shadow_trap_nonpresent_pte);
2519         if (is_large_pte(pte))
2520                 --vcpu->kvm->stat.lpages;
2521 }
2522
2523 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2524                                   struct kvm_mmu_page *sp,
2525                                   u64 *spte,
2526                                   const void *new)
2527 {
2528         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2529                 ++vcpu->kvm->stat.mmu_pde_zapped;
2530                 return;
2531         }
2532
2533         ++vcpu->kvm->stat.mmu_pte_updated;
2534         if (sp->role.glevels == PT32_ROOT_LEVEL)
2535                 paging32_update_pte(vcpu, sp, spte, new);
2536         else
2537                 paging64_update_pte(vcpu, sp, spte, new);
2538 }
2539
2540 static bool need_remote_flush(u64 old, u64 new)
2541 {
2542         if (!is_shadow_present_pte(old))
2543                 return false;
2544         if (!is_shadow_present_pte(new))
2545                 return true;
2546         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2547                 return true;
2548         old ^= PT64_NX_MASK;
2549         new ^= PT64_NX_MASK;
2550         return (old & ~new & PT64_PERM_MASK) != 0;
2551 }
2552
2553 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2554 {
2555         if (need_remote_flush(old, new))
2556                 kvm_flush_remote_tlbs(vcpu->kvm);
2557         else
2558                 kvm_mmu_flush_tlb(vcpu);
2559 }
2560
2561 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2562 {
2563         u64 *spte = vcpu->arch.last_pte_updated;
2564
2565         return !!(spte && (*spte & shadow_accessed_mask));
2566 }
2567
2568 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2569                                           const u8 *new, int bytes)
2570 {
2571         gfn_t gfn;
2572         int r;
2573         u64 gpte = 0;
2574         pfn_t pfn;
2575
2576         if (bytes != 4 && bytes != 8)
2577                 return;
2578
2579         /*
2580          * Assume that the pte write is on a page table of the same type
2581          * as the current vcpu paging mode.  This is nearly always true
2582          * (might be false while changing modes).  Note it is verified later
2583          * by update_pte().
2584          */
2585         if (is_pae(vcpu)) {
2586                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2587                 if ((bytes == 4) && (gpa % 4 == 0)) {
2588                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2589                         if (r)
2590                                 return;
2591                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2592                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2593                         memcpy((void *)&gpte, new, 8);
2594                 }
2595         } else {
2596                 if ((bytes == 4) && (gpa % 4 == 0))
2597                         memcpy((void *)&gpte, new, 4);
2598         }
2599         if (!is_present_gpte(gpte))
2600                 return;
2601         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2602
2603         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2604         smp_rmb();
2605         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2606
2607         if (is_error_pfn(pfn)) {
2608                 kvm_release_pfn_clean(pfn);
2609                 return;
2610         }
2611         vcpu->arch.update_pte.gfn = gfn;
2612         vcpu->arch.update_pte.pfn = pfn;
2613 }
2614
2615 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2616 {
2617         u64 *spte = vcpu->arch.last_pte_updated;
2618
2619         if (spte
2620             && vcpu->arch.last_pte_gfn == gfn
2621             && shadow_accessed_mask
2622             && !(*spte & shadow_accessed_mask)
2623             && is_shadow_present_pte(*spte))
2624                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2625 }
2626
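/*
 * Called for emulated writes that may touch a shadowed guest page
 * table: update the affected sptes in place where possible, and zap
 * shadow pages whose writes look misaligned or flooded (they are
 * probably no longer used as page tables).
 */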
2627 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2628                        const u8 *new, int bytes,
2629                        bool guest_initiated)
2630 {
2631         gfn_t gfn = gpa >> PAGE_SHIFT;
2632         struct kvm_mmu_page *sp;
2633         struct hlist_node *node, *n;
2634         struct hlist_head *bucket;
2635         unsigned index;
2636         u64 entry, gentry;
2637         u64 *spte;
2638         unsigned offset = offset_in_page(gpa);
2639         unsigned pte_size;
2640         unsigned page_offset;
2641         unsigned misaligned;
2642         unsigned quadrant;
2643         int level;
2644         int flooded = 0;
2645         int npte;
2646         int r;
2647
2648         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2649         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2650         spin_lock(&vcpu->kvm->mmu_lock);
2651         kvm_mmu_access_page(vcpu, gfn);
2652         kvm_mmu_free_some_pages(vcpu);
2653         ++vcpu->kvm->stat.mmu_pte_write;
2654         kvm_mmu_audit(vcpu, "pre pte write");
2655         if (guest_initiated) {
2656                 if (gfn == vcpu->arch.last_pt_write_gfn
2657                     && !last_updated_pte_accessed(vcpu)) {
2658                         ++vcpu->arch.last_pt_write_count;
2659                         if (vcpu->arch.last_pt_write_count >= 3)
2660                                 flooded = 1;
2661                 } else {
2662                         vcpu->arch.last_pt_write_gfn = gfn;
2663                         vcpu->arch.last_pt_write_count = 1;
2664                         vcpu->arch.last_pte_updated = NULL;
2665                 }
2666         }
2667         index = kvm_page_table_hashfn(gfn);
2668         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2669         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2670                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2671                         continue;
2672                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2673                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2674                 misaligned |= bytes < 4;
2675                 if (misaligned || flooded) {
2676                         /*
2677                          * Misaligned accesses are too much trouble to fix
2678                          * up; also, they usually indicate a page is not used
2679                          * as a page table.
2680                          *
2681                          * If we're seeing too many writes to a page,
2682                          * it may no longer be a page table, or we may be
2683                          * forking, in which case it is better to unmap the
2684                          * page.
2685                          */
2686                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2687                                  gpa, bytes, sp->role.word);
2688                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2689                                 n = bucket->first;
2690                         ++vcpu->kvm->stat.mmu_flooded;
2691                         continue;
2692                 }
2693                 page_offset = offset;
2694                 level = sp->role.level;
2695                 npte = 1;
2696                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2697                         page_offset <<= 1;      /* 32->64 */
2698                         /*
2699                          * A 32-bit pde maps 4MB while the shadow pdes map
2700                          * only 2MB.  So we need to double the offset again
2701                          * and zap two pdes instead of one.
2702                          */
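                        /*
                         * E.g. a 4-byte write at offset 0x204 of a
                         * 32-bit page directory: page_offset becomes
                         * 0x408 after the first doubling and 0x810
                         * after the second, i.e. quadrant 0 and shadow
                         * pdes 258 and 259 (npte == 2).
                         */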
2703                         if (level == PT32_ROOT_LEVEL) {
2704                                 page_offset &= ~7; /* kill rounding error */
2705                                 page_offset <<= 1;
2706                                 npte = 2;
2707                         }
2708                         quadrant = page_offset >> PAGE_SHIFT;
2709                         page_offset &= ~PAGE_MASK;
2710                         if (quadrant != sp->role.quadrant)
2711                                 continue;
2712                 }
2713                 spte = &sp->spt[page_offset / sizeof(*spte)];
2714                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2715                         gentry = 0;
2716                         r = kvm_read_guest_atomic(vcpu->kvm,
2717                                                   gpa & ~(u64)(pte_size - 1),
2718                                                   &gentry, pte_size);
2719                         new = (const void *)&gentry;
2720                         if (r < 0)
2721                                 new = NULL;
2722                 }
2723                 while (npte--) {
2724                         entry = *spte;
2725                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2726                         if (new)
2727                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2728                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2729                         ++spte;
2730                 }
2731         }
2732         kvm_mmu_audit(vcpu, "post pte write");
2733         spin_unlock(&vcpu->kvm->mmu_lock);
2734         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2735                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2736                 vcpu->arch.update_pte.pfn = bad_pfn;
2737         }
2738 }
2739
2740 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2741 {
2742         gpa_t gpa;
2743         int r;
2744
2745         if (tdp_enabled)
2746                 return 0;
2747
2748         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2749
2750         spin_lock(&vcpu->kvm->mmu_lock);
2751         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2752         spin_unlock(&vcpu->kvm->mmu_lock);
2753         return r;
2754 }
2755 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2756
2757 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2758 {
2759         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
2760                !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2761                 struct kvm_mmu_page *sp;
2762
2763                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2764                                   struct kvm_mmu_page, link);
2765                 kvm_mmu_zap_page(vcpu->kvm, sp);
2766                 ++vcpu->kvm->stat.mmu_recycled;
2767         }
2768 }
2769
2770 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2771 {
2772         int r;
2773         enum emulation_result er;
2774
2775         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2776         if (r < 0)
2777                 goto out;
2778
2779         if (!r) {
2780                 r = 1;
2781                 goto out;
2782         }
2783
2784         r = mmu_topup_memory_caches(vcpu);
2785         if (r)
2786                 goto out;
2787
2788         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2789
2790         switch (er) {
2791         case EMULATE_DONE:
2792                 return 1;
2793         case EMULATE_DO_MMIO:
2794                 ++vcpu->stat.mmio_exits;
2795                 return 0;
2796         case EMULATE_FAIL:
2797                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2798                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2799                 return 0;
2800         default:
2801                 BUG();
2802         }
2803 out:
2804         return r;
2805 }
2806 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2807
2808 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2809 {
2810         vcpu->arch.mmu.invlpg(vcpu, gva);
2811         kvm_mmu_flush_tlb(vcpu);
2812         ++vcpu->stat.invlpg;
2813 }
2814 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2815
2816 void kvm_enable_tdp(void)
2817 {
2818         tdp_enabled = true;
2819 }
2820 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2821
2822 void kvm_disable_tdp(void)
2823 {
2824         tdp_enabled = false;
2825 }
2826 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2827
2828 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2829 {
2830         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2831 }
2832
2833 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2834 {
2835         struct page *page;
2836         int i;
2837
2838         ASSERT(vcpu);
2839
2840         /*
2841          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2842          * Therefore we need to allocate shadow page tables in the first
2843          * 4GB of memory, which happens to fit the DMA32 zone.
2844          */
2845         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2846         if (!page)
2847                 goto error_1;
2848         vcpu->arch.mmu.pae_root = page_address(page);
2849         for (i = 0; i < 4; ++i)
2850                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2851
2852         return 0;
2853
2854 error_1:
2855         free_mmu_pages(vcpu);
2856         return -ENOMEM;
2857 }
2858
2859 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2860 {
2861         ASSERT(vcpu);
2862         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2863
2864         return alloc_mmu_pages(vcpu);
2865 }
2866
2867 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2868 {
2869         ASSERT(vcpu);
2870         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2871
2872         return init_kvm_mmu(vcpu);
2873 }
2874
2875 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2876 {
2877         ASSERT(vcpu);
2878
2879         destroy_kvm_mmu(vcpu);
2880         free_mmu_pages(vcpu);
2881         mmu_free_memory_caches(vcpu);
2882 }
2883
2884 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2885 {
2886         struct kvm_mmu_page *sp;
2887
2888         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2889                 int i;
2890                 u64 *pt;
2891
2892                 if (!test_bit(slot, sp->slot_bitmap))
2893                         continue;
2894
2895                 pt = sp->spt;
2896                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2897                         /* avoid RMW */
2898                         if (pt[i] & PT_WRITABLE_MASK)
2899                                 pt[i] &= ~PT_WRITABLE_MASK;
2900         }
2901         kvm_flush_remote_tlbs(kvm);
2902 }
2903
2904 void kvm_mmu_zap_all(struct kvm *kvm)
2905 {
2906         struct kvm_mmu_page *sp, *node;
2907
2908         spin_lock(&kvm->mmu_lock);
2909         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2910                 if (kvm_mmu_zap_page(kvm, sp))
2911                         node = container_of(kvm->arch.active_mmu_pages.next,
2912                                             struct kvm_mmu_page, link);
2913         spin_unlock(&kvm->mmu_lock);
2914
2915         kvm_flush_remote_tlbs(kvm);
2916 }
2917
2918 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2919 {
2920         struct kvm_mmu_page *page;
2921
2922         page = container_of(kvm->arch.active_mmu_pages.prev,
2923                             struct kvm_mmu_page, link);
2924         kvm_mmu_zap_page(kvm, page);
2925 }
2926
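/*
 * Shrinker callback: under memory pressure, free one shadow page from
 * one VM per invocation (rotating VMs via list_move_tail) and report
 * the total number of allocated shadow pages back to the VM subsystem.
 */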
2927 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2928 {
2929         struct kvm *kvm;
2930         struct kvm *kvm_freed = NULL;
2931         int cache_count = 0;
2932
2933         spin_lock(&kvm_lock);
2934
2935         list_for_each_entry(kvm, &vm_list, vm_list) {
2936                 int npages;
2937
2938                 if (!down_read_trylock(&kvm->slots_lock))
2939                         continue;
2940                 spin_lock(&kvm->mmu_lock);
2941                 npages = kvm->arch.n_alloc_mmu_pages -
2942                          kvm->arch.n_free_mmu_pages;
2943                 cache_count += npages;
2944                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2945                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2946                         cache_count--;
2947                         kvm_freed = kvm;
2948                 }
2949                 nr_to_scan--;
2950
2951                 spin_unlock(&kvm->mmu_lock);
2952                 up_read(&kvm->slots_lock);
2953         }
2954         if (kvm_freed)
2955                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2956
2957         spin_unlock(&kvm_lock);
2958
2959         return cache_count;
2960 }
2961
2962 static struct shrinker mmu_shrinker = {
2963         .shrink = mmu_shrink,
2964         .seeks = DEFAULT_SEEKS * 10,
2965 };
2966
2967 static void mmu_destroy_caches(void)
2968 {
2969         if (pte_chain_cache)
2970                 kmem_cache_destroy(pte_chain_cache);
2971         if (rmap_desc_cache)
2972                 kmem_cache_destroy(rmap_desc_cache);
2973         if (mmu_page_header_cache)
2974                 kmem_cache_destroy(mmu_page_header_cache);
2975 }
2976
2977 void kvm_mmu_module_exit(void)
2978 {
2979         mmu_destroy_caches();
2980         unregister_shrinker(&mmu_shrinker);
2981 }
2982
2983 int kvm_mmu_module_init(void)
2984 {
2985         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2986                                             sizeof(struct kvm_pte_chain),
2987                                             0, 0, NULL);
2988         if (!pte_chain_cache)
2989                 goto nomem;
2990         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2991                                             sizeof(struct kvm_rmap_desc),
2992                                             0, 0, NULL);
2993         if (!rmap_desc_cache)
2994                 goto nomem;
2995
2996         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2997                                                   sizeof(struct kvm_mmu_page),
2998                                                   0, 0, NULL);
2999         if (!mmu_page_header_cache)
3000                 goto nomem;
3001
3002         register_shrinker(&mmu_shrinker);
3003
3004         return 0;
3005
3006 nomem:
3007         mmu_destroy_caches();
3008         return -ENOMEM;
3009 }
3010
3011 /*
3012  * Calculate mmu pages needed for kvm.
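 * The budget is KVM_PERMILLE_MMU_PAGES per mille of the guest's memory
 * pages, but at least KVM_MIN_ALLOC_MMU_PAGES; e.g. with the default
 * 20/1000 ratio, a guest with 262144 pages (1GB) gets 5242 mmu pages.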
3013  */
3014 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3015 {
3016         int i;
3017         unsigned int nr_mmu_pages;
3018         unsigned int  nr_pages = 0;
3019
3020         for (i = 0; i < kvm->nmemslots; i++)
3021                 nr_pages += kvm->memslots[i].npages;
3022
3023         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3024         nr_mmu_pages = max(nr_mmu_pages,
3025                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3026
3027         return nr_mmu_pages;
3028 }
3029
3030 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3031                                 unsigned len)
3032 {
3033         if (len > buffer->len)
3034                 return NULL;
3035         return buffer->ptr;
3036 }
3037
3038 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3039                                 unsigned len)
3040 {
3041         void *ret;
3042
3043         ret = pv_mmu_peek_buffer(buffer, len);
3044         if (!ret)
3045                 return ret;
3046         buffer->ptr += len;
3047         buffer->len -= len;
3048         buffer->processed += len;
3049         return ret;
3050 }
3051
3052 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3053                              gpa_t addr, gpa_t value)
3054 {
3055         int bytes = 8;
3056         int r;
3057
3058         if (!is_long_mode(vcpu) && !is_pae(vcpu))
3059                 bytes = 4;
3060
3061         r = mmu_topup_memory_caches(vcpu);
3062         if (r)
3063                 return r;
3064
3065         if (!emulator_write_phys(vcpu, addr, &value, bytes))
3066                 return -EFAULT;
3067
3068         return 1;
3069 }
3070
3071 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3072 {
3073         kvm_set_cr3(vcpu, vcpu->arch.cr3);
3074         return 1;
3075 }
3076
3077 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3078 {
3079         spin_lock(&vcpu->kvm->mmu_lock);
3080         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3081         spin_unlock(&vcpu->kvm->mmu_lock);
3082         return 1;
3083 }
3084
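/*
 * Decode and run one paravirtual mmu op from the copied-in buffer.
 * Returns a positive value to keep processing, 0 when the buffer is
 * exhausted (or the op is unknown), or a negative error.
 */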
3085 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3086                              struct kvm_pv_mmu_op_buffer *buffer)
3087 {
3088         struct kvm_mmu_op_header *header;
3089
3090         header = pv_mmu_peek_buffer(buffer, sizeof *header);
3091         if (!header)
3092                 return 0;
3093         switch (header->op) {
3094         case KVM_MMU_OP_WRITE_PTE: {
3095                 struct kvm_mmu_op_write_pte *wpte;
3096
3097                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3098                 if (!wpte)
3099                         return 0;
3100                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3101                                         wpte->pte_val);
3102         }
3103         case KVM_MMU_OP_FLUSH_TLB: {
3104                 struct kvm_mmu_op_flush_tlb *ftlb;
3105
3106                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3107                 if (!ftlb)
3108                         return 0;
3109                 return kvm_pv_mmu_flush_tlb(vcpu);
3110         }
3111         case KVM_MMU_OP_RELEASE_PT: {
3112                 struct kvm_mmu_op_release_pt *rpt;
3113
3114                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3115                 if (!rpt)
3116                         return 0;
3117                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3118         }
3119         default: return 0;
3120         }
3121 }
3122
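/*
 * Entry point for the batched MMU-op hypercall.  The guest hands us
 * the gpa of a buffer packed with variable-sized ops, each starting
 * with a struct kvm_mmu_op_header, roughly (sketch):
 *
 *	| op = WRITE_PTE, pte_phys, pte_val | op = FLUSH_TLB | ...
 *
 * We copy at most sizeof(buffer->buf) bytes in one go, process ops
 * until one fails or the data runs out, and report the number of
 * bytes consumed back through *ret.
 */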
3123 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3124                   gpa_t addr, unsigned long *ret)
3125 {
3126         int r;
3127         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3128
3129         buffer->ptr = buffer->buf;
3130         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3131         buffer->processed = 0;
3132
3133         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3134         if (r)
3135                 goto out;
3136
3137         while (buffer->len) {
3138                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3139                 if (r < 0)
3140                         goto out;
3141                 if (r == 0)
3142                         break;
3143         }
3144
3145         r = 1;
3146 out:
3147         *ret = buffer->processed;
3148         return r;
3149 }
3150
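/*
 * Snapshot the shadow walk for @addr: one spte per level, stored at
 * sptes[level - 1], stopping after the first not-present entry.
 * Returns the number of entries captured.  Mainly useful for
 * diagnostics, e.g. dumping the hierarchy when a walk goes wrong.
 */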
3151 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3152 {
3153         struct kvm_shadow_walk_iterator iterator;
3154         int nr_sptes = 0;
3155
3156         spin_lock(&vcpu->kvm->mmu_lock);
3157         for_each_shadow_entry(vcpu, addr, iterator) {
3158                 sptes[iterator.level-1] = *iterator.sptep;
3159                 nr_sptes++;
3160                 if (!is_shadow_present_pte(*iterator.sptep))
3161                         break;
3162         }
3163         spin_unlock(&vcpu->kvm->mmu_lock);
3164
3165         return nr_sptes;
3166 }
3167 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3168
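/*
 * Audit support: when built with AUDIT defined, kvm_mmu_audit()
 * cross-checks the shadow page tables against the guest page tables
 * and the rmap chains, and complains about any inconsistency it finds.
 */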
3169 #ifdef AUDIT
3170
3171 static const char *audit_msg;
3172
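/*
 * Sign-extend bit 47 so the address we feed back into the guest walker
 * is in canonical x86-64 form; a no-op on 32-bit hosts.
 */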
3173 static gva_t canonicalize(gva_t gva)
3174 {
3175 #ifdef CONFIG_X86_64
3176         gva = (long long)(gva << 16) >> 16;
3177 #endif
3178         return gva;
3179 }
3180
3181
3182 typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
3183                                  u64 *sptep);
3184
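/*
 * Recursively walk a shadow page, descending through non-leaf entries
 * and applying @fn to every present leaf spte.  mmu_spte_walk() below
 * seeds the recursion from the current root(s): either the single
 * 64-bit root or the four PAE roots.
 */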
3185 static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3186                             inspect_spte_fn fn)
3187 {
3188         int i;
3189
3190         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3191                 u64 ent = sp->spt[i];
3192
3193                 if (is_shadow_present_pte(ent)) {
3194                         if (!is_last_spte(ent, sp->role.level)) {
3195                                 struct kvm_mmu_page *child;
3196                                 child = page_header(ent & PT64_BASE_ADDR_MASK);
3197                                 __mmu_spte_walk(kvm, child, fn);
3198                         } else
3199                                 fn(kvm, sp, &sp->spt[i]);
3200                 }
3201         }
3202 }
3203
3204 static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3205 {
3206         int i;
3207         struct kvm_mmu_page *sp;
3208
3209         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3210                 return;
3211         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3212                 hpa_t root = vcpu->arch.mmu.root_hpa;
3213                 sp = page_header(root);
3214                 __mmu_spte_walk(vcpu->kvm, sp, fn);
3215                 return;
3216         }
3217         for (i = 0; i < 4; ++i) {
3218                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3219
3220                 if (root && VALID_PAGE(root)) {
3221                         root &= PT64_BASE_ADDR_MASK;
3222                         sp = page_header(root);
3223                         __mmu_spte_walk(vcpu->kvm, sp, fn);
3224                 }
3225         }
3226         return;
3227 }
3228
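/*
 * Compare the shadow translation of every virtual address covered by
 * this page against the guest's own translation (gva_to_gpa +
 * gfn_to_pfn), and report sptes that point at the wrong host page or
 * notrap entries that shadow a valid guest mapping.
 */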
3229 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3230                                 gva_t va, int level)
3231 {
3232         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3233         int i;
3234         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3235
3236         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3237                 u64 ent = pt[i];
3238
3239                 if (ent == shadow_trap_nonpresent_pte)
3240                         continue;
3241
3242                 va = canonicalize(va);
3243                 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3244                         audit_mappings_page(vcpu, ent, va, level - 1);
3245                 else {
3246                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3247                         gfn_t gfn = gpa >> PAGE_SHIFT;
3248                         pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3249                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3250
3251                         if (is_error_pfn(pfn)) {
3252                                 kvm_release_pfn_clean(pfn);
3253                                 continue;
3254                         }
3255
3256                         if (is_shadow_present_pte(ent)
3257                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3258                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
3259                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3260                                        audit_msg, vcpu->arch.mmu.root_level,
3261                                        va, gpa, hpa, ent,
3262                                        is_shadow_present_pte(ent));
3263                         else if (ent == shadow_notrap_nonpresent_pte
3264                                  && !is_error_hpa(hpa))
3265                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3266                                        " valid guest gva %lx\n", audit_msg, va);
3267                         kvm_release_pfn_clean(pfn);
3268
3269                 }
3270         }
3271 }
3272
3273 static void audit_mappings(struct kvm_vcpu *vcpu)
3274 {
3275         unsigned i;
3276
3277         if (vcpu->arch.mmu.root_level == 4)
3278                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3279         else
3280                 for (i = 0; i < 4; ++i)
3281                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3282                                 audit_mappings_page(vcpu,
3283                                                     vcpu->arch.mmu.pae_root[i],
3284                                                     i << 30,
3285                                                     2);
3286 }
3287
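/*
 * Count every rmap entry in the VM.  A slot's rmap word either points
 * directly at a single spte or, when bit 0 is set, at a chain of
 * kvm_rmap_desc structures holding up to RMAP_EXT sptes each.
 */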
3288 static int count_rmaps(struct kvm_vcpu *vcpu)
3289 {
3290         int nmaps = 0;
3291         int i, j, k;
3292
3293         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3294                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3295                 struct kvm_rmap_desc *d;
3296
3297                 for (j = 0; j < m->npages; ++j) {
3298                         unsigned long *rmapp = &m->rmap[j];
3299
3300                         if (!*rmapp)
3301                                 continue;
3302                         if (!(*rmapp & 1)) {
3303                                 ++nmaps;
3304                                 continue;
3305                         }
3306                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3307                         while (d) {
3308                                 for (k = 0; k < RMAP_EXT; ++k)
3309                                         if (d->sptes[k])
3310                                                 ++nmaps;
3311                                         else
3312                                                 break;
3313                                 d = d->more;
3314                         }
3315                 }
3316         }
3317         return nmaps;
3318 }
3319
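/*
 * For a writable spte, verify that the gfn it maps still has a memslot
 * and that the gfn's rmap is non-empty; complain (rate limited) if
 * either check fails.
 */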
3320 void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
3321 {
3322         unsigned long *rmapp;
3323         struct kvm_mmu_page *rev_sp;
3324         gfn_t gfn;
3325
3326         if (*sptep & PT_WRITABLE_MASK) {
3327                 rev_sp = page_header(__pa(sptep));
3328                 gfn = rev_sp->gfns[sptep - rev_sp->spt];
3329
3330                 if (!gfn_to_memslot(kvm, gfn)) {
3331                         if (!printk_ratelimit())
3332                                 return;
3333                         printk(KERN_ERR "%s: no memslot for gfn %lx\n",
3334                                          audit_msg, gfn);
3335                         printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
3336                                         audit_msg, sptep - rev_sp->spt,
3337                                         rev_sp->gfn);
3338                         dump_stack();
3339                         return;
3340                 }
3341
3342                 rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
3343                                     is_large_pte(*sptep));
3344                 if (!*rmapp) {
3345                         if (!printk_ratelimit())
3346                                 return;
3347                         printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3348                                          audit_msg, *sptep);
3349                         dump_stack();
3350                 }
3351         }
3352
3353 }
3354
3355 void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3356 {
3357         mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3358 }
3359
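/*
 * Run the rmap check above over every writable pte of every last-level
 * shadow page.
 */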
3360 static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3361 {
3362         struct kvm_mmu_page *sp;
3363         int i;
3364
3365         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3366                 u64 *pt = sp->spt;
3367
3368                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3369                         continue;
3370
3371                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3372                         u64 ent = pt[i];
3373
3374                         if (!(ent & PT_PRESENT_MASK))
3375                                 continue;
3376                         if (!(ent & PT_WRITABLE_MASK))
3377                                 continue;
3378                         inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
3379                 }
3380         }
3381         return;
3382 }
3383
3384 static void audit_rmap(struct kvm_vcpu *vcpu)
3385 {
3386         check_writable_mappings_rmap(vcpu);
3387         count_rmaps(vcpu);
3388 }
3389
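/*
 * Shadowed (non-direct, synced) guest page tables must be write
 * protected; flag any writable spte that still maps such a page.
 */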
3390 static void audit_write_protection(struct kvm_vcpu *vcpu)
3391 {
3392         struct kvm_mmu_page *sp;
3393         struct kvm_memory_slot *slot;
3394         unsigned long *rmapp;
3395         u64 *spte;
3396         gfn_t gfn;
3397
3398         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3399                 if (sp->role.direct)
3400                         continue;
3401                 if (sp->unsync)
3402                         continue;
3403
3404                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3405                 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3406                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3407
3408                 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3409                 while (spte) {
3410                         if (*spte & PT_WRITABLE_MASK)
3411                                 printk(KERN_ERR "%s: (%s) shadow page has "
3412                                 "writable mappings: gfn %lx role %x\n",
3413                                __func__, audit_msg, sp->gfn,
3414                                sp->role.word);
3415                         spte = rmap_next(vcpu->kvm, rmapp, spte);
3416                 }
3417         }
3418 }
3419
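/*
 * Top-level audit hook.  Debug printing is temporarily disabled so the
 * checks themselves stay quiet; the mapping audit is skipped at the
 * "pre pte write" point, presumably because shadow and guest ptes can
 * legitimately disagree for a moment there.
 */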
3420 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3421 {
3422         int olddbg = dbg;
3423
3424         dbg = 0;
3425         audit_msg = msg;
3426         audit_rmap(vcpu);
3427         audit_write_protection(vcpu);
3428         if (strcmp("pre pte write", audit_msg) != 0)
3429                 audit_mappings(vcpu);
3430         audit_writable_sptes_have_rmaps(vcpu);
3431         dbg = olddbg;
3432 }
3433
3434 #endif