KVM: MMU audit: update audit_write_protection
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 32cf11e..4c2585c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
  */
 
 #include "mmu.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -126,6 +127,7 @@ module_param(oos_shadow, bool, 0644);
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
 #define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
 
 #define PT_DIRECTORY_LEVEL 2
@@ -141,7 +143,7 @@ module_param(oos_shadow, bool, 0644);
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
-       u64 *shadow_ptes[RMAP_EXT];
+       u64 *sptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
 };
 
@@ -177,7 +179,11 @@ static u64 __read_mostly shadow_x_mask;    /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mt_mask;
+
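+/*
+ * Build a mask with bits s..e (inclusive) set; for example,
+ * rsvd_bits(13, 21) == 0x3fe000.  Used below to describe which
+ * bits of a guest pte are reserved in each paging mode.
+ */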
+static inline u64 rsvd_bits(int s, int e)
+{
+       return ((1ULL << (e - s + 1)) - 1) << s;
+}
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -193,14 +199,13 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-               u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
+               u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
-       shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -219,11 +224,6 @@ static int is_nx(struct kvm_vcpu *vcpu)
        return vcpu->arch.shadow_efer & EFER_NX;
 }
 
-static int is_present_pte(unsigned long pte)
-{
-       return pte & PT_PRESENT_MASK;
-}
-
 static int is_shadow_present_pte(u64 pte)
 {
        return pte != shadow_trap_nonpresent_pte
@@ -240,16 +240,25 @@ static int is_writeble_pte(unsigned long pte)
        return pte & PT_WRITABLE_MASK;
 }
 
-static int is_dirty_pte(unsigned long pte)
+static int is_dirty_gpte(unsigned long pte)
 {
-       return pte & shadow_dirty_mask;
+       return pte & PT_DIRTY_MASK;
 }
 
-static int is_rmap_pte(u64 pte)
+static int is_rmap_spte(u64 pte)
 {
        return is_shadow_present_pte(pte);
 }
 
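+/*
+ * A "last" spte maps a page frame directly: either a level-1 (4K) pte,
+ * or a level-2 pte with the large-page (PS) bit set.
+ */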
+static int is_last_spte(u64 pte, int level)
+{
+       if (level == PT_PAGE_TABLE_LEVEL)
+               return 1;
+       if (level == PT_DIRECTORY_LEVEL && is_large_pte(pte))
+               return 1;
+       return 0;
+}
+
 static pfn_t spte_to_pfn(u64 pte)
 {
        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -262,7 +271,7 @@ static gfn_t pse36_gfn_delta(u32 gpte)
        return (gpte & PT32_DIR_PSE36_MASK) << shift;
 }
 
-static void set_shadow_pte(u64 *sptep, u64 spte)
+static void __set_spte(u64 *sptep, u64 spte)
 {
 #ifdef CONFIG_X86_64
        set_64bit((unsigned long *)sptep, spte);
@@ -490,16 +499,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
  *
  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
+ *
+ * Returns the number of rmap entries before the spte was added or zero if
+ * the spte was not added.
+ *
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
-       int i;
+       int i, count = 0;
 
-       if (!is_rmap_pte(*spte))
-               return;
+       if (!is_rmap_spte(*spte))
+               return count;
        gfn = unalias_gfn(vcpu->kvm, gfn);
        sp = page_header(__pa(spte));
        sp->gfns[spte - sp->spt] = gfn;
@@ -510,22 +523,25 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
-               desc->shadow_ptes[0] = (u64 *)*rmapp;
-               desc->shadow_ptes[1] = spte;
+               desc->sptes[0] = (u64 *)*rmapp;
+               desc->sptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-               while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+               while (desc->sptes[RMAP_EXT-1] && desc->more) {
                        desc = desc->more;
-               if (desc->shadow_ptes[RMAP_EXT-1]) {
+                       count += RMAP_EXT;
+               }
+               if (desc->sptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
-               for (i = 0; desc->shadow_ptes[i]; ++i)
+               for (i = 0; desc->sptes[i]; ++i)
                        ;
-               desc->shadow_ptes[i] = spte;
+               desc->sptes[i] = spte;
        }
+       return count;
 }
 
 static void rmap_desc_remove_entry(unsigned long *rmapp,
@@ -535,14 +551,14 @@ static void rmap_desc_remove_entry(unsigned long *rmapp,
 {
        int j;
 
-       for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+       for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
                ;
-       desc->shadow_ptes[i] = desc->shadow_ptes[j];
-       desc->shadow_ptes[j] = NULL;
+       desc->sptes[i] = desc->sptes[j];
+       desc->sptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
-               *rmapp = (unsigned long)desc->shadow_ptes[0];
+               *rmapp = (unsigned long)desc->sptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
@@ -560,7 +576,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        unsigned long *rmapp;
        int i;
 
-       if (!is_rmap_pte(*spte))
+       if (!is_rmap_spte(*spte))
                return;
        sp = page_header(__pa(spte));
        pfn = spte_to_pfn(*spte);
@@ -587,8 +603,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
-                       for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
-                               if (desc->shadow_ptes[i] == spte) {
+                       for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+                               if (desc->sptes[i] == spte) {
                                        rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
@@ -619,10 +635,10 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
        prev_desc = NULL;
        prev_spte = NULL;
        while (desc) {
-               for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
+               for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
                        if (prev_spte == spte)
-                               return desc->shadow_ptes[i];
-                       prev_spte = desc->shadow_ptes[i];
+                               return desc->sptes[i];
+                       prev_spte = desc->sptes[i];
                }
                desc = desc->more;
        }
@@ -644,7 +660,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writeble_pte(*spte)) {
-                       set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+                       __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
@@ -668,7 +684,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
                if (is_writeble_pte(*spte)) {
                        rmap_remove(kvm, spte);
                        --kvm->stat.lpages;
-                       set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+                       __set_spte(spte, shadow_trap_nonpresent_pte);
                        spte = NULL;
                        write_protected = 1;
                }
@@ -687,7 +703,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
                rmap_remove(kvm, spte);
-               set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+               __set_spte(spte, shadow_trap_nonpresent_pte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
@@ -755,6 +771,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
        return young;
 }
 
+#define RMAP_RECYCLE_THRESHOLD 1000
+
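+/*
+ * Once an rmap chain grows past RMAP_RECYCLE_THRESHOLD entries, zap all
+ * sptes for the gfn and flush remote TLBs; the mappings are recreated on
+ * the next fault, which keeps rmap chains (and their walks) bounded.
+ */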
+static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
+{
+       unsigned long *rmapp;
+
+       gfn = unalias_gfn(vcpu->kvm, gfn);
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+
+       kvm_unmap_rmapp(vcpu->kvm, rmapp);
+       kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
@@ -1074,18 +1103,10 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-       list_del(&sp->oos_link);
-       --kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
-       if (sp->global)
-               kvm_unlink_unsync_global(kvm, sp);
        --kvm->stat.mmu_unsync;
 }
 
@@ -1248,7 +1269,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
-       sp->global = 0;
        hlist_add_head(&sp->hash_link, bucket);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
@@ -1302,25 +1322,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 
        pt = sp->spt;
 
-       if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
-               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                       if (is_shadow_present_pte(pt[i]))
-                               rmap_remove(kvm, &pt[i]);
-                       pt[i] = shadow_trap_nonpresent_pte;
-               }
-               return;
-       }
-
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];
 
                if (is_shadow_present_pte(ent)) {
-                       if (!is_large_pte(ent)) {
+                       if (!is_last_spte(ent, sp->role.level)) {
                                ent &= PT64_BASE_ADDR_MASK;
                                mmu_page_remove_parent_pte(page_header(ent),
                                                           &pt[i]);
                        } else {
-                               --kvm->stat.lpages;
+                               if (is_large_pte(ent))
+                                       --kvm->stat.lpages;
                                rmap_remove(kvm, &pt[i]);
                        }
                }
@@ -1336,10 +1348,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 {
        int i;
+       struct kvm_vcpu *vcpu;
 
-       for (i = 0; i < KVM_MAX_VCPUS; ++i)
-               if (kvm->vcpus[i])
-                       kvm->vcpus[i]->arch.last_pte_updated = NULL;
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.last_pte_updated = NULL;
 }
 
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -1358,7 +1370,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(sp, parent_pte);
-               set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
+               __set_spte(parent_pte, shadow_trap_nonpresent_pte);
        }
 }
 
@@ -1417,24 +1429,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 {
+       int used_pages;
+
+       used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
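+       /* n_free can transiently exceed n_alloc; clamp the difference. */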
+       used_pages = max(0, used_pages);
+
        /*
         * If we set the number of mmu pages to be smaller than the
         * number of active pages, we must free some mmu pages before we
         * change the value.
         */
 
-       if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
-           kvm_nr_mmu_pages) {
-               int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
-                                      - kvm->arch.n_free_mmu_pages;
-
-               while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+       if (used_pages > kvm_nr_mmu_pages) {
+               while (used_pages > kvm_nr_mmu_pages) {
                        struct kvm_mmu_page *page;
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_zap_page(kvm, page);
-                       n_used_mmu_pages--;
+                       used_pages--;
                }
                kvm->arch.n_free_mmu_pages = 0;
        }
@@ -1505,7 +1518,7 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
 
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (pt[i] == shadow_notrap_nonpresent_pte)
-                       set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
+                       __set_spte(&pt[i], shadow_trap_nonpresent_pte);
        }
 }
 
@@ -1616,7 +1629,7 @@ static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
        return mtrr_state->def_type;
 }
 
-static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        u8 mtrr;
 
@@ -1626,6 +1639,7 @@ static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
                mtrr = MTRR_TYPE_WRBACK;
        return mtrr;
 }
+EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1646,11 +1660,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
 
-       if (sp->global) {
-               list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-               ++vcpu->kvm->stat.mmu_unsync_global;
-       } else
-               kvm_mmu_mark_parents_unsync(vcpu, sp);
+       kvm_mmu_mark_parents_unsync(vcpu, sp);
 
        mmu_convert_notrap(sp);
        return 0;
@@ -1674,24 +1684,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
        return 0;
 }
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   int global, gfn_t gfn, pfn_t pfn, bool speculative,
+                   gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
        u64 spte;
        int ret = 0;
-       u64 mt_mask = shadow_mt_mask;
-       struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-       if (!global && sp->global) {
-               sp->global = 0;
-               if (sp->unsync) {
-                       kvm_unlink_unsync_global(vcpu->kvm, sp);
-                       kvm_mmu_mark_parents_unsync(vcpu, sp);
-               }
-       }
 
        /*
         * We don't set the accessed bit, since we sometimes want to see
@@ -1711,16 +1711,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                spte |= shadow_user_mask;
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;
-       if (mt_mask) {
-               if (!kvm_is_mmio_pfn(pfn)) {
-                       mt_mask = get_memory_type(vcpu, gfn) <<
-                               kvm_x86_ops->get_mt_mask_shift();
-                       mt_mask |= VMX_EPT_IGMT_BIT;
-               } else
-                       mt_mask = MTRR_TYPE_UNCACHABLE <<
-                               kvm_x86_ops->get_mt_mask_shift();
-               spte |= mt_mask;
-       }
+       if (tdp_enabled)
+               spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+                       kvm_is_mmio_pfn(pfn));
 
        spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -1741,7 +1734,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                 * is responsibility of mmu_get_page / kvm_sync_page.
                 * Same reasoning can be applied to dirty page accounting.
                 */
-               if (!can_unsync && is_writeble_pte(*shadow_pte))
+               if (!can_unsync && is_writeble_pte(*sptep))
                        goto set_pte;
 
                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1758,62 +1751,65 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-       set_shadow_pte(shadow_pte, spte);
+       __set_spte(sptep, spte);
        return ret;
 }
 
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, int global,
-                        gfn_t gfn, pfn_t pfn, bool speculative)
+                        int *ptwrite, int largepage, gfn_t gfn,
+                        pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
-       int was_writeble = is_writeble_pte(*shadow_pte);
+       int was_writeble = is_writeble_pte(*sptep);
+       int rmap_count;
 
        pgprintk("%s: spte %llx access %x write_fault %d"
                 " user_fault %d gfn %lx\n",
-                __func__, *shadow_pte, pt_access,
+                __func__, *sptep, pt_access,
                 write_fault, user_fault, gfn);
 
-       if (is_rmap_pte(*shadow_pte)) {
+       if (is_rmap_spte(*sptep)) {
                /*
                 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
                 * the parent of the now unreachable PTE.
                 */
-               if (largepage && !is_large_pte(*shadow_pte)) {
+               if (largepage && !is_large_pte(*sptep)) {
                        struct kvm_mmu_page *child;
-                       u64 pte = *shadow_pte;
+                       u64 pte = *sptep;
 
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
-                       mmu_page_remove_parent_pte(child, shadow_pte);
-               } else if (pfn != spte_to_pfn(*shadow_pte)) {
+                       mmu_page_remove_parent_pte(child, sptep);
+               } else if (pfn != spte_to_pfn(*sptep)) {
                        pgprintk("hfn old %lx new %lx\n",
-                                spte_to_pfn(*shadow_pte), pfn);
-                       rmap_remove(vcpu->kvm, shadow_pte);
+                                spte_to_pfn(*sptep), pfn);
+                       rmap_remove(vcpu->kvm, sptep);
                } else
                        was_rmapped = 1;
        }
-       if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, global, gfn, pfn, speculative, true)) {
+       if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
+                     dirty, largepage, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
        }
 
-       pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
+       pgprintk("%s: setting spte %llx\n", __func__, *sptep);
        pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
-                is_large_pte(*shadow_pte)? "2MB" : "4kB",
-                is_present_pte(*shadow_pte)?"RW":"R", gfn,
-                *shadow_pte, shadow_pte);
-       if (!was_rmapped && is_large_pte(*shadow_pte))
+                is_large_pte(*sptep)? "2MB" : "4kB",
+                *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
+                *sptep, sptep);
+       if (!was_rmapped && is_large_pte(*sptep))
                ++vcpu->kvm->stat.lpages;
 
-       page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+       page_header_update_slot(vcpu->kvm, sptep, gfn);
        if (!was_rmapped) {
-               rmap_add(vcpu, shadow_pte, gfn, largepage);
-               if (!is_rmap_pte(*shadow_pte))
+               rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
+               if (!is_rmap_spte(*sptep))
                        kvm_release_pfn_clean(pfn);
+               if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+                       rmap_recycle(vcpu, gfn, largepage);
        } else {
                if (was_writeble)
                        kvm_release_pfn_dirty(pfn);
@@ -1821,7 +1817,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                        kvm_release_pfn_clean(pfn);
        }
        if (speculative) {
-               vcpu->arch.last_pte_updated = shadow_pte;
+               vcpu->arch.last_pte_updated = sptep;
                vcpu->arch.last_pte_gfn = gfn;
        }
 }
@@ -1843,7 +1839,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
-                                    largepage, 0, gfn, pfn, false);
+                                    largepage, gfn, pfn, false);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
@@ -1859,10 +1855,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                                return -ENOMEM;
                        }
 
-                       set_shadow_pte(iterator.sptep,
-                                      __pa(sp->spt)
-                                      | PT_PRESENT_MASK | PT_WRITABLE_MASK
-                                      | shadow_user_mask | shadow_x_mask);
+                       __set_spte(iterator.sptep,
+                                  __pa(sp->spt)
+                                  | PT_PRESENT_MASK | PT_WRITABLE_MASK
+                                  | shadow_user_mask | shadow_x_mask);
                }
        }
        return pt_write;
@@ -1942,12 +1938,25 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
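+/*
+ * A root gfn with no backing memslot means the guest loaded a bogus
+ * CR3 or PDPTR; request a triple fault instead of shadowing it.
+ */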
+static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
+{
+       int ret = 0;
+
+       if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
+               set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
        int i;
        gfn_t root_gfn;
        struct kvm_mmu_page *sp;
        int direct = 0;
+       u64 pdptr;
 
        root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1957,13 +1966,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                ASSERT(!VALID_PAGE(root));
                if (tdp_enabled)
                        direct = 1;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
-               return;
+               return 0;
        }
        direct = !is_paging(vcpu);
        if (tdp_enabled)
@@ -1973,13 +1984,16 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-                       if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+                       pdptr = kvm_pdptr_read(vcpu, i);
+                       if (!is_present_gpte(pdptr)) {
                                vcpu->arch.mmu.pae_root[i] = 0;
                                continue;
                        }
-                       root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+                       root_gfn = pdptr >> PAGE_SHIFT;
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
@@ -1988,6 +2002,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+       return 0;
 }
 
 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -2006,7 +2021,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-               if (root) {
+               if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        mmu_sync_children(vcpu, sp);
@@ -2014,15 +2029,6 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        }
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_mmu_page *sp, *n;
-
-       list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-               kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->kvm->mmu_lock);
@@ -2030,13 +2036,6 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_sync_global(vcpu);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
@@ -2151,6 +2150,14 @@ static void paging_free(struct kvm_vcpu *vcpu)
        nonpaging_free(vcpu);
 }
 
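+/*
+ * Bit 7 of a gpte is the PS (large page) bit, so rsvd_bits_mask is
+ * indexed by [PS bit][level - 1]: large and small mappings at the same
+ * level have different reserved bits.
+ */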
+static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
+{
+       int bit7;
+
+       bit7 = (gpte >> 7) & 1;
+       return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
@@ -2159,6 +2166,59 @@ static void paging_free(struct kvm_vcpu *vcpu)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
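+/*
+ * Precompute the reserved-bit masks for the current paging mode; the
+ * guest page-table walker in paging_tmpl.h uses them (via
+ * is_rsvd_bits_set()) to report #PF with PFERR_RSVD_MASK set.
+ */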
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
+{
+       struct kvm_mmu *context = &vcpu->arch.mmu;
+       int maxphyaddr = cpuid_maxphyaddr(vcpu);
+       u64 exb_bit_rsvd = 0;
+
+       if (!is_nx(vcpu))
+               exb_bit_rsvd = rsvd_bits(63, 63);
+       switch (level) {
+       case PT32_ROOT_LEVEL:
+               /* no rsvd bits for 2 level 4K page table entries */
+               context->rsvd_bits_mask[0][1] = 0;
+               context->rsvd_bits_mask[0][0] = 0;
+               if (is_cpuid_PSE36())
+                       /* 36bits PSE 4MB page */
+                       context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
+               else
+                       /* 32 bits PSE 4MB page */
+                       context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
+               context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+               break;
+       case PT32E_ROOT_LEVEL:
+               context->rsvd_bits_mask[0][2] =
+                       rsvd_bits(maxphyaddr, 63) |
+                       rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
+               context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 62);      /* PDE */
+               context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 62);      /* PTE */
+               context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 62) |
+                       rsvd_bits(13, 20);              /* large page */
+               context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+               break;
+       case PT64_ROOT_LEVEL:
+               context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+               context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+               context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51);
+               context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51);
+               context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
+               context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
+               context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51) |
+                       rsvd_bits(13, 20);              /* large page */
+               context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+               break;
+       }
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
        struct kvm_mmu *context = &vcpu->arch.mmu;
@@ -2179,6 +2239,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 
 static int paging64_init_context(struct kvm_vcpu *vcpu)
 {
+       reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
 }
 
@@ -2186,6 +2247,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = &vcpu->arch.mmu;
 
+       reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2201,6 +2263,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32E_init_context(struct kvm_vcpu *vcpu)
 {
+       reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
@@ -2221,12 +2284,15 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
                context->gva_to_gpa = nonpaging_gva_to_gpa;
                context->root_level = 0;
        } else if (is_long_mode(vcpu)) {
+               reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT64_ROOT_LEVEL;
        } else if (is_pae(vcpu)) {
+               reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT32E_ROOT_LEVEL;
        } else {
+               reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
                context->gva_to_gpa = paging32_gva_to_gpa;
                context->root_level = PT32_ROOT_LEVEL;
        }
@@ -2290,9 +2356,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
                goto out;
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
-       mmu_alloc_roots(vcpu);
+       r = mmu_alloc_roots(vcpu);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
+       if (r)
+               goto out;
        kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
@@ -2314,15 +2382,14 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 
        pte = *spte;
        if (is_shadow_present_pte(pte)) {
-               if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
-                   is_large_pte(pte))
+               if (is_last_spte(pte, sp->role.level))
                        rmap_remove(vcpu->kvm, spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);
                }
        }
-       set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+       __set_spte(spte, shadow_trap_nonpresent_pte);
        if (is_large_pte(pte))
                --vcpu->kvm->stat.lpages;
 }
@@ -2408,7 +2475,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                if ((bytes == 4) && (gpa % 4 == 0))
                        memcpy((void *)&gpte, new, 4);
        }
-       if (!is_present_pte(gpte))
+       if (!is_present_gpte(gpte))
                return;
        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
@@ -2606,8 +2673,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
                ++vcpu->stat.mmio_exits;
                return 0;
        case EMULATE_FAIL:
-               kvm_report_emulation_failure(vcpu, "pagetable");
-               return 1;
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+               return 0;
        default:
                BUG();
        }
@@ -2638,14 +2706,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu_page *sp;
-
-       while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-               sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
-                                 struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu->kvm, sp);
-               cond_resched();
-       }
        free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
@@ -2710,7 +2770,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
 
-       spin_lock(&kvm->mmu_lock);
        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;
@@ -2725,7 +2784,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
-       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
@@ -2987,6 +3045,55 @@ static gva_t canonicalize(gva_t gva)
        return gva;
 }
 
+
+typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
+                                u64 *sptep);
+
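+/*
+ * Depth-first walk of the shadow page table: recurse through non-leaf
+ * sptes and apply fn to every present spte in level-1 pages.
+ */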
+static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
+                           inspect_spte_fn fn)
+{
+       int i;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+               u64 ent = sp->spt[i];
+
+               if (is_shadow_present_pte(ent)) {
+                       if (sp->role.level > 1 && !is_large_pte(ent)) {
+                               struct kvm_mmu_page *child;
+                               child = page_header(ent & PT64_BASE_ADDR_MASK);
+                               __mmu_spte_walk(kvm, child, fn);
+                       }
+                       if (sp->role.level == 1)
+                               fn(kvm, sp, &sp->spt[i]);
+               }
+       }
+}
+
+static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
+{
+       int i;
+       struct kvm_mmu_page *sp;
+
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
+               sp = page_header(root);
+               __mmu_spte_walk(vcpu->kvm, sp, fn);
+               return;
+       }
+       for (i = 0; i < 4; ++i) {
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+               if (root && VALID_PAGE(root)) {
+                       root &= PT64_BASE_ADDR_MASK;
+                       sp = page_header(root);
+                       __mmu_spte_walk(vcpu->kvm, sp, fn);
+               }
+       }
+       return;
+}
+
 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                gva_t va, int level)
 {
@@ -3007,11 +3114,13 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                       " in nonleaf level: levels %d gva %lx"
                                       " level %d pte %llx\n", audit_msg,
                                       vcpu->arch.mmu.root_level, va, level, ent);
-
-                       audit_mappings_page(vcpu, ent, va, level - 1);
+                       else
+                               audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
                        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
-                       hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
+                       gfn_t gfn = gpa >> PAGE_SHIFT;
+                       pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
+                       hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
 
                        if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -3066,7 +3175,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
                        d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                        while (d) {
                                for (k = 0; k < RMAP_EXT; ++k)
-                                       if (d->shadow_ptes[k])
+                                       if (d->sptes[k])
                                                ++nmaps;
                                        else
                                                break;
@@ -3077,9 +3186,47 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
        return nmaps;
 }
 
-static int count_writable_mappings(struct kvm_vcpu *vcpu)
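+/* Audit: every writable spte must be reachable from its gfn's rmap chain. */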
+void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
+{
+       unsigned long *rmapp;
+       struct kvm_mmu_page *rev_sp;
+       gfn_t gfn;
+
+       if (*sptep & PT_WRITABLE_MASK) {
+               rev_sp = page_header(__pa(sptep));
+               gfn = rev_sp->gfns[sptep - rev_sp->spt];
+
+               if (!gfn_to_memslot(kvm, gfn)) {
+                       if (!printk_ratelimit())
+                               return;
+                       printk(KERN_ERR "%s: no memslot for gfn %ld\n",
+                                        audit_msg, gfn);
+                       printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
+                                       audit_msg, sptep - rev_sp->spt,
+                                       rev_sp->gfn);
+                       dump_stack();
+                       return;
+               }
+
+               rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt], 0);
+               if (!*rmapp) {
+                       if (!printk_ratelimit())
+                               return;
+                       printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
+                                        audit_msg, *sptep);
+                       dump_stack();
+               }
+       }
+}
+
+void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
+{
+       mmu_spte_walk(vcpu, inspect_spte_has_rmap);
+}
+
+static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
 {
-       int nmaps = 0;
        struct kvm_mmu_page *sp;
        int i;
 
@@ -3096,20 +3243,16 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
                                continue;
                        if (!(ent & PT_WRITABLE_MASK))
                                continue;
-                       ++nmaps;
+                       inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
                }
        }
-       return nmaps;
+       return;
 }
 
 static void audit_rmap(struct kvm_vcpu *vcpu)
 {
-       int n_rmap = count_rmaps(vcpu);
-       int n_actual = count_writable_mappings(vcpu);
-
-       if (n_rmap != n_actual)
-               printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
-                      __func__, audit_msg, n_rmap, n_actual);
+       check_writable_mappings_rmap(vcpu);
+       count_rmaps(vcpu);
 }
 
 static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -3117,20 +3260,28 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
        struct kvm_mmu_page *sp;
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
+       u64 *spte;
        gfn_t gfn;
 
        list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
                if (sp->role.direct)
                        continue;
+               if (sp->unsync)
+                       continue;
 
                gfn = unalias_gfn(vcpu->kvm, sp->gfn);
                slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
                rmapp = &slot->rmap[gfn - slot->base_gfn];
-               if (*rmapp)
-                       printk(KERN_ERR "%s: (%s) shadow page has writable"
-                              " mappings: gfn %lx role %x\n",
+
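+               /*
+                * Walk the whole rmap chain: any writable spte left here
+                * means the shadowed guest page table is not write-protected.
+                */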
+               spte = rmap_next(vcpu->kvm, rmapp, NULL);
+               while (spte) {
+                       if (*spte & PT_WRITABLE_MASK)
+                               printk(KERN_ERR "%s: (%s) shadow page has "
+                               "writable mappings: gfn %lx role %x\n",
                               __func__, audit_msg, sp->gfn,
                               sp->role.word);
+                       spte = rmap_next(vcpu->kvm, rmapp, spte);
+               }
        }
 }
 
@@ -3143,6 +3294,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
        audit_rmap(vcpu);
        audit_write_protection(vcpu);
        audit_mappings(vcpu);
+       audit_writable_sptes_have_rmaps(vcpu);
        dbg = olddbg;
 }