KVM: MMU: Allow 4K ptes with bit 7 (PAT) set
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index da3ad3c..7030b5f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -178,7 +178,6 @@ static u64 __read_mostly shadow_x_mask;     /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mt_mask;
 
 static inline u64 rsvd_bits(int s, int e)
 {
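
The rsvd_bits() helper in the context above feeds every reserved-bit change further down. Its body is elided by the diff; the in-tree definition simply builds a mask with bits s..e (inclusive) set:

    static inline u64 rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

So, for example, rsvd_bits(13, 20) yields 0x1fe000, the frame-number bits that must be zero in a PAE large-page PDE.
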
@@ -199,14 +198,13 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-               u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
+               u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
-       shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
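
With mt_mask gone from kvm_mmu_set_mask_ptes(), the memory type is no longer a one-shot global set at init; vendor code now supplies it per spte through the kvm_x86_ops->get_mt_mask() callback that set_spte() calls below. Roughly how an EPT-enabled caller changes (a sketch; the exact argument values are an assumption, not a quote from vmx.c):

    /* before: the default EPT memory type was baked in once, as the
     * sixth argument */
    kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK,
                          VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);

    /* after: the trailing mt_mask argument disappears */
    kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK);
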
@@ -1075,18 +1073,10 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-       list_del(&sp->oos_link);
-       --kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
-       if (sp->global)
-               kvm_unlink_unsync_global(kvm, sp);
        --kvm->stat.mmu_unsync;
 }
 
@@ -1249,7 +1239,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
-       sp->global = 0;
        hlist_add_head(&sp->hash_link, bucket);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
@@ -1617,7 +1606,7 @@ static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
        return mtrr_state->def_type;
 }
 
-static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        u8 mtrr;
 
@@ -1627,6 +1616,7 @@ static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
                mtrr = MTRR_TYPE_WRBACK;
        return mtrr;
 }
+EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
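
Renaming get_memory_type() to kvm_get_guest_memory_type() and exporting it lets vendor modules reuse the guest MTRR lookup when building the memory-type bits themselves. Reconstructed from the logic deleted out of set_spte() below, a VMX-side callback would look roughly like this (a sketch, not a quote from vmx.c):

    static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
    {
            /* MMIO must stay uncachable; RAM takes the guest's MTRR type,
             * plus the "ignore guest PAT" bit, mirroring the branch this
             * series removes from set_spte(). */
            if (is_mmio)
                    return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
            return (kvm_get_guest_memory_type(vcpu, gfn) <<
                    VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IGMT_BIT;
    }
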
@@ -1647,11 +1637,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
 
-       if (sp->global) {
-               list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-               ++vcpu->kvm->stat.mmu_unsync_global;
-       } else
-               kvm_mmu_mark_parents_unsync(vcpu, sp);
+       kvm_mmu_mark_parents_unsync(vcpu, sp);
 
        mmu_convert_notrap(sp);
        return 0;
@@ -1678,21 +1664,11 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   int global, gfn_t gfn, pfn_t pfn, bool speculative,
+                   gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
        u64 spte;
        int ret = 0;
-       u64 mt_mask = shadow_mt_mask;
-       struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-       if (!global && sp->global) {
-               sp->global = 0;
-               if (sp->unsync) {
-                       kvm_unlink_unsync_global(vcpu->kvm, sp);
-                       kvm_mmu_mark_parents_unsync(vcpu, sp);
-               }
-       }
 
        /*
         * We don't set the accessed bit, since we sometimes want to see
@@ -1712,16 +1688,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                spte |= shadow_user_mask;
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;
-       if (mt_mask) {
-               if (!kvm_is_mmio_pfn(pfn)) {
-                       mt_mask = get_memory_type(vcpu, gfn) <<
-                               kvm_x86_ops->get_mt_mask_shift();
-                       mt_mask |= VMX_EPT_IGMT_BIT;
-               } else
-                       mt_mask = MTRR_TYPE_UNCACHABLE <<
-                               kvm_x86_ops->get_mt_mask_shift();
-               spte |= mt_mask;
-       }
+       if (tdp_enabled)
+               spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+                       kvm_is_mmio_pfn(pfn));
 
        spte |= (u64)pfn << PAGE_SHIFT;
 
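
set_spte() now consults the callback only when tdp_enabled, since with shadow paging the guest's own PAT/MTRR bits already travel in the pte. On EPT the returned value occupies spte bits 5:3 (memory type) and bit 6 (ignore guest PAT); a standalone check of that arithmetic, using the constants' known values:

    #include <stdio.h>
    #include <stdint.h>

    #define VMX_EPT_MT_EPTE_SHIFT 3             /* EPT memtype: bits 5:3 */
    #define VMX_EPT_IGMT_BIT      (1ull << 6)   /* ignore guest PAT */
    #define MTRR_TYPE_WRBACK      6

    int main(void)
    {
            uint64_t mt = ((uint64_t)MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
                          | VMX_EPT_IGMT_BIT;
            printf("EPT memtype bits for write-back RAM: %#llx\n",
                   (unsigned long long)mt);      /* prints 0x70 */
            return 0;
    }
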
@@ -1766,8 +1735,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, int global,
-                        gfn_t gfn, pfn_t pfn, bool speculative)
+                        int *ptwrite, int largepage, gfn_t gfn,
+                        pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1796,7 +1765,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                        was_rmapped = 1;
        }
        if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, global, gfn, pfn, speculative, true)) {
+                     dirty, largepage, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
@@ -1844,7 +1813,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
-                                    largepage, 0, gfn, pfn, false);
+                                    largepage, gfn, pfn, false);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
@@ -1943,7 +1912,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
+{
+       int ret = 0;
+
+       if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
+               set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
        int i;
        gfn_t root_gfn;
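
The new mmu_check_root() converts a root gfn with no backing memslot into a queued KVM_REQ_TRIPLE_FAULT instead of letting kvm_mmu_get_page() shadow a nonexistent frame. The request is consumed on the next vcpu entry; the x86.c pattern of this era looks roughly like this (illustrative, not a quote):

    /* in vcpu_enter_guest(): a pending triple fault shuts the VM down */
    if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
            kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
            r = 0;
            goto out;
    }
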
@@ -1958,13 +1939,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                ASSERT(!VALID_PAGE(root));
                if (tdp_enabled)
                        direct = 1;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
-               return;
+               return 0;
        }
        direct = !is_paging(vcpu);
        if (tdp_enabled)
@@ -1981,6 +1964,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
@@ -1989,6 +1974,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+       return 0;
 }
 
 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -2007,7 +1993,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-               if (root) {
+               if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        mmu_sync_children(vcpu, sp);
@@ -2015,15 +2001,6 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        }
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_mmu_page *sp, *n;
-
-       list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-               kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->kvm->mmu_lock);
@@ -2031,13 +2008,6 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_sync_global(vcpu);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
@@ -2187,20 +2157,20 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
                else
                        /* 32 bits PSE 4MB page */
                        context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
-               context->rsvd_bits_mask[1][0] = ~0ull;
+               context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
                break;
        case PT32E_ROOT_LEVEL:
                context->rsvd_bits_mask[0][2] =
                        rsvd_bits(maxphyaddr, 63) |
                        rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
                context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
-                       rsvd_bits(maxphyaddr, 62) | rsvd_bits(7, 8);    /* PDE */
+                       rsvd_bits(maxphyaddr, 62);      /* PDE */
                context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 62);      /* PTE */
                context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 62) |
                        rsvd_bits(13, 20);              /* large page */
-               context->rsvd_bits_mask[1][0] = ~0ull;
+               context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
                break;
        case PT64_ROOT_LEVEL:
                context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2208,14 +2178,15 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
                context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
                context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
-                       rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+                       rsvd_bits(maxphyaddr, 51);
                context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
                context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
                context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
                context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
-                       rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 20);
-               context->rsvd_bits_mask[1][0] = ~0ull;
+                       rsvd_bits(maxphyaddr, 51) |
+                       rsvd_bits(13, 20);              /* large page */
+               context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
                break;
        }
 }
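
The page-fault path indexes these masks as rsvd_bits_mask[bit7][level-1], so [1][0] is a 4K pte whose bit 7 is set. At level 1 that bit is PAT, not page-size, so treating the whole entry as reserved (~0ull) faulted on perfectly legal mappings; aliasing the slot to the plain 4K mask [0][0] is the fix the commit title names. A standalone before/after check, ignoring the exb_bit_rsvd (NX) term and assuming maxphyaddr = 40:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t rsvd_bits(int s, int e)     /* mirror of the kernel helper */
    {
            return ((1ull << (e - s + 1)) - 1) << s;
    }

    int main(void)
    {
            uint64_t old_mask = ~0ull;              /* old rsvd_bits_mask[1][0] */
            uint64_t new_mask = rsvd_bits(40, 51);  /* == [0][0] sans NX term */
            uint64_t pte = 0x1000 | (1 << 7) | 1;   /* present 4K pte, PAT set */

            printf("old: %s\n", (pte & old_mask) ? "reserved-bit fault" : "ok");
            printf("new: %s\n", (pte & new_mask) ? "reserved-bit fault" : "ok");
            return 0;
    }
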
@@ -2357,9 +2328,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
                goto out;
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
-       mmu_alloc_roots(vcpu);
+       r = mmu_alloc_roots(vcpu);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
+       if (r)
+               goto out;
        kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
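
With mmu_alloc_roots() reporting failure, kvm_mmu_load() can bail out before loading a bogus root into CR3. The reconstructed shape of the function after this hunk (the lines shown come from the diff context; the rest is from the tree of this era, so treat it as illustrative):

    int kvm_mmu_load(struct kvm_vcpu *vcpu)
    {
            int r;

            r = mmu_topup_memory_caches(vcpu);
            if (r)
                    goto out;
            spin_lock(&vcpu->kvm->mmu_lock);
            kvm_mmu_free_some_pages(vcpu);
            r = mmu_alloc_roots(vcpu);
            mmu_sync_roots(vcpu);
            spin_unlock(&vcpu->kvm->mmu_lock);
            if (r)
                    goto out;       /* triple fault already requested */
            kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
            kvm_mmu_flush_tlb(vcpu);
    out:
            return r;
    }
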
@@ -2769,7 +2742,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
 
-       spin_lock(&kvm->mmu_lock);
        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;
@@ -2784,7 +2756,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
-       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
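
Dropping the lock/unlock pair means kvm_mmu_slot_remove_write_access() now expects its caller to hold mmu_lock, letting a memslot update batch this with other MMU work in one critical section. A hypothetical call site after the change (the function and the mem variable are assumptions):

    /* e.g. in kvm_arch_set_memory_region(): the caller owns the lock */
    spin_lock(&kvm->mmu_lock);
    kvm_mmu_slot_remove_write_access(kvm, mem->slot);
    spin_unlock(&kvm->mmu_lock);
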
@@ -3066,11 +3037,13 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                       " in nonleaf level: levels %d gva %lx"
                                       " level %d pte %llx\n", audit_msg,
                                       vcpu->arch.mmu.root_level, va, level, ent);
-
-                       audit_mappings_page(vcpu, ent, va, level - 1);
+                       else
+                               audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
                        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
-                       hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
+                       gfn_t gfn = gpa >> PAGE_SHIFT;
+                       pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
+                       hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
 
                        if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)