diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 3ade944..b24bc7c 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -31,7 +31,12 @@
 	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
-	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
+	#define PT_LEVEL_BITS PT64_LEVEL_BITS
+	#ifdef CONFIG_X86_64
+	#define PT_MAX_FULL_LEVELS 4
+	#else
+	#define PT_MAX_FULL_LEVELS 2
+	#endif
 #elif PTTYPE == 32
 	#define pt_element_t u32
 	#define guest_walker guest_walker32
@@ -41,230 +46,323 @@
 	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
 	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
-	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
+	#define PT_LEVEL_BITS PT32_LEVEL_BITS
+	#define PT_MAX_FULL_LEVELS 2
 #else
 	#error Invalid PTTYPE value
 #endif
 
+#define gpte_to_gfn FNAME(gpte_to_gfn)
+#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
+
 /*
  * The guest_walker structure emulates the behavior of the hardware page
  * table walker.
  */
 struct guest_walker {
 	int level;
-	gfn_t table_gfn;
-	pt_element_t *table;
-	pt_element_t *ptep;
+	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
+	pt_element_t pte;
 	pt_element_t inherited_ar;
+	gfn_t gfn;
+	u32 error_code;
 };
 
+static gfn_t gpte_to_gfn(pt_element_t gpte)
+{
+	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
+}
+
+static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
+{
+	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+}
+
 /*
  * Fetch a guest pte for a guest virtual address
  */
-static void FNAME(walk_addr)(struct guest_walker *walker,
-			     struct kvm_vcpu *vcpu, gva_t addr)
+static int FNAME(walk_addr)(struct guest_walker *walker,
+			    struct kvm_vcpu *vcpu, gva_t addr,
+			    int write_fault, int user_fault, int fetch_fault)
 {
-	hpa_t hpa;
-	struct kvm_memory_slot *slot;
-	pt_element_t *ptep;
-	pt_element_t root;
+	pt_element_t pte;
+	gfn_t table_gfn;
+	unsigned index;
+	gpa_t pte_gpa;
 
+	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
 	walker->level = vcpu->mmu.root_level;
-	walker->table = NULL;
-	root = vcpu->cr3;
+	pte = vcpu->cr3;
#if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
-		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
-		root = *walker->ptep;
-		if (!(root & PT_PRESENT_MASK))
-			return;
+		pte = vcpu->pdptrs[(addr >> 30) & 3];
+		if (!is_present_pte(pte))
+			goto not_present;
 		--walker->level;
 	}
#endif
-	walker->table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
-	slot = gfn_to_memslot(vcpu->kvm, walker->table_gfn);
-	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
-	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
-
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
+	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
 
 	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
 
 	for (;;) {
-		int index = PT_INDEX(addr, walker->level);
-		hpa_t paddr;
+		index = PT_INDEX(addr, walker->level);
+
+		table_gfn = gpte_to_gfn(pte);
+		pte_gpa = gfn_to_gpa(table_gfn);
+		pte_gpa += index * sizeof(pt_element_t);
+		walker->table_gfn[walker->level - 1] = table_gfn;
+		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+			 walker->level - 1, table_gfn);
+
+		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
 
-		ptep = &walker->table[index];
-		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
-		       ((unsigned long)ptep & PAGE_MASK));
+		if (!is_present_pte(pte))
+			goto not_present;
 
-		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
-			*ptep |= PT_ACCESSED_MASK;
+		if (write_fault && !is_writeble_pte(pte))
+			if (user_fault || is_write_protection(vcpu))
+				goto access_error;
 
-		if (!is_present_pte(*ptep) ||
-		    walker->level == PT_PAGE_TABLE_LEVEL ||
-		    (walker->level == PT_DIRECTORY_LEVEL &&
-		     (*ptep & PT_PAGE_SIZE_MASK) &&
-		     (PTTYPE == 64 || is_pse(vcpu))))
+		if (user_fault && !(pte & PT_USER_MASK))
+			goto access_error;
+
+#if PTTYPE == 64
+		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
+			goto access_error;
+#endif
+
+		if (!(pte & PT_ACCESSED_MASK)) {
+			mark_page_dirty(vcpu->kvm, table_gfn);
+			pte |= PT_ACCESSED_MASK;
+			kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
+		}
+
+		if (walker->level == PT_PAGE_TABLE_LEVEL) {
+			walker->gfn = gpte_to_gfn(pte);
 			break;
+		}
+
+		if (walker->level == PT_DIRECTORY_LEVEL
+		    && (pte & PT_PAGE_SIZE_MASK)
+		    && (PTTYPE == 64 || is_pse(vcpu))) {
+			walker->gfn = gpte_to_gfn_pde(pte);
+			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
+			if (PTTYPE == 32 && is_cpuid_PSE36())
+				walker->gfn += pse36_gfn_delta(pte);
+			break;
+		}
 
-		if (walker->level != 3 || is_long_mode(vcpu))
-			walker->inherited_ar &= walker->table[index];
-		walker->table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
-		kunmap_atomic(walker->table, KM_USER0);
-		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
-					    KM_USER0);
+		walker->inherited_ar &= pte;
 		--walker->level;
 	}
-	walker->ptep = ptep;
-}
 
-static void FNAME(release_walker)(struct guest_walker *walker)
-{
-	if (walker->table)
-		kunmap_atomic(walker->table, KM_USER0);
+	if (write_fault && !is_dirty_pte(pte)) {
+		mark_page_dirty(vcpu->kvm, table_gfn);
+		pte |= PT_DIRTY_MASK;
+		kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
+		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
+	}
+
+	walker->pte = pte;
+	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
+	return 1;
+
+not_present:
+	walker->error_code = 0;
+	goto err;
+
+access_error:
+	walker->error_code = PFERR_PRESENT_MASK;
+
+err:
+	if (write_fault)
+		walker->error_code |= PFERR_WRITE_MASK;
+	if (user_fault)
+		walker->error_code |= PFERR_USER_MASK;
+	if (fetch_fault)
+		walker->error_code |= PFERR_FETCH_MASK;
+	return 0;
 }
 
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
-			   u64 *shadow_pte, u64 access_bits)
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
+			   u64 *shadow_pte, u64 access_bits,
+			   int user_fault, int write_fault,
+			   int *ptwrite, struct guest_walker *walker,
+			   gfn_t gfn)
 {
-	ASSERT(*shadow_pte == 0);
-	access_bits &= guest_pte;
-	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
-	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
-		       guest_pte & PT_DIRTY_MASK, access_bits);
+	int dirty = gpte & PT_DIRTY_MASK;
+	u64 spte;
+	int was_rmapped = is_rmap_pte(*shadow_pte);
+	struct page *page;
+
+	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
+		 " user_fault %d gfn %lx\n",
+		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
+		 write_fault, user_fault, gfn);
+
+	access_bits &= gpte;
+	/*
+	 * We don't set the accessed bit, since we sometimes want to see
+	 * whether the guest actually used the pte (in order to detect
+	 * demand paging).
+	 */
+	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte |= gpte & PT64_NX_MASK;
+	if (!dirty)
+		access_bits &= ~PT_WRITABLE_MASK;
+
+	page = gfn_to_page(vcpu->kvm, gfn);
+
+	spte |= PT_PRESENT_MASK;
+	if (access_bits & PT_USER_MASK)
+		spte |= PT_USER_MASK;
+
+	if (is_error_page(page)) {
+		set_shadow_pte(shadow_pte,
+			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+		kvm_release_page_clean(page);
+		return;
+	}
+
+	spte |= page_to_phys(page);
+
+	if ((access_bits & PT_WRITABLE_MASK)
+	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+		struct kvm_mmu_page *shadow;
+
+		spte |= PT_WRITABLE_MASK;
+		if (user_fault) {
+			mmu_unshadow(vcpu->kvm, gfn);
+			goto unshadowed;
+		}
+
+		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
+		if (shadow) {
+			pgprintk("%s: found shadow page for %lx, marking ro\n",
+				 __FUNCTION__, gfn);
+			access_bits &= ~PT_WRITABLE_MASK;
+			if (is_writeble_pte(spte)) {
+				spte &= ~PT_WRITABLE_MASK;
+				kvm_x86_ops->tlb_flush(vcpu);
+			}
+			if (write_fault)
+				*ptwrite = 1;
+		}
+	}
+
+unshadowed:
+
+	if (access_bits & PT_WRITABLE_MASK)
+		mark_page_dirty(vcpu->kvm, gfn);
+
+	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+	set_shadow_pte(shadow_pte, spte);
+	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+	if (!was_rmapped) {
+		rmap_add(vcpu, shadow_pte, gfn);
+		if (!is_rmap_pte(*shadow_pte))
+			kvm_release_page_clean(page);
+	}
+	else
+		kvm_release_page_clean(page);
+	if (!ptwrite || !*ptwrite)
+		vcpu->last_pte_updated = shadow_pte;
 }
 
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
-			   u64 *shadow_pte, u64 access_bits,
-			   int index)
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+			      u64 *spte, const void *pte, int bytes,
+			      int offset_in_pte)
 {
-	gpa_t gaddr;
-
-	ASSERT(*shadow_pte == 0);
-	access_bits &= guest_pde;
-	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
-	if (PTTYPE == 32 && is_cpuid_PSE36())
-		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
-			(32 - PT32_DIR_PSE36_SHIFT);
-	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
-	set_pte_common(vcpu, shadow_pte, gaddr,
-		       guest_pde & PT_DIRTY_MASK, access_bits);
+	pt_element_t gpte;
+
+	gpte = *(const pt_element_t *)pte;
+	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
+		if (!offset_in_pte && !is_present_pte(gpte))
+			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
+		return;
+	}
+	if (bytes < sizeof(pt_element_t))
+		return;
+	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
+		       0, NULL, NULL, gpte_to_gfn(gpte));
 }
 
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *walker)
+			 struct guest_walker *walker,
+			 int user_fault, int write_fault, int *ptwrite)
 {
 	hpa_t shadow_addr;
 	int level;
+	u64 *shadow_ent;
 	u64 *prev_shadow_ent = NULL;
-	pt_element_t *guest_ent = walker->ptep;
 
-	if (!is_present_pte(*guest_ent))
+	if (!is_present_pte(walker->pte))
 		return NULL;
 
 	shadow_addr = vcpu->mmu.root_hpa;
 	level = vcpu->mmu.shadow_root_level;
+	if (level == PT32E_ROOT_LEVEL) {
+		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+		shadow_addr &= PT64_BASE_ADDR_MASK;
+		--level;
+	}
 
 	for (; ; level--) {
 		u32 index = SHADOW_PT_INDEX(addr, level);
-		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
+		struct kvm_mmu_page *shadow_page;
 		u64 shadow_pte;
+		int metaphysical;
+		gfn_t table_gfn;
+		unsigned hugepage_access = 0;
 
-		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
+		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
+		if (is_shadow_present_pte(*shadow_ent)) {
 			if (level == PT_PAGE_TABLE_LEVEL)
-				return shadow_ent;
+				break;
 			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
 			prev_shadow_ent = shadow_ent;
 			continue;
 		}
 
-		if (level == PT_PAGE_TABLE_LEVEL) {
-
-			if (walker->level == PT_DIRECTORY_LEVEL) {
-				if (prev_shadow_ent)
-					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
-				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
-					       walker->inherited_ar,
-					       PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
-			} else {
-				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
-				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
-			}
-			return shadow_ent;
-		}
+		if (level == PT_PAGE_TABLE_LEVEL)
+			break;
 
-		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
-		if (!VALID_PAGE(shadow_addr))
-			return ERR_PTR(-ENOMEM);
-		shadow_pte = shadow_addr | PT_PRESENT_MASK;
-		if (vcpu->mmu.root_level > 3 || level != 3)
-			shadow_pte |= PT_ACCESSED_MASK
-				| PT_WRITABLE_MASK | PT_USER_MASK;
+		if (level - 1 == PT_PAGE_TABLE_LEVEL
+		    && walker->level == PT_DIRECTORY_LEVEL) {
+			metaphysical = 1;
+			hugepage_access = walker->pte;
+			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+			if (!is_dirty_pte(walker->pte))
+				hugepage_access &= ~PT_WRITABLE_MASK;
+			hugepage_access >>= PT_WRITABLE_SHIFT;
+			if (walker->pte & PT64_NX_MASK)
+				hugepage_access |= (1 << 2);
+			table_gfn = gpte_to_gfn(walker->pte);
+		} else {
+			metaphysical = 0;
+			table_gfn = walker->table_gfn[level - 2];
+		}
+		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					       metaphysical, hugepage_access,
+					       shadow_ent);
+		shadow_addr = __pa(shadow_page->spt);
+		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
+			| PT_WRITABLE_MASK | PT_USER_MASK;
 		*shadow_ent = shadow_pte;
 		prev_shadow_ent = shadow_ent;
 	}
-}
-
-/*
- * The guest faulted for write. We need to
- *
- * - check write permissions
- * - update the guest pte dirty bit
- * - update our own dirty page tracking structures
- */
-static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
-			       u64 *shadow_ent,
-			       struct guest_walker *walker,
-			       gva_t addr,
-			       int user)
-{
-	pt_element_t *guest_ent;
-	int writable_shadow;
-	gfn_t gfn;
-
-	if (is_writeble_pte(*shadow_ent))
-		return 0;
-
-	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
-	if (user) {
-		/*
-		 * User mode access. Fail if it's a kernel page or a read-only
-		 * page.
-		 */
-		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
-			return 0;
-		ASSERT(*shadow_ent & PT_USER_MASK);
-	} else
-		/*
-		 * Kernel mode access. Fail if it's a read-only page and
-		 * supervisor write protection is enabled.
-		 */
-		if (!writable_shadow) {
-			if (is_write_protection(vcpu))
-				return 0;
-			*shadow_ent &= ~PT_USER_MASK;
-		}
-
-	guest_ent = walker->ptep;
-
-	if (!is_present_pte(*guest_ent)) {
-		*shadow_ent = 0;
-		return 0;
-	}
-
-	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
-	mark_page_dirty(vcpu->kvm, gfn);
-	*shadow_ent |= PT_WRITABLE_MASK;
-	*guest_ent |= PT_DIRTY_MASK;
-	rmap_add(vcpu->kvm, shadow_ent);
+	FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
+		       walker->inherited_ar, user_fault, write_fault,
+		       ptwrite, walker, walker->gfn);
 
-	return 1;
+	return shadow_ent;
 }
 
 /*
@@ -278,106 +376,103 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
  * - normal guest page fault due to the guest pte marked not present, not
  * writable, or not executable
  *
- * Returns: 1 if we need to emulate the instruction, 0 otherwise
+ * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
+ *	     a negative value on error.
  */
 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 			     u32 error_code)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
-	int pte_present = error_code & PFERR_PRESENT_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
+	int fetch_fault = error_code & PFERR_FETCH_MASK;
 	struct guest_walker walker;
 	u64 *shadow_pte;
-	int fixed;
+	int write_pt = 0;
+	int r;
+
+	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+	kvm_mmu_audit(vcpu, "pre page fault");
+
+	r = mmu_topup_memory_caches(vcpu);
+	if (r)
+		return r;
 
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
-	for (;;) {
-		FNAME(walk_addr)(&walker, vcpu, addr);
-		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-		if (IS_ERR(shadow_pte)) { /* must be -ENOMEM */
-			nonpaging_flush(vcpu);
-			FNAME(release_walker)(&walker);
-			continue;
-		}
-		break;
-	}
+	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+			     fetch_fault);
 
 	/*
 	 * The page is not mapped by the guest. Let the guest handle it.
 	 */
-	if (!shadow_pte) {
-		inject_page_fault(vcpu, addr, error_code);
-		FNAME(release_walker)(&walker);
+	if (!r) {
+		pgprintk("%s: guest page fault\n", __FUNCTION__);
+		inject_page_fault(vcpu, addr, walker.error_code);
+		vcpu->last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
 
-	/*
-	 * Update the shadow pte.
-	 */
-	if (write_fault)
-		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
-					    user_fault);
-	else
-		fixed = fix_read_pf(shadow_pte);
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+				  &write_pt);
+	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+		 shadow_pte, *shadow_pte, write_pt);
 
-	FNAME(release_walker)(&walker);
+	if (!write_pt)
+		vcpu->last_pt_write_count = 0; /* reset fork detector */
 
 	/*
 	 * mmio: emulate if accessible, otherwise its a guest fault.
 	 */
-	if (is_io_pte(*shadow_pte)) {
-		if (may_access(*shadow_pte, write_fault, user_fault))
-			return 1;
-		pgprintk("%s: io work, no access\n", __FUNCTION__);
-		inject_page_fault(vcpu, addr,
-				  error_code | PFERR_PRESENT_MASK);
-		return 0;
-	}
+	if (is_io_pte(*shadow_pte))
+		return 1;
 
-	/*
-	 * pte not present, guest page fault.
-	 */
-	if (pte_present && !fixed) {
-		inject_page_fault(vcpu, addr, error_code);
-		return 0;
-	}
-
-	++kvm_stat.pf_fixed;
+	++vcpu->stat.pf_fixed;
+	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 
-	return 0;
+	return write_pt;
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	struct guest_walker walker;
-	pt_element_t guest_pte;
-	gpa_t gpa;
+	gpa_t gpa = UNMAPPED_GVA;
+	int r;
 
-	FNAME(walk_addr)(&walker, vcpu, vaddr);
-	guest_pte = *walker.ptep;
-	FNAME(release_walker)(&walker);
+	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
 
-	if (!is_present_pte(guest_pte))
-		return UNMAPPED_GVA;
-
-	if (walker.level == PT_DIRECTORY_LEVEL) {
-		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
-		ASSERT(PTTYPE == 64 || is_pse(vcpu));
+	if (r) {
+		gpa = gfn_to_gpa(walker.gfn);
+		gpa |= vaddr & ~PAGE_MASK;
+	}
 
-		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
-			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));
+	return gpa;
+}
 
-		if (PTTYPE == 32 && is_cpuid_PSE36())
-			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
-				(32 - PT32_DIR_PSE36_SHIFT);
-	} else {
-		gpa = (guest_pte & PT_BASE_ADDR_MASK);
-		gpa |= (vaddr & ~PAGE_MASK);
+static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
+				 struct kvm_mmu_page *sp)
+{
+	int i, offset = 0;
+	pt_element_t *gpt;
+	struct page *page;
+
+	if (sp->role.metaphysical
+	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
+		nonpaging_prefetch_page(vcpu, sp);
+		return;
 	}
 
-	return gpa;
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+	page = gfn_to_page(vcpu->kvm, sp->gfn);
+	gpt = kmap_atomic(page, KM_USER0);
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+		if (is_present_pte(gpt[offset + i]))
+			sp->spt[i] = shadow_trap_nonpresent_pte;
+		else
+			sp->spt[i] = shadow_notrap_nonpresent_pte;
+	kunmap_atomic(gpt, KM_USER0);
+	kvm_release_page_clean(page);
 }
 
 #undef pt_element_t
@@ -387,6 +482,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 #undef PT_INDEX
 #undef SHADOW_PT_INDEX
 #undef PT_LEVEL_MASK
-#undef PT_PTE_COPY_MASK
-#undef PT_NON_PTE_COPY_MASK
 #undef PT_DIR_BASE_ADDR_MASK
+#undef PT_LEVEL_BITS
+#undef PT_MAX_FULL_LEVELS
+#undef gpte_to_gfn
+#undef gpte_to_gfn_pde
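
The rewritten walk_addr() above no longer maps guest page-table pages into the host; it reads each guest pte with kvm_read_guest() and, on failure, reports an architectural error code through walker->error_code instead of silently returning. As a reading aid only (not part of the patch), here is a minimal user-space C sketch of the two ideas that error path combines: extracting a frame number from a guest pte, as gpte_to_gfn() does, and folding the access kind into an x86 page-fault error code, as the not_present/access_error/err tail of walk_addr() does. The PAGE_SHIFT, PT_BASE_ADDR_MASK and PFERR_* values below are local stand-ins assumed to mirror the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins assumed to mirror the kernel's definitions. */
#define PAGE_SHIFT		12
#define PT_BASE_ADDR_MASK	0x000ffffffffff000ULL	/* pte bits 12..51 */
#define PFERR_PRESENT_MASK	(1U << 0)
#define PFERR_WRITE_MASK	(1U << 1)
#define PFERR_USER_MASK		(1U << 2)
#define PFERR_FETCH_MASK	(1U << 4)

typedef uint64_t pt_element_t;
typedef uint64_t gfn_t;

/* Same shape as gpte_to_gfn() in the patch: mask off the attribute
 * bits, then shift the physical base down to a frame number. */
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

/* Same shape as the not_present:/access_error:/err: tail of
 * walk_addr(): PFERR_PRESENT_MASK distinguishes a protection fault
 * from a not-present fault, then the kind of access is ORed in. */
static uint32_t fault_error_code(int present, int write, int user, int fetch)
{
	uint32_t ec = present ? PFERR_PRESENT_MASK : 0;

	if (write)
		ec |= PFERR_WRITE_MASK;
	if (user)
		ec |= PFERR_USER_MASK;
	if (fetch)
		ec |= PFERR_FETCH_MASK;
	return ec;
}

int main(void)
{
	pt_element_t gpte = 0x12345067ULL; /* frame 0x12345; P, W, U, A, D set */

	printf("gfn = 0x%llx\n", (unsigned long long)gpte_to_gfn(gpte));
	/* A user-mode write to a present but write-protected page: */
	printf("error_code = 0x%x\n", fault_error_code(1, 1, 1, 0));
	return 0;
}

Compiled with any C99 compiler, this prints gfn = 0x12345 and error_code = 0x7, the same code walk_addr() would hand to inject_page_fault() for a user-mode write that reaches the access_error label.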