#error Invalid PTTYPE value
#endif
+#define gpte_to_gfn FNAME(gpte_to_gfn)
+#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
+
/*
* The guest_walker structure emulates the behavior of the hardware page
* table walker.
u32 error_code;
};
+static gfn_t gpte_to_gfn(pt_element_t gpte)
+{
+ return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
+}
+
+static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
+{
+ return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+}
+
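The two helpers above are pure mask-and-shift arithmetic; PT_BASE_ADDR_MASK is whichever mask the PTTYPE block at the top of this file selected. A minimal userspace model of gpte_to_gfn(), assuming 4K pages (PAGE_SHIFT = 12) and the usual 64-bit layout where pte bits 51:12 hold the frame number:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
/* model of PT64_BASE_ADDR_MASK: keep pte bits 51:12 (the frame number) */
#define BASE_ADDR_MASK (((1ULL << 52) - 1) & ~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t gpte = 0xabcd1067ULL;	/* frame 0xabcd1 + P/RW/US/A/D flags */
	uint64_t gfn = (gpte & BASE_ADDR_MASK) >> PAGE_SHIFT;

	printf("gfn = %#llx\n", (unsigned long long)gfn);	/* prints 0xabcd1 */
	return 0;
}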
/*
* Fetch a guest pte for a guest virtual address
*/
struct kvm_vcpu *vcpu, gva_t addr,
int write_fault, int user_fault, int fetch_fault)
{
- hpa_t hpa;
- struct kvm_memory_slot *slot;
- struct page *page;
- pt_element_t *table;
pt_element_t pte;
gfn_t table_gfn;
unsigned index;
for (;;) {
index = PT_INDEX(addr, walker->level);
- table_gfn = (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+ table_gfn = gpte_to_gfn(pte);
+ pte_gpa = gfn_to_gpa(table_gfn);
+ pte_gpa += index * sizeof(pt_element_t);
walker->table_gfn[walker->level - 1] = table_gfn;
pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
walker->level - 1, table_gfn);
- slot = gfn_to_memslot(vcpu->kvm, table_gfn);
- hpa = safe_gpa_to_hpa(vcpu->kvm, pte & PT64_BASE_ADDR_MASK);
- page = pfn_to_page(hpa >> PAGE_SHIFT);
-
- table = kmap_atomic(page, KM_USER0);
- pte = table[index];
- kunmap_atomic(table, KM_USER0);
+ kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
if (!is_present_pte(pte))
goto not_present;
if (!(pte & PT_ACCESSED_MASK)) {
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_ACCESSED_MASK;
- table = kmap_atomic(page, KM_USER0);
- table[index] = pte;
- kunmap_atomic(table, KM_USER0);
+ kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
}
if (walker->level == PT_PAGE_TABLE_LEVEL) {
- walker->gfn = (pte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
+ walker->gfn = gpte_to_gfn(pte);
break;
}
if (walker->level == PT_DIRECTORY_LEVEL
&& (pte & PT_PAGE_SIZE_MASK)
&& (PTTYPE == 64 || is_pse(vcpu))) {
- walker->gfn = (pte & PT_DIR_BASE_ADDR_MASK)
- >> PAGE_SHIFT;
+ walker->gfn = gpte_to_gfn_pde(pte);
walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
+ if (PTTYPE == 32 && is_cpuid_PSE36())
+ walker->gfn += pse36_gfn_delta(pte);
break;
}
if (write_fault && !is_dirty_pte(pte)) {
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_DIRTY_MASK;
- table = kmap_atomic(page, KM_USER0);
- table[index] = pte;
- kunmap_atomic(table, KM_USER0);
- pte_gpa = table_gfn << PAGE_SHIFT;
- pte_gpa += index * sizeof(pt_element_t);
+ kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
}
return 0;
}
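The PSE-36 branch above calls pse36_gfn_delta(), whose definition this excerpt doesn't show. The removed set_pde() code further down OR'd (gpde & PT32_DIR_PSE36_MASK) << (32 - PT32_DIR_PSE36_SHIFT) into the byte address, so the helper presumably folds that shift together with the byte-to-gfn conversion. A runnable userspace model (the mask and shift values are assumptions matching classic PSE-36, where pde bits 16:13 supply physical address bits 35:32, i.e. gfn bits 23:20):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PT32_DIR_PSE36_SHIFT	13
#define PT32_DIR_PSE36_MASK	(0xfULL << PT32_DIR_PSE36_SHIFT)

/* model of the presumed helper: byte delta from the removed set_pde()
 * code, shifted down by PAGE_SHIFT to yield a gfn delta */
static uint64_t pse36_gfn_delta(uint32_t gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

int main(void)
{
	uint32_t pde = 0x5 << PT32_DIR_PSE36_SHIFT;	/* PA bits 35:32 = 0x5 */

	printf("gfn delta = %#llx\n",
	       (unsigned long long)pse36_gfn_delta(pde));	/* 0x500000 */
	return 0;
}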
-static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
- u64 *shadow_pte,
- gpa_t gaddr,
- pt_element_t gpte,
- u64 access_bits,
- int user_fault,
- int write_fault,
- int *ptwrite,
- struct guest_walker *walker,
- gfn_t gfn)
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
+ u64 *shadow_pte, u64 access_bits,
+ int user_fault, int write_fault,
+ int *ptwrite, struct guest_walker *walker,
+ gfn_t gfn)
{
- hpa_t paddr;
int dirty = gpte & PT_DIRTY_MASK;
u64 spte;
int was_rmapped = is_rmap_pte(*shadow_pte);
+ struct page *page;
pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
" user_fault %d gfn %lx\n",
__FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
write_fault, user_fault, gfn);
+ access_bits &= gpte;
/*
* We don't set the accessed bit, since we sometimes want to see
* whether the guest actually used the pte (in order to detect
if (!dirty)
access_bits &= ~PT_WRITABLE_MASK;
- paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);
+ page = gfn_to_page(vcpu->kvm, gfn);
spte |= PT_PRESENT_MASK;
if (access_bits & PT_USER_MASK)
spte |= PT_USER_MASK;
- if (is_error_hpa(paddr)) {
+ if (is_error_page(page)) {
set_shadow_pte(shadow_pte,
shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+ kvm_release_page_clean(page);
return;
}
- spte |= paddr;
+ spte |= page_to_phys(page);
if ((access_bits & PT_WRITABLE_MASK)
|| (write_fault && !is_write_protection(vcpu) && !user_fault)) {
unshadowed:
if (access_bits & PT_WRITABLE_MASK)
- mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+ mark_page_dirty(vcpu->kvm, gfn);
pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
set_shadow_pte(shadow_pte, spte);
- page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
- if (!was_rmapped)
- rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
- >> PAGE_SHIFT);
+ page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+ if (!was_rmapped) {
+ rmap_add(vcpu, shadow_pte, gfn);
+ if (!is_rmap_pte(*shadow_pte))
+ kvm_release_page_clean(page);
+ } else
+ kvm_release_page_clean(page);
if (!ptwrite || !*ptwrite)
vcpu->last_pte_updated = shadow_pte;
}
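The kvm_release_page_clean() calls added above keep the page reference count balanced: gfn_to_page() takes a reference, and the only reference allowed to outlive set_pte() is the one now accounted for by the rmap. A toy userspace model of that invariant (function names here are purely illustrative, not KVM APIs):

#include <assert.h>
#include <stdbool.h>

static int refcount;			/* stands in for the struct page refcount */

static void model_gfn_to_page(void)	{ ++refcount; }
static void model_release_page(void)	{ --refcount; }

/* mirrors the tail of set_pte(): keep the reference only when the
 * pte was freshly installed into the rmap */
static void set_pte_tail(bool was_rmapped, bool now_rmapped)
{
	model_gfn_to_page();
	if (!was_rmapped) {
		if (!now_rmapped)
			model_release_page();	/* install failed: drop our ref */
	} else
		model_release_page();	/* an earlier set_pte() already kept one */
}

int main(void)
{
	set_pte_tail(false, true);	/* fresh mapping: rmap keeps the ref */
	set_pte_tail(true, true);	/* remap of the same pte: no extra ref */
	assert(refcount == 1);		/* exactly one ref per mapped page */
	return 0;
}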
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
- u64 *shadow_pte, u64 access_bits,
- int user_fault, int write_fault, int *ptwrite,
- struct guest_walker *walker, gfn_t gfn)
-{
- access_bits &= gpte;
- FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
- gpte, access_bits, user_fault, write_fault,
- ptwrite, walker, gfn);
-}
-
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
u64 *spte, const void *pte, int bytes,
int offset_in_pte)
return;
pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
- 0, NULL, NULL,
- (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
-}
-
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
- u64 *shadow_pte, u64 access_bits,
- int user_fault, int write_fault, int *ptwrite,
- struct guest_walker *walker, gfn_t gfn)
-{
- gpa_t gaddr;
-
- access_bits &= gpde;
- gaddr = (gpa_t)gfn << PAGE_SHIFT;
- if (PTTYPE == 32 && is_cpuid_PSE36())
- gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
- (32 - PT32_DIR_PSE36_SHIFT);
- FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
- gpde, access_bits, user_fault, write_fault,
- ptwrite, walker, gfn);
+ 0, NULL, NULL, gpte_to_gfn(gpte));
}
/*
hugepage_access >>= PT_WRITABLE_SHIFT;
if (walker->pte & PT64_NX_MASK)
hugepage_access |= (1 << 2);
- table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
- >> PAGE_SHIFT;
+ table_gfn = gpte_to_gfn(walker->pte);
} else {
metaphysical = 0;
table_gfn = walker->table_gfn[level - 2];
prev_shadow_ent = shadow_ent;
}
- if (walker->level == PT_DIRECTORY_LEVEL) {
- FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
- walker->inherited_ar, user_fault, write_fault,
- ptwrite, walker, walker->gfn);
- } else {
- ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
- FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
- walker->inherited_ar, user_fault, write_fault,
- ptwrite, walker, walker->gfn);
- }
+ FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
+ walker->inherited_ar, user_fault, write_fault,
+ ptwrite, walker, walker->gfn);
+
return shadow_ent;
}
r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
if (r) {
- gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
+ gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
}
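gfn_to_gpa() is just a left shift by PAGE_SHIFT, so the two lines above compose the page frame with the low offset bits of vaddr. A quick check of the arithmetic (assuming 4K pages):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t gfn = 0xabcd1;
	uint64_t vaddr = 0x7f00dead2234ULL;
	uint64_t gpa = (gfn << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);

	printf("gpa = %#llx\n", (unsigned long long)gpa);	/* 0xabcd1234 */
	return 0;
}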
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
- int i;
+ int i, offset = 0;
pt_element_t *gpt;
+ struct page *page;
- if (sp->role.metaphysical || PTTYPE == 32) {
+ if (sp->role.metaphysical
+ || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
nonpaging_prefetch_page(vcpu, sp);
return;
}
- gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
+ if (PTTYPE == 32)
+ offset = sp->role.quadrant << PT64_LEVEL_BITS;
+ page = gfn_to_page(vcpu->kvm, sp->gfn);
+ gpt = kmap_atomic(page, KM_USER0);
for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
- if (is_present_pte(gpt[i]))
+ if (is_present_pte(gpt[offset + i]))
sp->spt[i] = shadow_trap_nonpresent_pte;
else
sp->spt[i] = shadow_notrap_nonpresent_pte;
kunmap_atomic(gpt, KM_USER0);
+ kvm_release_page_clean(page);
}
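The quadrant offset introduced above exists because, for PTTYPE 32, a guest page table holds 1024 4-byte entries while a shadow page holds only 512 8-byte ones; each guest table is therefore shadowed by two pages, and sp->role.quadrant selects which half to scan. A quick userspace check of the index arithmetic (PT64_LEVEL_BITS is 9, i.e. 512 entries per shadow page):

#include <stdio.h>

#define PT64_LEVEL_BITS 9	/* 512 entries per shadow page */

int main(void)
{
	for (int quadrant = 0; quadrant < 2; ++quadrant) {
		int offset = quadrant << PT64_LEVEL_BITS;

		printf("quadrant %d covers guest entries %d..%d\n",
		       quadrant, offset, offset + 511);
	}
	return 0;
}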
#undef pt_element_t
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
+#undef gpte_to_gfn
+#undef gpte_to_gfn_pde