/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
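
/*
 * This file is not compiled on its own; the includer (mmu.c) defines
 * PTTYPE and pulls this file in once per pte size, along these lines:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */
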
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
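
/*
 * FNAME() pastes the pte size into each function name, so one definition
 * of FNAME(page_fault) below becomes paging64_page_fault in the 64-bit
 * pass and paging32_page_fault in the 32-bit pass.  Note that
 * SHADOW_PT_INDEX expands to PT64_INDEX in both passes: the guest may use
 * 32-bit ptes, but the shadow page tables we feed the hardware are always
 * in the 64-bit format.
 */
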
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t *table;
	pt_element_t pte;
	pt_element_t *ptep;
	struct page *page;
	int index;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};
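
/*
 * A successful walk leaves the leaf pte in 'pte' and its frame number in
 * 'gfn'.  'table_gfn[]' remembers the gfn of the guest page table visited
 * at each level, so that the pages holding guest ptes can be marked dirty
 * when we write accessed/dirty bits into them.  'inherited_ar' is the AND
 * of the access rights of all non-leaf levels: a translation is only
 * writable or user-accessible if every level on the path allows it.
 */
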
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	walker->page = NULL;
	walker->ptep = NULL;
	root = vcpu->cr3;
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			goto not_present;
		walker->level = PT32E_ROOT_LEVEL;
	}
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu->kvm, root & PT64_BASE_ADDR_MASK);
	walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
	walker->table = kmap_atomic(walker->page, KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		walker->index = index;
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (!is_present_pte(*ptep))
			goto not_present;

		if (write_fault && !is_writeble_pte(*ptep))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(*ptep & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
			goto access_error;
#endif
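
		/*
		 * A hardware page table walker would set the accessed bit
		 * itself; since we walk the guest tables in software, we
		 * must emulate that side effect below, and mark the frame
		 * holding the guest pte dirty so the change is not lost.
		 */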
		if (!(*ptep & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			*ptep |= PT_ACCESSED_MASK;
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}
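
		/*
		 * For a large page, the pde maps a whole run of frames;
		 * the final gfn is the pde's base frame plus the
		 * page-table-level index of the address.  For example, if
		 * a 32-bit 4MB pde maps guest-physical 4MB (gfn 0x400),
		 * an address with page-table index 5 resolves to gfn
		 * 0x405.
		 */
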
		walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		kunmap_atomic(walker->table, KM_USER0);
		paddr = safe_gpa_to_hpa(vcpu->kvm, table_gfn << PAGE_SHIFT);
		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
		walker->table = kmap_atomic(walker->page, KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->pte = *ptep;
	kunmap_atomic(walker->table, KM_USER0);
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
	return 0;
}
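
/*
 * The error paths above assemble walker->error_code in the format of a
 * hardware page fault error code, ready for injection into the guest:
 * bit 0 (PFERR_PRESENT_MASK) distinguishes a protection violation from a
 * missing pte, bit 1 a write access, bit 2 a user-mode access, and
 * PFERR_FETCH_MASK an instruction fetch denied by NX.
 */
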
static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
					struct guest_walker *walker)
{
	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}

static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	u64 spte;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 paddr;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	if (write_fault && !dirty) {
		pt_element_t *guest_ent, *tmp = NULL;

		if (walker->ptep)
			guest_ent = walker->ptep;
		else {
			tmp = kmap_atomic(walker->page, KM_USER0);
			guest_ent = &tmp[walker->index];
		}

		*guest_ent |= PT_DIRTY_MASK;
		if (!walker->ptep)
			kunmap_atomic(tmp, KM_USER0);
		dirty = 1;
		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
	}
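
	/*
	 * The guest dirty bit must be set above, before the shadow pte is
	 * made writable: once the guest can write through the mapping
	 * without faulting, we get no further chance to update its
	 * accounting.
	 */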

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		return;
	}

	spte |= paddr;

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}
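
	/*
	 * The tlb flush above is required because the spte may already be
	 * cached as writable: clearing PT_WRITABLE_MASK in memory alone
	 * would let the guest keep writing through stale translations to a
	 * page we are trying to write-protect.
	 */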

unshadowed:
	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
			 >> PAGE_SHIFT);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}
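
/*
 * PSE36 note: with 32-bit non-PAE paging and the CPUID PSE36 feature, a
 * 4MB pde carries physical address bits above 31 in its middle bits
 * (PT32_DIR_PSE36_MASK, starting at bit 13); the shift by
 * 32 - PT32_DIR_PSE36_SHIFT above moves them back into place, so e.g.
 * bit 13 of the pde becomes bit 32 of the guest physical address.
 */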

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
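
		/*
		 * hugepage_access condenses the guest pde's writable, user
		 * and nx permissions into a small key, so that large guest
		 * pages shadowed under different permissions get distinct
		 * shadow pages.  The page is flagged metaphysical because
		 * no real guest page table stands behind it; the pde maps
		 * its memory directly.
		 */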
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}

	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Walk the guest page tables to find the pte for the faulting
	 * address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i;
	pt_element_t *gpt;

	if (sp->role.metaphysical || PTTYPE == 32) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
}
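
/*
 * prefetch_page pre-fills a freshly shadowed page table with two flavors
 * of not-present sptes: shadow_trap_nonpresent_pte where the guest pte is
 * present, so the first access traps into kvm and can be shadowed
 * properly, and shadow_notrap_nonpresent_pte where the guest pte is also
 * absent, so the resulting page fault can be reflected to the guest
 * without a costly walk.
 */
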
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS