/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
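/*
 * A minimal sketch of the intended usage, assuming the including file
 * (mmu.c in this tree) defines PTTYPE before each inclusion:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */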

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
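	/*
	 * Shadow page tables always use the 64-bit pte format (the host
	 * shadows a 32-bit guest with PAE or long-mode paging), so shadow
	 * indexing uses PT64_INDEX even for 32-bit guest ptes.
	 */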
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* current level of the walk */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* guest frame of each table visited */
	pt_element_t *table;			/* kmapped current guest table */
	pt_element_t pte;			/* last guest pte read */
	pt_element_t *ptep;			/* direct pointer to the guest pte, if any */
	struct page *page;			/* page backing the current table */
	int index;				/* index of the pte within its table */
	pt_element_t inherited_ar;		/* access rights ANDed down the walk */
	gfn_t gfn;				/* final translated guest frame */
	u32 error_code;				/* fault error code if the walk fails */
};
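
/*
 * For reference, the 4-level x86-64 hardware walk that walk_addr() below
 * emulates indexes one table per 9-bit slice of the virtual address
 * (2-level 32-bit paging slices the address the same way, 10 bits at a
 * time):
 *
 *	bits 47-39: PML4 index
 *	bits 38-30: PDPT index
 *	bits 29-21: page directory index
 *	bits 20-12: page table index
 *	bits 11-0:  offset into the 4K page
 */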

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	hpa_t hpa;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	walker->page = NULL;
	walker->ptep = NULL;
	root = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		walker->pte = root;
		if (!(root & PT_PRESENT_MASK))
			goto not_present;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
	walker->table = kmap_atomic(walker->page, KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		walker->index = index;
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (!is_present_pte(*ptep))
			goto not_present;

		if (write_fault && !is_writeble_pte(*ptep))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(*ptep & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(*ptep & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			*ptep |= PT_ACCESSED_MASK;
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		kunmap_atomic(walker->table, KM_USER0);
		paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
		walker->table = kmap_atomic(walker->page, KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->pte = *ptep;
	if (walker->page)
		walker->ptep = NULL;
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)walker->pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;
	/* fall through */
err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
	return 0;
}

static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
					struct guest_walker *walker)
{
	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}

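/*
 * Construct a shadow pte for a guest pte: set the guest pte's dirty bit
 * on a write fault, translate the guest frame to a host physical address,
 * and withhold write access when the guest pte is clean or the target
 * frame is itself a shadowed guest page table.
 */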
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	if (write_fault && !dirty) {
		pt_element_t *guest_ent, *tmp = NULL;

		if (walker->ptep)
			guest_ent = walker->ptep;
		else {
			tmp = kmap_atomic(walker->page, KM_USER0);
			guest_ent = &tmp[walker->index];
		}

		*guest_ent |= PT_DIRTY_MASK;
		if (!walker->ptep)
			kunmap_atomic(tmp, KM_USER0);
		dirty = 1;
		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
	}

	spte = PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		return;
	}

	spte |= paddr;

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte);
}
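
/*
 * The reverse map populated by rmap_add() above lets the mmu find every
 * shadow pte that points at a given guest frame, which is what makes the
 * write protection of shadowed guest page tables seen here possible.
 */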

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

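/*
 * Called when the guest writes a pte that is backed by a shadow page
 * (a sketch of the contract, inferred from the pte-write path in mmu.c),
 * so the shadow copy can be updated in place instead of being faulted in
 * later.
 */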
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

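/*
 * With 32-bit paging, a 4MB pde can only name frames below 4GB unless the
 * processor supports PSE-36, in which case pde bits 13 and up supply the
 * physical address bits above bit 31; the shift below rebuilds the full
 * guest physical address from them.
 */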
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

/*
 * Fetch a shadow pte for a given guest virtual address, instantiating
 * any missing shadow page tables on the way down.
 */
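/*
 * When the guest maps a 4MB page the shadow still uses 4K ptes, so fetch
 * creates a "metaphysical" shadow page table: one with no guest
 * counterpart, which simply splits the large mapping into small pages.
 */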
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}
	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
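/*
 * The error_code decoded below follows the x86 page-fault convention:
 * bit 0 (PFERR_PRESENT_MASK) means a protection violation rather than a
 * missing page, bit 1 (PFERR_WRITE_MASK) a write access, bit 2
 * (PFERR_USER_MASK) a user-mode access, and bit 4 (PFERR_FETCH_MASK) an
 * instruction fetch.  A sketch of how a caller consumes the return value
 * (assuming the kvm_mmu_page_fault() wiring in mmu.c):
 *
 *	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
 *	if (r < 0)
 *		return r;	// propagate the error
 *	if (r == 0)
 *		return 1;	// fault fixed; resume the guest
 *	// r == 1: emulate the faulting instruction
 */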
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

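/*
 * Prefill a newly shadowed page table.  Slots whose guest pte is present
 * get shadow_trap_nonpresent_pte, which forces a vmexit on first use so
 * the real spte can be built; slots whose guest pte is absent get
 * shadow_notrap_nonpresent_pte, which lets the fault be reflected
 * straight into the guest without a guest page table walk.
 */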
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i;
	pt_element_t *gpt;

	if (sp->role.metaphysical || PTTYPE == 32) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS