/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
#define KVM_GUEST_CR0_MASK \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
         | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
         | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
        (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
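/*
 * For reference (derived from the constants above, plus an assumption about
 * how fx_init() lays the buffer out): fx_buf holds two 512-byte FXSAVE
 * images, one for the host and one for the guest, and the extra
 * FX_IMAGE_ALIGN bytes leave room to round the first image up to the
 * 16-byte alignment that fxsave/fxrstor require.
 */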
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_TLB_FLUSH 0
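/*
 * Illustrative sketch (an assumption about the usage pattern, not a quote
 * from kvm_main.c): a request is raised with an atomic bit operation on
 * vcpu->requests and consumed by the vcpu loop before the next guest entry:
 *
 *	set_bit(KVM_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
 *		kvm_arch_ops->tlb_flush(vcpu);
 */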
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef unsigned long hfn_t;
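/*
 * Illustrative sketch (hypothetical helpers, not part of the original
 * header): a frame number is simply the corresponding physical address with
 * the page offset stripped, so conversion is a shift by PAGE_SHIFT.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}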
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
                unsigned hugepage_access : 3;
        };
};
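/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): thanks to pad_for_nice_hex_output the fields line up with hex
 * digits of 'word', so a 2-level guest shadowed by a level-1 page in
 * quadrant 1 reads back as role.word == 0x112.
 */
static inline union kvm_mmu_page_role example_mmu_page_role(void)
{
        union kvm_mmu_page_role role = { .word = 0 };

        role.glevels = 2;       /* 2-level guest paging */
        role.level = 1;         /* lowest level of the shadow hierarchy */
        role.quadrant = 1;      /* second quadrant of the guest table */

        return role;            /* role.word == 0x112 */
}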
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int multimapped;           /* More than one parent_pte? */
        int root_count;            /* Currently serving as active root */
        union {
                u64 *parent_pte;               /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
};
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level PAE, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
};
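/*
 * Illustrative sketch (an assumption about the usage pattern, not a quote
 * from the sources): callers never care which paging mode is active; they
 * always go through the callbacks, e.g.
 *
 *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *	if (gpa == UNMAPPED_GVA)
 *		the guest has no mapping for gva
 */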
#define KVM_NR_MEM_OBJS 20

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
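/*
 * Illustrative sketch (an assumption, not a quote from mmu.c): the caches
 * are topped up from a context that may sleep, and the fault path then
 * takes objects without any possibility of failure:
 *
 *	if (mc->nobjs)
 *		obj = mc->objects[--mc->nobjs];
 */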
struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
        int singlestep;
};

struct kvm_pio_request {
        struct page *guest_pages[2];
        unsigned guest_page_offset;
        u32 irq_window_exits;
        u32 request_irq_exits;
struct kvm_io_device {
        void (*read)(struct kvm_io_device *this,
                     gpa_t addr, int len, void *val);
        void (*write)(struct kvm_io_device *this,
                      gpa_t addr, int len, const void *val);
        int (*in_range)(struct kvm_io_device *this, gpa_t addr);
        void (*destructor)(struct kvm_io_device *this);
};
static inline void kvm_iodevice_read(struct kvm_io_device *dev, gpa_t addr,
                                     int len, void *val)
{
        dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev, gpa_t addr,
                                      int len, const void *val)
{
        dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
        return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
        if (dev->destructor)
                dev->destructor(dev);
}
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
        int dev_count;
#define NR_IOBUS_DEVS 6
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                             struct kvm_io_device *dev);
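/*
 * Illustrative sketch (hypothetical device, not part of this header): an
 * in-kernel device fills in the callbacks, claims an address range via
 * in_range(), and registers itself once on the bus it wants to watch
 * (kvm->pio_bus or kvm->mmio_bus).
 */
static int example_dev_in_range(struct kvm_io_device *this, gpa_t addr)
{
        return addr >= 0x3f8 && addr < 0x400;   /* hypothetical port window */
}

static void example_dev_read(struct kvm_io_device *this, gpa_t addr,
                             int len, void *val)
{
        memset(val, 0xff, len);                 /* pretend the bus floats high */
}

static void example_dev_write(struct kvm_io_device *this, gpa_t addr,
                              int len, const void *val)
{
        /* discard guest writes */
}

static struct kvm_io_device example_dev = {
        .read     = example_dev_read,
        .write    = example_dev_write,
        .in_range = example_dev_in_range,
};

/* registration, e.g.: kvm_io_bus_register_dev(&kvm->pio_bus, &example_dev); */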
        struct preempt_notifier preempt_notifier;
        int interrupt_window_open;
        unsigned long requests;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
        DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;      /* needs vcpu_load_rsp_rip() */

        gpa_t para_state_gpa;
        struct page *para_state_page;

        u64 pdptrs[4]; /* pae */
        u64 ia32_misc_enable_msr;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;

        struct kvm_guest_debug guest_debug;

        char fx_buf[FX_BUF_SIZE];
        char *guest_fx_image;
        int guest_fpu_loaded;

        int mmio_read_completed;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
        gva_t mmio_fault_cr2;
        struct kvm_pio_request pio;

        struct kvm_stat stat;
        struct kvm_save_segment {
                u16 selector;
                unsigned long base;
                u32 limit;
                u32 ar;
        } tr, es, ds, fs, gs;
        int halt_request; /* real mode on Intel only */

        struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
struct kvm_mem_alias {
        gfn_t base_gfn;
        unsigned long npages;
        gfn_t target_gfn;
};
struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        struct page **phys_mem;
        unsigned long *dirty_bitmap;
};
        struct mutex lock; /* protects everything except vcpus */
        struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        int n_free_mmu_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        unsigned long rmap_overflow;
        struct list_head vm_list;
        struct kvm_io_bus mmio_bus;
        struct kvm_io_bus pio_bus;
struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));
struct kvm_arch_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        void (*hardware_enable)(void *dummy);      /* __init */
        void (*hardware_disable)(void *dummy);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);

        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*vcpu_decache)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  unsigned long addr, u32 err_code);

        void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
};
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
                  struct module *module);
void kvm_exit_arch(void);
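/*
 * Illustrative sketch (hypothetical backend, not part of this header;
 * assumes <linux/module.h>): an architecture module in the kvm-intel /
 * kvm-amd mould fills in a kvm_arch_ops table and hands it to
 * kvm_init_arch() from its module_init().  A real backend passes the size
 * of its own vcpu container structure; sizeof(struct kvm_vcpu) is only a
 * placeholder here.
 */
static int example_cpu_has_kvm_support(void)
{
        return 0;       /* a real backend probes CPU feature bits here */
}

static struct kvm_arch_ops example_arch_ops = {
        .cpu_has_kvm_support = example_cpu_has_kvm_support,
        /* ... the remaining callbacks ... */
};

static int __init example_backend_init(void)
{
        return kvm_init_arch(&example_arch_ops, sizeof(struct kvm_vcpu),
                             THIS_MODULE);
}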
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
enum emulation_result {
        EMULATE_DONE,       /* no further processing */
        EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
        EMULATE_FAIL,       /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code);
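/*
 * Illustrative sketch (an assumption about the callers, not a quote from
 * the exit handlers): a caller dispatches on the result; EMULATE_DO_MMIO
 * means kvm_run already describes the access and the vcpu must exit to
 * userspace to complete it:
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code)) {
 *	case EMULATE_DONE:
 *		break;			resume the guest
 *	case EMULATE_DO_MMIO:
 *		return 0;		let userspace handle the mmio
 *	case EMULATE_FAIL:
 *		report the failure and give up
 *	}
 */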
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;

int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                  int size, unsigned long count, int string, int down,
                  gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
int emulator_read_std(unsigned long addr, void *val, unsigned int bytes,
                      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr, const void *val,
                            unsigned int bytes, struct kvm_vcpu *vcpu);
unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                     u32 error_code)
{
        return vcpu->mmu.page_fault(vcpu, gva, error_code);
}
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
}
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->shadow_efer & EFER_LME;
#else
        return 0;
#endif
}
static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & X86_CR0_PG;
}
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}
static inline u16 read_fs(void)
{
        u16 seg;
        asm ("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm ("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm ("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm ("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
        asm ("lldt %0" : : "rm"(sel));
}
static inline void get_idt(struct descriptor_table *table)
{
        asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm ("sgdt %0" : "=m"(*table));
}
static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm ("str %0" : "=g"(tr));
        return segment_base(tr);
}

static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
static inline void fx_save(void *image)
{
        asm ("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(void *image)
{
        asm ("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
{
        asm ("fninit");
}
static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
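/*
 * The ASM_VMX_* strings above are hand-assembled opcode bytes so the code
 * builds even with assemblers that lack the VMX mnemonics.  Usage sketch
 * (an assumption about the pattern, not a quote from vmx.c):
 *
 *	asm volatile (ASM_VMX_VMXON_RAX
 *		      : : "a"(&phys_addr), "m"(phys_addr)
 *		      : "memory", "cc");
 */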
#define MSR_IA32_TIME_STAMP_COUNTER 0x010
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
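/*
 * For reference (derived from the constants above): the real-mode TSS is
 * the 0x68-byte hardware TSS (104 bytes), a 32-byte interrupt redirection
 * bitmap, an 8192-byte I/O permission bitmap, and one trailing all-ones
 * terminator byte required after the IOPB, i.e.
 * RMODE_TSS_SIZE == 104 + 32 + 8192 + 1 == 8329 bytes.
 */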