KVM: Remove set_cr0_no_modeswitch() arch op
drivers/kvm/vmx.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "vmx.h"
#include "kvm_vmx.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <asm/io.h>
#include <asm/desc.h>

#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

#ifdef CONFIG_X86_64
#define HOST_IS_64 1
#else
#define HOST_IS_64 0
#endif

static struct vmcs_descriptor {
        int size;
        int order;
        u32 revision_id;
} vmcs_descriptor;

#define VMX_SEGMENT_FIELD(seg)                                  \
        [VCPU_SREG_##seg] = {                                   \
                .selector = GUEST_##seg##_SELECTOR,             \
                .base = GUEST_##seg##_BASE,                     \
                .limit = GUEST_##seg##_LIMIT,                   \
                .ar_bytes = GUEST_##seg##_AR_BYTES,             \
        }

static struct kvm_vmx_segment_field {
        unsigned selector;
        unsigned base;
        unsigned limit;
        unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
        VMX_SEGMENT_FIELD(CS),
        VMX_SEGMENT_FIELD(DS),
        VMX_SEGMENT_FIELD(ES),
        VMX_SEGMENT_FIELD(FS),
        VMX_SEGMENT_FIELD(GS),
        VMX_SEGMENT_FIELD(SS),
        VMX_SEGMENT_FIELD(TR),
        VMX_SEGMENT_FIELD(LDTR),
};

static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
        MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline int is_page_fault(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
{
        int i;

        for (i = 0; i < vcpu->nmsrs; ++i)
                if (vcpu->guest_msrs[i].index == msr)
                        return &vcpu->guest_msrs[i];
        return NULL;
}

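/*
 * VMX instructions report failure through CF or ZF (VMfailInvalid /
 * VMfailValid); "setna" (set if CF or ZF) latches that into 'error'.
 */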
static void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);
        u8 error;

        asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
                      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                      : "cc", "memory");
        if (error)
                printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
                       vmcs, phys_addr);
}

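/*
 * A VMCS has to be VMCLEARed on the cpu where it was last loaded before
 * it can be used elsewhere; vcpu_clear() runs __vcpu_clear() on that cpu
 * via IPI when necessary.
 */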
static void __vcpu_clear(void *arg)
{
        struct kvm_vcpu *vcpu = arg;
        int cpu = raw_smp_processor_id();

        if (vcpu->cpu == cpu)
                vmcs_clear(vcpu->vmcs);
        if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
                per_cpu(current_vmcs, cpu) = NULL;
}

static void vcpu_clear(struct kvm_vcpu *vcpu)
{
        if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
                smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
        else
                __vcpu_clear(vcpu);
        vcpu->launched = 0;
}

static unsigned long vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile (ASM_VMX_VMREAD_RDX_RAX
                      : "=a"(value) : "d"(field) : "cc");
        return value;
}

static u16 vmcs_read16(unsigned long field)
{
        return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
        return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
        return vmcs_readl(field);
#else
        return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
        printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
               field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
        dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
        u8 error;

        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
                      : "=q"(error) : "a"(value), "d"(field) : "cc");
        if (unlikely(error))
                vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
        vmcs_writel(field, value);
#else
        vmcs_writel(field, value);
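        /* the empty asm below is a compiler barrier between the half-writes */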
        asm volatile ("");
        vmcs_writel(field+1, value >> 32);
#endif
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
        u64 phys_addr = __pa(vcpu->vmcs);
        int cpu;

        cpu = get_cpu();

        if (vcpu->cpu != cpu)
                vcpu_clear(vcpu);

        if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
                u8 error;

                per_cpu(current_vmcs, cpu) = vcpu->vmcs;
                asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
                              : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                              : "cc");
                if (error)
                        printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
                               vcpu->vmcs, phys_addr);
        }

        if (vcpu->cpu != cpu) {
                struct descriptor_table dt;
                unsigned long sysenter_esp;

                vcpu->cpu = cpu;
                /*
                 * Linux uses per-cpu TSS and GDT, so set these when switching
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
                get_gdt(&dt);
                vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
        }
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
        put_cpu();
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
        vcpu_clear(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
        return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        unsigned long rip;
        u32 interruptibility;

        rip = vmcs_readl(GUEST_RIP);
        rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
        vmcs_writel(GUEST_RIP, rip);

        /*
         * We emulated an instruction, so temporary interrupt blocking
         * should be removed, if set.
         */
        interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
        if (interruptibility & 3)
                vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                             interruptibility & ~3);
        vcpu->interrupt_window_open = 1;
}

static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
        printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
               vmcs_readl(GUEST_RIP));
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     GP_VECTOR |
                     INTR_TYPE_EXCEPTION |
                     INTR_INFO_DELIEVER_CODE_MASK |
                     INTR_INFO_VALID_MASK);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
        u64 host_tsc, tsc_offset;

        rdtscll(host_tsc);
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
        u64 host_tsc;

        rdtscll(host_tsc);
        vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

static void reload_tss(void)
{
#ifndef CONFIG_X86_64

        /*
         * VT restores TR but not its size.  Useless.
         */
        struct descriptor_table gdt;
        struct segment_descriptor *descs;

        get_gdt(&gdt);
        descs = (void *)gdt.base;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
#endif
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        u64 data;
        struct vmx_msr_entry *msr;

        if (!pdata) {
                printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
                return -EINVAL;
        }

        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_FS_BASE:
                data = vmcs_readl(GUEST_FS_BASE);
                break;
        case MSR_GS_BASE:
                data = vmcs_readl(GUEST_GS_BASE);
                break;
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
        case MSR_IA32_TIME_STAMP_COUNTER:
                data = guest_read_tsc();
                break;
        case MSR_IA32_SYSENTER_CS:
                data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
        case MSR_IA32_SYSENTER_EIP:
                data = vmcs_readl(GUEST_SYSENTER_EIP);
                break;
        case MSR_IA32_SYSENTER_ESP:
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        data = msr->data;
                        break;
                }
                return kvm_get_msr_common(vcpu, msr_index, pdata);
        }

        *pdata = data;
        return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        struct vmx_msr_entry *msr;
        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                return kvm_set_msr_common(vcpu, msr_index, data);
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
                break;
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
                break;
        case MSR_IA32_SYSENTER_EIP:
                vmcs_writel(GUEST_SYSENTER_EIP, data);
                break;
        case MSR_IA32_SYSENTER_ESP:
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        msr->data = data;
                        break;
                }
                return kvm_set_msr_common(vcpu, msr_index, data);
        }

        return 0;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
        vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
        vcpu->rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
        vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
        vmcs_writel(GUEST_RIP, vcpu->rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        unsigned long dr7 = 0x400;
        u32 exception_bitmap;
        int old_singlestep;

        exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
        old_singlestep = vcpu->guest_debug.singlestep;

        vcpu->guest_debug.enabled = dbg->enabled;
        if (vcpu->guest_debug.enabled) {
                int i;

                dr7 |= 0x200;  /* exact */
                for (i = 0; i < 4; ++i) {
                        if (!dbg->breakpoints[i].enabled)
                                continue;
                        vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
                        dr7 |= 2 << (i*2);    /* global enable */
                        dr7 |= 0 << (i*4+16); /* execution breakpoint */
                }

                exception_bitmap |= (1u << 1);  /* Trap debug exceptions */

                vcpu->guest_debug.singlestep = dbg->singlestep;
        } else {
                exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
                vcpu->guest_debug.singlestep = 0;
        }

        if (old_singlestep && !vcpu->guest_debug.singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                vmcs_writel(GUEST_RFLAGS, flags);
        }

        vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
        vmcs_writel(GUEST_DR7, dr7);

        return 0;
}

static __init int cpu_has_kvm_support(void)
{
        unsigned long ecx = cpuid_ecx(1);
        return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
        u64 msr;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        return (msr & 5) == 1; /* locked but not enabled */
}

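/*
 * MSR_IA32_FEATURE_CONTROL: bit 0 locks the MSR, bit 2 permits VMXON.
 * "(old & 5) != 5" means the BIOS left it unlocked, so enable and lock
 * it ourselves before executing VMXON.
 */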
static void hardware_enable(void *garbage)
{
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
        if ((old & 5) != 5)
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
        write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
                      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
        asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

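/*
 * MSR_IA32_VMX_BASIC: the low 32 bits hold the VMCS revision identifier,
 * bits 44:32 the VMCS region size; hence the 0x1fff mask on the high word.
 */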
static __init void setup_vmcs_descriptor(void)
{
        u32 vmx_msr_low, vmx_msr_high;

        rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
        vmcs_descriptor.size = vmx_msr_high & 0x1fff;
        vmcs_descriptor.order = get_order(vmcs_descriptor.size);
        vmcs_descriptor.revision_id = vmx_msr_low;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;

        pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_descriptor.size);
        vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
        return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
        return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
        free_pages((unsigned long)vmcs, vmcs_descriptor.order);
}

static __exit void free_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct vmcs *vmcs;

                vmcs = alloc_vmcs_cpu(cpu);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
                }

                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
}

static __init int hardware_setup(void)
{
        setup_vmcs_descriptor();
        return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
        free_kvm_area();
}

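/*
 * In real mode every exception has to be intercepted so it can be
 * emulated; otherwise only #PF is needed, for the shadow page tables.
 */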
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
        if (vcpu->rmode.active)
                vmcs_write32(EXCEPTION_BITMAP, ~0);
        else
                vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
}

static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        if (vmcs_readl(sf->base) == save->base && (save->ar & AR_S_MASK)) {
                vmcs_write16(sf->selector, save->selector);
                vmcs_writel(sf->base, save->base);
                vmcs_write32(sf->limit, save->limit);
                vmcs_write32(sf->ar_bytes, save->ar);
        } else {
                u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
                        << AR_DPL_SHIFT;
                vmcs_write32(sf->ar_bytes, 0x93 | dpl);
        }
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 0;

        vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
        vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
        vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

        flags = vmcs_readl(GUEST_RFLAGS);
        flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
        flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);

        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
                        (vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));

        update_exception_bitmap(vcpu);

        fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

        vmcs_write16(GUEST_SS_SELECTOR, 0);
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

        vmcs_write16(GUEST_CS_SELECTOR,
                     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

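/*
 * The fake TSS used while emulating real mode occupies the top three
 * pages of the first memory slot.
 */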
static int rmode_tss_base(struct kvm* kvm)
{
        gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
        return base_gfn << PAGE_SHIFT;
}

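/*
 * Stash the protected-mode segment state and load real-mode-compatible
 * values: selector = base >> 4, a 64K limit, and AR 0xf3 (present, DPL 3,
 * writable data), as vm86 execution requires.
 */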
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        save->selector = vmcs_read16(sf->selector);
        save->base = vmcs_readl(sf->base);
        save->limit = vmcs_read32(sf->limit);
        save->ar = vmcs_read32(sf->ar_bytes);
        vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0xf3);
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 1;

        vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

        vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

        vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        flags = vmcs_readl(GUEST_RFLAGS);
        vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

        flags |= IOPL_MASK | X86_EFLAGS_VM;

        vmcs_writel(GUEST_RFLAGS, flags);
        vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
        update_exception_bitmap(vcpu);

        vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
        vmcs_write32(GUEST_SS_LIMIT, 0xffff);
        vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

        vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
                vmcs_writel(GUEST_CS_BASE, 0xf0000);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

        fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
        u32 guest_tr_ar;

        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
                printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
                       __FUNCTION__);
                vmcs_write32(GUEST_TR_AR_BYTES,
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }

        vcpu->shadow_efer |= EFER_LMA;

        find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     | VM_ENTRY_CONTROLS_IA32E_MASK);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
        vcpu->shadow_efer &= ~EFER_LMA;

        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_CONTROLS_IA32E_MASK);
}

#endif

static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
        vcpu->cr0 &= KVM_GUEST_CR0_MASK;
        vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;

        vcpu->cr4 &= KVM_GUEST_CR4_MASK;
        vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

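/*
 * CR0 writes drive the mode transitions: toggling PE switches between
 * the emulated real mode and protected mode, and toggling PG with
 * EFER.LME set enters or leaves long mode.
 */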
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
                enter_pmode(vcpu);

        if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
                enter_rmode(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
                        exit_lmode(vcpu);
        }
#endif

        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0,
                    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
        vcpu->cr0 = cr0;
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        vmcs_writel(GUEST_CR3, cr3);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);

        vcpu->shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) |
                             VM_ENTRY_CONTROLS_IA32E_MASK);
                msr->data = efer;

        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) &
                             ~VM_ENTRY_CONTROLS_IA32E_MASK);

                msr->data = efer & ~EFER_LME;
        }
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        var->base = vmcs_readl(sf->base);
        var->limit = vmcs_read32(sf->limit);
        var->selector = vmcs_read16(sf->selector);
        ar = vmcs_read32(sf->ar_bytes);
        if (ar & AR_UNUSABLE_MASK)
                ar = 0;
        var->type = ar & 15;
        var->s = (ar >> 4) & 1;
        var->dpl = (ar >> 5) & 3;
        var->present = (ar >> 7) & 1;
        var->avl = (ar >> 12) & 1;
        var->l = (ar >> 13) & 1;
        var->db = (ar >> 14) & 1;
        var->g = (ar >> 15) & 1;
        var->unusable = (ar >> 16) & 1;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
        if (var->unusable)
                ar = 1 << 16;
        else {
                ar = var->type & 15;
                ar |= (var->s & 1) << 4;
                ar |= (var->dpl & 3) << 5;
                ar |= (var->present & 1) << 7;
                ar |= (var->avl & 1) << 12;
                ar |= (var->l & 1) << 13;
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
        if (ar == 0) /* a 0 value means unusable */
                ar = AR_UNUSABLE_MASK;
        vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

        *db = (ar >> 14) & 1;
        *l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

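/*
 * Build a minimal TSS for real-mode emulation: word 0x66 holds the I/O
 * bitmap offset, and the byte following the I/O bitmap must be all ones.
 */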
static int init_rmode_tss(struct kvm* kvm)
{
        struct page *p1, *p2, *p3;
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        char *page;

        p1 = _gfn_to_page(kvm, fn++);
        p2 = _gfn_to_page(kvm, fn++);
        p3 = _gfn_to_page(kvm, fn);

        if (!p1 || !p2 || !p3) {
                kvm_printf(kvm, "%s: gfn_to_page failed\n", __FUNCTION__);
                return 0;
        }

        page = kmap_atomic(p1, KM_USER0);
        memset(page, 0, PAGE_SIZE);
        *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p2, KM_USER0);
        memset(page, 0, PAGE_SIZE);
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p3, KM_USER0);
        memset(page, 0, PAGE_SIZE);
        *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
        kunmap_atomic(page, KM_USER0);

        return 1;
}

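/*
 * Each VMX capability MSR constrains its control field: bits set in the
 * low word must be 1, bits clear in the high word must be 0.
 */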
static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
{
        u32 msr_high, msr_low;

        rdmsr(msr, msr_low, msr_high);

        val &= msr_high;
        val |= msr_low;
        vmcs_write32(vmcs_field, val);
}

static void seg_setup(int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        vmcs_write16(sf->selector, 0);
        vmcs_writel(sf->base, 0);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0x93);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
{
        u32 host_sysenter_cs;
        u32 junk;
        unsigned long a;
        struct descriptor_table dt;
        int i;
        int ret = 0;
        int nr_good_msrs;
        extern asmlinkage void kvm_vmx_return(void);

        if (!init_rmode_tss(vcpu->kvm)) {
                ret = -ENOMEM;
                goto out;
        }

        memset(vcpu->regs, 0, sizeof(vcpu->regs));
        vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
        vcpu->cr8 = 0;
        vcpu->apic_base = 0xfee00000 |
                        /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
                        MSR_IA32_APICBASE_ENABLE;

        fx_init(vcpu);

        /*
         * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
         * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
         */
        vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
        vmcs_writel(GUEST_CS_BASE, 0x000f0000);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

        seg_setup(VCPU_SREG_DS);
        seg_setup(VCPU_SREG_ES);
        seg_setup(VCPU_SREG_FS);
        seg_setup(VCPU_SREG_GS);
        seg_setup(VCPU_SREG_SS);

        vmcs_write16(GUEST_TR_SELECTOR, 0);
        vmcs_writel(GUEST_TR_BASE, 0);
        vmcs_write32(GUEST_TR_LIMIT, 0xffff);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        vmcs_write16(GUEST_LDTR_SELECTOR, 0);
        vmcs_writel(GUEST_LDTR_BASE, 0);
        vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
        vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

        vmcs_write32(GUEST_SYSENTER_CS, 0);
        vmcs_writel(GUEST_SYSENTER_ESP, 0);
        vmcs_writel(GUEST_SYSENTER_EIP, 0);

        vmcs_writel(GUEST_RFLAGS, 0x02);
        vmcs_writel(GUEST_RIP, 0xfff0);
        vmcs_writel(GUEST_RSP, 0);

        /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
        vmcs_writel(GUEST_DR7, 0x400);

        vmcs_writel(GUEST_GDTR_BASE, 0);
        vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

        vmcs_writel(GUEST_IDTR_BASE, 0);
        vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

        vmcs_write32(GUEST_ACTIVITY_STATE, 0);
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

        /* I/O */
        vmcs_write64(IO_BITMAP_A, 0);
        vmcs_write64(IO_BITMAP_B, 0);

        guest_write_tsc(0);

        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

        /* Special registers */
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

        /* Control */
        vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS,
                               PIN_BASED_VM_EXEC_CONTROL,
                               PIN_BASED_EXT_INTR_MASK   /* 20.6.1 */
                               | PIN_BASED_NMI_EXITING   /* 20.6.1 */
                        );
        vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS,
                               CPU_BASED_VM_EXEC_CONTROL,
                               CPU_BASED_HLT_EXITING         /* 20.6.2 */
                               | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
                               | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
                               | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
                               | CPU_BASED_MOV_DR_EXITING
                               | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
                        );

        vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

        vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
        vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
        vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
        vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
        rdmsrl(MSR_GS_BASE, a);
        vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
        vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
        vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

        get_idt(&dt);
        vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */


        vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */

        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
        rdmsrl(MSR_IA32_SYSENTER_ESP, a);
        vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
                u64 data;
                int j = vcpu->nmsrs;

                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
                data = data_low | ((u64)data_high << 32);
                vcpu->host_msrs[j].index = index;
                vcpu->host_msrs[j].reserved = 0;
                vcpu->host_msrs[j].data = data;
                vcpu->guest_msrs[j] = vcpu->host_msrs[j];
                ++vcpu->nmsrs;
        }
        printk(KERN_DEBUG "kvm: msrs: %d\n", vcpu->nmsrs);

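        /*
         * The first NR_BAD_MSRS entries are excluded from the automatic
         * VM-entry/exit MSR areas (they need special handling); only the
         * remaining "good" MSRs are switched automatically.
         */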
        nr_good_msrs = vcpu->nmsrs - NR_BAD_MSRS;
        vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
                    virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
        vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
                    virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
        vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
                    virt_to_phys(vcpu->host_msrs + NR_BAD_MSRS));
        vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
                               (HOST_IS_64 << 9));  /* 22.2.1, 20.7.1 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */


        /* 22.2.1, 20.8.1 */
        vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
                               VM_ENTRY_CONTROLS, 0);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

#ifdef CONFIG_X86_64
        vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
        vmcs_writel(TPR_THRESHOLD, 0);
#endif

        vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

        vcpu->cr0 = 0x60000010;
        vmx_set_cr0(vcpu, vcpu->cr0); /* enter rmode */
        vmx_set_cr4(vcpu, 0);
#ifdef CONFIG_X86_64
        vmx_set_efer(vcpu, 0);
#endif

        return 0;

out:
        return ret;
}

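/*
 * Emulate real-mode interrupt delivery by hand: push FLAGS, CS and IP on
 * the guest stack, clear IF/TF/AC, and vector through the IVT entry at
 * irq * 4.
 */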
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
        u16 ent[2];
        u16 cs;
        u16 ip;
        unsigned long flags;
        unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
        u16 sp = vmcs_readl(GUEST_RSP);
        u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

        if (sp > ss_limit || sp < 6) {
                vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_RSP),
                            vmcs_readl(GUEST_SS_BASE),
                            vmcs_read32(GUEST_SS_LIMIT));
                return;
        }

        if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
                                                                sizeof(ent)) {
                vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
                return;
        }

        flags = vmcs_readl(GUEST_RFLAGS);
        cs = vmcs_readl(GUEST_CS_BASE) >> 4;
        ip = vmcs_readl(GUEST_RIP);


        if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
            kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
            kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
                vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
                return;
        }

        vmcs_writel(GUEST_RFLAGS, flags &
                    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
        vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
        vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
        vmcs_writel(GUEST_RIP, ent[0]);
        vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}

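/*
 * Pick the lowest pending interrupt from the two-level irq_summary /
 * irq_pending bitmap and deliver it, either through the VM-entry
 * interruption field or, in real mode, by direct emulation.
 */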
static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);

        if (vcpu->rmode.active) {
                inject_rmode_irq(vcpu, irq);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                        irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}


static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                       struct kvm_run *kvm_run)
{
        u32 cpu_based_vm_exec_control;

        vcpu->interrupt_window_open =
                ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

        if (vcpu->interrupt_window_open &&
            vcpu->irq_summary &&
            !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                /*
                 * If interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
                kvm_do_inject_irq(vcpu);

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        if (!vcpu->interrupt_window_open &&
            (vcpu->irq_summary || kvm_run->request_interrupt_window))
                /*
                 * Interrupts blocked.  Wait for unblock.
                 */
                cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        else
                cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;

        set_debugreg(dbg->bp[0], 0);
        set_debugreg(dbg->bp[1], 1);
        set_debugreg(dbg->bp[2], 2);
        set_debugreg(dbg->bp[3], 3);

        if (dbg->singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
                vmcs_writel(GUEST_RFLAGS, flags);
        }
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                                  int vec, u32 err_code)
{
        if (!vcpu->rmode.active)
                return 0;

        if (vec == GP_VECTOR && err_code == 0)
                if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
                        return 1;
        return 0;
}

static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        u32 intr_info, error_code;
        unsigned long cr2, rip;
        u32 vect_info;
        enum emulation_result er;
        int r;

        vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
                                                !is_page_fault(intr_info)) {
                printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
                       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
        }

        if (is_external_interrupt(vect_info)) {
                int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
                set_bit(irq, vcpu->irq_pending);
                set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
        }

        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
                asm ("int $2");
                return 1;
        }
        error_code = 0;
        rip = vmcs_readl(GUEST_RIP);
        if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
                cr2 = vmcs_readl(EXIT_QUALIFICATION);

                spin_lock(&vcpu->kvm->lock);
                r = kvm_mmu_page_fault(vcpu, cr2, error_code);
                if (r < 0) {
                        spin_unlock(&vcpu->kvm->lock);
                        return r;
                }
                if (!r) {
                        spin_unlock(&vcpu->kvm->lock);
                        return 1;
                }

                er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
                spin_unlock(&vcpu->kvm->lock);

                switch (er) {
                case EMULATE_DONE:
                        return 1;
                case EMULATE_DO_MMIO:
                        ++kvm_stat.mmio_exits;
                        kvm_run->exit_reason = KVM_EXIT_MMIO;
                        return 0;
                case EMULATE_FAIL:
                        vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
                        break;
                default:
                        BUG();
                }
        }

        if (vcpu->rmode.active &&
            handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
                                                                error_code))
                return 1;

        if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
            (INTR_TYPE_EXCEPTION | 1)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                return 0;
        }
        kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
        kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
        kvm_run->ex.error_code = error_code;
        return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                                     struct kvm_run *kvm_run)
{
        ++kvm_stat.irq_exits;
        return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

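/*
 * For a REP string instruction, read the opcode bytes at rip and scan the
 * prefixes for an address-size override (0x67); the iteration count is
 * then taken from the low countr_size bytes of rCX.
 */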
static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
{
        u64 inst;
        gva_t rip;
        int countr_size;
        int i, n;

        if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
                countr_size = 2;
        } else {
                u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);

                countr_size = (cs_ar & AR_L_MASK) ? 8:
                              (cs_ar & AR_DB_MASK) ? 4: 2;
        }

        rip = vmcs_readl(GUEST_RIP);
        if (countr_size != 8)
                rip += vmcs_readl(GUEST_CS_BASE);

        n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);

        for (i = 0; i < n; i++) {
                switch (((u8*)&inst)[i]) {
                case 0xf0:
                case 0xf2:
                case 0xf3:
                case 0x2e:
                case 0x36:
                case 0x3e:
                case 0x26:
                case 0x64:
                case 0x65:
                case 0x66:
                        break;
                case 0x67:
                        countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
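                        /* fall through */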
                default:
                        goto done;
                }
        }
        return 0;
done:
        countr_size *= 8;
        *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
        return 1;
}

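/*
 * I/O exit qualification (SDM): bits 2:0 = size - 1, bit 3 = direction
 * (1 = in), bit 4 = string, bit 5 = rep, bits 31:16 = port number.
 */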
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        u64 exit_qualification;
        int size, down, in, string, rep;
        unsigned port;
        unsigned long count;
        gva_t address;

        ++kvm_stat.io_exits;
        exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
        in = (exit_qualification & 8) != 0;
        size = (exit_qualification & 7) + 1;
        string = (exit_qualification & 16) != 0;
        down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
        count = 1;
        rep = (exit_qualification & 32) != 0;
        port = exit_qualification >> 16;
        address = 0;
        if (string) {
                if (rep && !get_io_count(vcpu, &count))
                        return 1;
                address = vmcs_readl(GUEST_LINEAR_ADDRESS);
        }
        return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
                             address, rep, port);
}

static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMCALL instruction:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xc1; /* 0f 01 c1 = vmcall */
        hypercall[3] = 0xc3; /* ret */
1469 }
1470
1471 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1472 {
1473         u64 exit_qualification;
1474         int cr;
1475         int reg;
1476
1477         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1478         cr = exit_qualification & 15;
1479         reg = (exit_qualification >> 8) & 15;
1480         switch ((exit_qualification >> 4) & 3) {
1481         case 0: /* mov to cr */
1482                 switch (cr) {
1483                 case 0:
1484                         vcpu_load_rsp_rip(vcpu);
1485                         set_cr0(vcpu, vcpu->regs[reg]);
1486                         skip_emulated_instruction(vcpu);
1487                         return 1;
1488                 case 3:
1489                         vcpu_load_rsp_rip(vcpu);
1490                         set_cr3(vcpu, vcpu->regs[reg]);
1491                         skip_emulated_instruction(vcpu);
1492                         return 1;
1493                 case 4:
1494                         vcpu_load_rsp_rip(vcpu);
1495                         set_cr4(vcpu, vcpu->regs[reg]);
1496                         skip_emulated_instruction(vcpu);
1497                         return 1;
1498                 case 8:
1499                         vcpu_load_rsp_rip(vcpu);
1500                         set_cr8(vcpu, vcpu->regs[reg]);
1501                         skip_emulated_instruction(vcpu);
1502                         return 1;
1503                 };
1504                 break;
1505         case 1: /*mov from cr*/
1506                 switch (cr) {
1507                 case 3:
1508                         vcpu_load_rsp_rip(vcpu);
1509                         vcpu->regs[reg] = vcpu->cr3;
1510                         vcpu_put_rsp_rip(vcpu);
1511                         skip_emulated_instruction(vcpu);
1512                         return 1;
1513                 case 8:
1514                         printk(KERN_DEBUG "handle_cr: read CR8 "
1515                                "cpu erratum AA15\n");
1516                         vcpu_load_rsp_rip(vcpu);
1517                         vcpu->regs[reg] = vcpu->cr8;
1518                         vcpu_put_rsp_rip(vcpu);
1519                         skip_emulated_instruction(vcpu);
1520                         return 1;
1521                 }
1522                 break;
1523         case 3: /* lmsw */
1524                 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1525
1526                 skip_emulated_instruction(vcpu);
1527                 return 1;
1528         default:
1529                 break;
1530         }
1531         kvm_run->exit_reason = 0;
1532         printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
1533                (int)(exit_qualification >> 4) & 3, cr);
1534         return 0;
1535 }
1536
1537 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1538 {
1539         u64 exit_qualification;
1540         unsigned long val;
1541         int dr, reg;
1542
1543         /*
1544          * FIXME: this code assumes the host is debugging the guest.
1545          *        need to deal with guest debugging itself too.
1546          */
1547         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1548         dr = exit_qualification & 7;
1549         reg = (exit_qualification >> 8) & 15;
1550         vcpu_load_rsp_rip(vcpu);
1551         if (exit_qualification & 16) {
1552                 /* mov from dr */
1553                 switch (dr) {
1554                 case 6:
1555                         val = 0xffff0ff0;
1556                         break;
1557                 case 7:
1558                         val = 0x400;
1559                         break;
1560                 default:
1561                         val = 0;
1562                 }
1563                 vcpu->regs[reg] = val;
1564         } else {
1565                 /* mov to dr */
1566         }
1567         vcpu_put_rsp_rip(vcpu);
1568         skip_emulated_instruction(vcpu);
1569         return 1;
1570 }
1571
1572 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1573 {
1574         kvm_emulate_cpuid(vcpu);
1575         return 1;
1576 }
1577
1578 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1579 {
1580         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1581         u64 data;
1582
1583         if (vmx_get_msr(vcpu, ecx, &data)) {
1584                 vmx_inject_gp(vcpu, 0);
1585                 return 1;
1586         }
1587
1588         /* FIXME: handling of bits 32:63 of rax, rdx */
1589         vcpu->regs[VCPU_REGS_RAX] = data & -1u;
1590         vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
1591         skip_emulated_instruction(vcpu);
1592         return 1;
1593 }
1594
1595 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1596 {
1597         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1598         u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
1599                 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1600
1601         if (vmx_set_msr(vcpu, ecx, data) != 0) {
1602                 vmx_inject_gp(vcpu, 0);
1603                 return 1;
1604         }
1605
1606         skip_emulated_instruction(vcpu);
1607         return 1;
1608 }
1609
1610 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1611                               struct kvm_run *kvm_run)
1612 {
1613         kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
1614         kvm_run->cr8 = vcpu->cr8;
1615         kvm_run->apic_base = vcpu->apic_base;
1616         kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1617                                                   vcpu->irq_summary == 0);
1618 }
1619
1620 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1621                                    struct kvm_run *kvm_run)
1622 {
1623         /*
1624          * If userspace is waiting to inject interrupts, exit as soon as
1625          * possible.
1626          */
1627         if (kvm_run->request_interrupt_window &&
1628             !vcpu->irq_summary) {
1629                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1630                 ++kvm_stat.irq_window_exits;
1631                 return 0;
1632         }
1633         return 1;
1634 }
1635
1636 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1637 {
1638         skip_emulated_instruction(vcpu);
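             /* With an interrupt pending, the halt completes immediately. */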
1639         if (vcpu->irq_summary)
1640                 return 1;
1641
1642         kvm_run->exit_reason = KVM_EXIT_HLT;
1643         ++kvm_stat.halt_exits;
1644         return 0;
1645 }
1646
1647 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1648 {
1649         skip_emulated_instruction(vcpu);
1650         return kvm_hypercall(vcpu, kvm_run);
1651 }
1652
1653 /*
1654  * The exit handlers return 1 if the exit was handled fully and guest execution
1655  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
1656  * to be done to userspace and return 0.
1657  */
1658 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1659                                       struct kvm_run *kvm_run) = {
1660         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
1661         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
1662         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
1663         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
1664         [EXIT_REASON_CR_ACCESS]               = handle_cr,
1665         [EXIT_REASON_DR_ACCESS]               = handle_dr,
1666         [EXIT_REASON_CPUID]                   = handle_cpuid,
1667         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
1668         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
1669         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
1670         [EXIT_REASON_HLT]                     = handle_halt,
1671         [EXIT_REASON_VMCALL]                  = handle_vmcall,
1672 };
1673
1674 static const int kvm_vmx_max_exit_handlers =
1675         ARRAY_SIZE(kvm_vmx_exit_handlers);
1676
1677 /*
1678  * The guest has exited.  See if we can fix it or if we need userspace
1679  * assistance.
1680  */
1681 static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1682 {
1683         u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1684         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
1685
1686         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1687             exit_reason != EXIT_REASON_EXCEPTION_NMI)
1688                 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
1689                        "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
1690         kvm_run->instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1691         if (exit_reason < kvm_vmx_max_exit_handlers
1692             && kvm_vmx_exit_handlers[exit_reason])
1693                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
1694         else {
1695                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1696                 kvm_run->hw.hardware_exit_reason = exit_reason;
1697         }
1698         return 0;
1699 }
1700
1701 /*
1702  * Check if userspace requested an interrupt window, and that the
1703  * interrupt window is open.
1704  *
1705  * No need to exit to userspace if we already have an interrupt queued.
1706  */
1707 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1708                                           struct kvm_run *kvm_run)
1709 {
1710         return (!vcpu->irq_summary &&
1711                 kvm_run->request_interrupt_window &&
1712                 vcpu->interrupt_window_open &&
1713                 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1714 }
1715
1716 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1717 {
1718         u8 fail;
1719         u16 fs_sel, gs_sel, ldt_sel;
1720         int fs_gs_ldt_reload_needed;
1721         int r;
1722
1723 again:
1724         /*
1725          * Set host fs and gs selectors.  Unfortunately, SDM section 22.2.3
1726          * does not allow segment selectors with cpl > 0 or ti == 1.
1727          */
1728         fs_sel = read_fs();
1729         gs_sel = read_gs();
1730         ldt_sel = read_ldt();
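             /*
              * A non-zero RPL or TI bit in fs or gs, or any LDT selector at
              * all, means the selectors must be reloaded on the way back.
              */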
1731         fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
1732         if (!fs_gs_ldt_reload_needed) {
1733                 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1734                 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1735         } else {
1736                 vmcs_write16(HOST_FS_SELECTOR, 0);
1737                 vmcs_write16(HOST_GS_SELECTOR, 0);
1738         }
1739
1740 #ifdef CONFIG_X86_64
1741         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1742         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1743 #else
1744         vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
1745         vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
1746 #endif
1747
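             /* No interrupt injection while an mmio read awaits the emulator. */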
1748         if (!vcpu->mmio_read_completed)
1749                 do_interrupt_requests(vcpu, kvm_run);
1750
1751         if (vcpu->guest_debug.enabled)
1752                 kvm_guest_debug_pre(vcpu);
1753
1754         fx_save(vcpu->host_fx_image);
1755         fx_restore(vcpu->guest_fx_image);
1756
1757         save_msrs(vcpu->host_msrs, vcpu->nmsrs);
1758         load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1759
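             /*
              * Save the host registers, load the guest registers and enter
              * the guest: VMLAUNCH on the first entry, VMRESUME afterwards.
              * On VM exit the guest registers are written back into the vcpu
              * and the host registers are restored.
              */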
1760         asm (
1761                 /* Store host registers */
1762                 "pushf \n\t"
1763 #ifdef CONFIG_X86_64
1764                 "push %%rax; push %%rbx; push %%rdx;"
1765                 "push %%rsi; push %%rdi; push %%rbp;"
1766                 "push %%r8;  push %%r9;  push %%r10; push %%r11;"
1767                 "push %%r12; push %%r13; push %%r14; push %%r15;"
1768                 "push %%rcx \n\t"
1769                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
1770 #else
1771                 "pusha; push %%ecx \n\t"
1772                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
1773 #endif
1774                 /* Check if vmlaunch or vmresume is needed */
1775                 "cmp $0, %1 \n\t"
1776                 /* Load guest registers.  Don't clobber flags. */
1777 #ifdef CONFIG_X86_64
1778                 "mov %c[cr2](%3), %%rax \n\t"
1779                 "mov %%rax, %%cr2 \n\t"
1780                 "mov %c[rax](%3), %%rax \n\t"
1781                 "mov %c[rbx](%3), %%rbx \n\t"
1782                 "mov %c[rdx](%3), %%rdx \n\t"
1783                 "mov %c[rsi](%3), %%rsi \n\t"
1784                 "mov %c[rdi](%3), %%rdi \n\t"
1785                 "mov %c[rbp](%3), %%rbp \n\t"
1786                 "mov %c[r8](%3),  %%r8  \n\t"
1787                 "mov %c[r9](%3),  %%r9  \n\t"
1788                 "mov %c[r10](%3), %%r10 \n\t"
1789                 "mov %c[r11](%3), %%r11 \n\t"
1790                 "mov %c[r12](%3), %%r12 \n\t"
1791                 "mov %c[r13](%3), %%r13 \n\t"
1792                 "mov %c[r14](%3), %%r14 \n\t"
1793                 "mov %c[r15](%3), %%r15 \n\t"
1794                 "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
1795 #else
1796                 "mov %c[cr2](%3), %%eax \n\t"
1797                 "mov %%eax,   %%cr2 \n\t"
1798                 "mov %c[rax](%3), %%eax \n\t"
1799                 "mov %c[rbx](%3), %%ebx \n\t"
1800                 "mov %c[rdx](%3), %%edx \n\t"
1801                 "mov %c[rsi](%3), %%esi \n\t"
1802                 "mov %c[rdi](%3), %%edi \n\t"
1803                 "mov %c[rbp](%3), %%ebp \n\t"
1804                 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
1805 #endif
1806                 /* Enter guest mode */
1807                 "jne launched \n\t"
1808                 ASM_VMX_VMLAUNCH "\n\t"
1809                 "jmp kvm_vmx_return \n\t"
1810                 "launched: " ASM_VMX_VMRESUME "\n\t"
1811                 ".globl kvm_vmx_return \n\t"
1812                 "kvm_vmx_return: "
1813                 /* Save guest registers, load host registers, keep flags */
1814 #ifdef CONFIG_X86_64
1815                 "xchg %3,     (%%rsp) \n\t"
1816                 "mov %%rax, %c[rax](%3) \n\t"
1817                 "mov %%rbx, %c[rbx](%3) \n\t"
1818                 "pushq (%%rsp); popq %c[rcx](%3) \n\t"
1819                 "mov %%rdx, %c[rdx](%3) \n\t"
1820                 "mov %%rsi, %c[rsi](%3) \n\t"
1821                 "mov %%rdi, %c[rdi](%3) \n\t"
1822                 "mov %%rbp, %c[rbp](%3) \n\t"
1823                 "mov %%r8,  %c[r8](%3) \n\t"
1824                 "mov %%r9,  %c[r9](%3) \n\t"
1825                 "mov %%r10, %c[r10](%3) \n\t"
1826                 "mov %%r11, %c[r11](%3) \n\t"
1827                 "mov %%r12, %c[r12](%3) \n\t"
1828                 "mov %%r13, %c[r13](%3) \n\t"
1829                 "mov %%r14, %c[r14](%3) \n\t"
1830                 "mov %%r15, %c[r15](%3) \n\t"
1831                 "mov %%cr2, %%rax   \n\t"
1832                 "mov %%rax, %c[cr2](%3) \n\t"
1833                 "mov (%%rsp), %3 \n\t"
1834
1835                 "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
1836                 "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
1837                 "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
1838                 "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
1839 #else
1840                 "xchg %3, (%%esp) \n\t"
1841                 "mov %%eax, %c[rax](%3) \n\t"
1842                 "mov %%ebx, %c[rbx](%3) \n\t"
1843                 "pushl (%%esp); popl %c[rcx](%3) \n\t"
1844                 "mov %%edx, %c[rdx](%3) \n\t"
1845                 "mov %%esi, %c[rsi](%3) \n\t"
1846                 "mov %%edi, %c[rdi](%3) \n\t"
1847                 "mov %%ebp, %c[rbp](%3) \n\t"
1848                 "mov %%cr2, %%eax  \n\t"
1849                 "mov %%eax, %c[cr2](%3) \n\t"
1850                 "mov (%%esp), %3 \n\t"
1851
1852                 "pop %%ecx; popa \n\t"
1853 #endif
1854                 "setbe %0 \n\t"
1855                 "popf \n\t"
1856               : "=q" (fail)
1857               : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
1858                 "c"(vcpu),
1859                 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
1860                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
1861                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
1862                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
1863                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
1864                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
1865                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
1866 #ifdef CONFIG_X86_64
1867                 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
1868                 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
1869                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
1870                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
1871                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
1872                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
1873                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
1874                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
1875 #endif
1876                 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
1877               : "cc", "memory" );
1878
1879         /*
1880          * Reload the segment selectors ASAP; they are needed for a
1881          * functional kernel: x86 relies on having __KERNEL_PDA in %fs and
1882          * x86_64 relies on having 0 in %gs for the CPU PDA to work.
1883          */
1884         if (fs_gs_ldt_reload_needed) {
1885                 load_ldt(ldt_sel);
1886                 load_fs(fs_sel);
1887                 /*
1888                  * If we have to reload gs, we must take care to
1889                  * preserve our gs base.
1890                  */
1891                 local_irq_disable();
1892                 load_gs(gs_sel);
1893 #ifdef CONFIG_X86_64
1894                 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
1895 #endif
1896                 local_irq_enable();
1897
1898                 reload_tss();
1899         }
1900         ++kvm_stat.exits;
1901
1902         save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1903         load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
1904
1905         fx_save(vcpu->guest_fx_image);
1906         fx_restore(vcpu->host_fx_image);
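             /*
              * Bits 0 and 1 of the interruptibility info flag blocking by
              * STI and by MOV SS; the interrupt window is only open when
              * both are clear.
              */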
1907         vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
1908
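             /*
              * The VM exit left the kernel data selectors in ds and es;
              * restore the user selectors.
              */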
1909         asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
1910
1911         if (fail) {
1912                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1913                 kvm_run->fail_entry.hardware_entry_failure_reason
1914                         = vmcs_read32(VM_INSTRUCTION_ERROR);
1915                 r = 0;
1916         } else {
1917                 /*
1918                  * Profile KVM exit RIPs:
1919                  */
1920                 if (unlikely(prof_on == KVM_PROFILING))
1921                         profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
1922
1923                 vcpu->launched = 1;
1924                 r = kvm_handle_exit(kvm_run, vcpu);
1925                 if (r > 0) {
1926                         /* Give the scheduler a chance to reschedule. */
1927                         if (signal_pending(current)) {
1928                                 ++kvm_stat.signal_exits;
1929                                 post_kvm_run_save(vcpu, kvm_run);
1930                                 kvm_run->exit_reason = KVM_EXIT_INTR;
1931                                 return -EINTR;
1932                         }
1933
1934                         if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1935                                 ++kvm_stat.request_irq_exits;
1936                                 post_kvm_run_save(vcpu, kvm_run);
1937                                 kvm_run->exit_reason = KVM_EXIT_INTR;
1938                                 return -EINTR;
1939                         }
1940
1941                         kvm_resched(vcpu);
1942                         goto again;
1943                 }
1944         }
1945
1946         post_kvm_run_save(vcpu, kvm_run);
1947         return r;
1948 }
1949
1950 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1951 {
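             /*
              * Rewriting the current value forces a CR3 load on the next
              * VM entry, which flushes the guest TLB entries.
              */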
1952         vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
1953 }
1954
1955 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
1956                                   unsigned long addr,
1957                                   u32 err_code)
1958 {
1959         u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1960
1961         ++kvm_stat.pf_guest;
1962
1963         if (is_page_fault(vect_info)) {
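                     /* A #PF while delivering a #PF becomes a double fault. */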
1964                 printk(KERN_DEBUG "inject_page_fault: "
1965                        "double fault 0x%lx @ 0x%lx\n",
1966                        addr, vmcs_readl(GUEST_RIP));
1967                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
1968                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1969                              DF_VECTOR |
1970                              INTR_TYPE_EXCEPTION |
1971                              INTR_INFO_DELIEVER_CODE_MASK |
1972                              INTR_INFO_VALID_MASK);
1973                 return;
1974         }
1975         vcpu->cr2 = addr;
1976         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
1977         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1978                      PF_VECTOR |
1979                      INTR_TYPE_EXCEPTION |
1980                      INTR_INFO_DELIEVER_CODE_MASK |
1981                      INTR_INFO_VALID_MASK);
1982
1983 }
1984
1985 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
1986 {
1987         if (vcpu->vmcs) {
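                     /*
                      * The VMCS may be loaded on other cpus; vmclear it
                      * everywhere before freeing.
                      */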
1988                 on_each_cpu(__vcpu_clear, vcpu, 0, 1);
1989                 free_vmcs(vcpu->vmcs);
1990                 vcpu->vmcs = NULL;
1991         }
1992 }
1993
1994 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
1995 {
1996         vmx_free_vmcs(vcpu);
1997 }
1998
1999 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2000 {
2001         struct vmcs *vmcs;
2002
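             /* A page apiece easily holds the guest and host msr arrays. */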
2003         vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2004         if (!vcpu->guest_msrs)
2005                 return -ENOMEM;
2006
2007         vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2008         if (!vcpu->host_msrs)
2009                 goto out_free_guest_msrs;
2010
2011         vmcs = alloc_vmcs();
2012         if (!vmcs)
2013                 goto out_free_msrs;
2014
2015         vmcs_clear(vmcs);
2016         vcpu->vmcs = vmcs;
2017         vcpu->launched = 0;
2018
2019         return 0;
2020
2021 out_free_msrs:
2022         kfree(vcpu->host_msrs);
2023         vcpu->host_msrs = NULL;
2024
2025 out_free_guest_msrs:
2026         kfree(vcpu->guest_msrs);
2027         vcpu->guest_msrs = NULL;
2028
2029         return -ENOMEM;
2030 }
2031
2032 static struct kvm_arch_ops vmx_arch_ops = {
2033         .cpu_has_kvm_support = cpu_has_kvm_support,
2034         .disabled_by_bios = vmx_disabled_by_bios,
2035         .hardware_setup = hardware_setup,
2036         .hardware_unsetup = hardware_unsetup,
2037         .hardware_enable = hardware_enable,
2038         .hardware_disable = hardware_disable,
2039
2040         .vcpu_create = vmx_create_vcpu,
2041         .vcpu_free = vmx_free_vcpu,
2042
2043         .vcpu_load = vmx_vcpu_load,
2044         .vcpu_put = vmx_vcpu_put,
2045         .vcpu_decache = vmx_vcpu_decache,
2046
2047         .set_guest_debug = set_guest_debug,
2048         .get_msr = vmx_get_msr,
2049         .set_msr = vmx_set_msr,
2050         .get_segment_base = vmx_get_segment_base,
2051         .get_segment = vmx_get_segment,
2052         .set_segment = vmx_set_segment,
2053         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
2054         .decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
2055         .set_cr0 = vmx_set_cr0,
2056         .set_cr3 = vmx_set_cr3,
2057         .set_cr4 = vmx_set_cr4,
2058 #ifdef CONFIG_X86_64
2059         .set_efer = vmx_set_efer,
2060 #endif
2061         .get_idt = vmx_get_idt,
2062         .set_idt = vmx_set_idt,
2063         .get_gdt = vmx_get_gdt,
2064         .set_gdt = vmx_set_gdt,
2065         .cache_regs = vcpu_load_rsp_rip,
2066         .decache_regs = vcpu_put_rsp_rip,
2067         .get_rflags = vmx_get_rflags,
2068         .set_rflags = vmx_set_rflags,
2069
2070         .tlb_flush = vmx_flush_tlb,
2071         .inject_page_fault = vmx_inject_page_fault,
2072
2073         .inject_gp = vmx_inject_gp,
2074
2075         .run = vmx_vcpu_run,
2076         .skip_emulated_instruction = skip_emulated_instruction,
2077         .vcpu_setup = vmx_vcpu_setup,
2078         .patch_hypercall = vmx_patch_hypercall,
2079 };
2080
2081 static int __init vmx_init(void)
2082 {
2083         return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
2084 }
2085
2086 static void __exit vmx_exit(void)
2087 {
2088         kvm_exit_arch();
2089 }
2090
2091 module_init(vmx_init)
2092 module_exit(vmx_exit)