KVM: x86: drop duplicate kvm_flush_remote_tlb calls
arch/x86/kvm/x86.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

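/*
 * Return the base address encoded in the GDT/LDT descriptor that
 * @selector refers to; 0 for a null selector.
 */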
unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;

        if (selector == 0)
                return 0;

        kvm_get_gdt(&gdt);
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector = kvm_read_ldt();

                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
        v = get_desc_base(d);
#ifdef CONFIG_X86_64
        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        /* The same field backs the APIC base whether or not the irqchip
         * is in kernel, so no branch is needed here. */
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = false;
        vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

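/*
 * Queue a guest page fault.  If another exception is already pending,
 * promote #PF to #DF and #DF to a triple-fault shutdown, mirroring the
 * hardware contributory-exception rules.
 */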
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;

        if (vcpu->arch.exception.pending) {
                switch (vcpu->arch.exception.nr) {
                case DF_VECTOR:
                        /* triple fault -> shutdown */
                        set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                        return;
                case PF_VECTOR:
                        vcpu->arch.exception.nr = DF_VECTOR;
                        vcpu->arch.exception.error_code = 0;
                        return;
                default:
                        /* replace the previous exception with the new one in
                           the hope that instruction re-execution will
                           regenerate the lost exception */
                        vcpu->arch.exception.pending = false;
                        break;
                }
        }
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if (is_present_gpte(pdpte[i]) &&
                    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_dirty);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

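/*
 * Return true if the PDPTEs cached in the vcpu may no longer match the
 * ones in guest memory (or cannot be read back at all).
 */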
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
        return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;

        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = vcpu->arch.cr4;
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) {
                kvm_inject_gp(vcpu, 0);
        } else {
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

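/*
 * Validate a guest EFER write and hand it to the vendor module.  LMA is
 * never taken from the guest value; it is carried over from the current
 * shadow EFER.
 */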
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;

        vcpu->arch.shadow_efer = efer;

        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

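/*
 * Write the pvclock wall-clock structure to guest memory.  The version
 * field is bumped before and after the payload so the guest can detect
 * and retry a torn read (an odd version means an update is in progress).
 */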
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        static int version;
        struct pvclock_wall_clock wc;
        struct timespec now, sys, boot;

        if (!wall_clock)
                return;

        version++;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  Guest system time equals host
         * system time for us, so we fill in the host boot time here.
         */
        now = current_kernel_time();
        ktime_get_ts(&sys);
        boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}

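/*
 * Compute a pvclock (tsc_shift, tsc_to_system_mul) pair for the given
 * TSC frequency, such that ((tsc << tsc_shift) * tsc_to_system_mul) >> 32
 * yields nanoseconds (a negative tsc_shift denotes a right shift).
 */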
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

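/*
 * Push a fresh system-time snapshot into the vcpu's registered pvclock
 * page, rescaling first if this cpu's TSC frequency has changed.
 */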
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;

        if (!vcpu->time_page)
                return;

        this_tsc_khz = get_cpu_var(cpu_tsc_khz);
        if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
                kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = this_tsc_khz;
        }
        put_cpu_var(cpu_tsc_khz);

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec);
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
        struct kvm_vcpu_arch *vcpu = &v->arch;

        if (!vcpu->time_page)
                return 0;
        set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
        return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static bool valid_pat_type(unsigned t)
{
        return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
        return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int i;

        if (!msr_mtrr_valid(msr))
                return false;

        if (msr == MSR_IA32_CR_PAT) {
                for (i = 0; i < 8; i++)
                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
                return valid_mtrr_type(data & 0xff);
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
                for (i = 0; i < 8; i++)
                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        }

        /* variable MTRRs */
        return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!mtrr_valid(vcpu, msr, data))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        kvm_mmu_reset_context(vcpu);
        return 0;
}

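/*
 * Handle writes to the machine-check MSRs: MCG_STATUS, MCG_CTL and the
 * per-bank MCi_CTL/STATUS/ADDR/MISC registers.
 */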
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 mcg_cap = vcpu->arch.mcg_cap;
        unsigned bank_num = mcg_cap & 0xff;

        switch (msr) {
        case MSR_IA32_MCG_STATUS:
                vcpu->arch.mcg_status = data;
                break;
        case MSR_IA32_MCG_CTL:
                if (!(mcg_cap & MCG_CTL_P))
                        return 1;
                if (data != 0 && data != ~(u64)0)
                        return -1;
                vcpu->arch.mcg_ctl = data;
                break;
        default:
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
                        /* only 0 or all 1s can be written to IA32_MCi_CTL */
                        if ((offset & 0x3) == 0 &&
                            data != 0 && data != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
                }
                return 1;
        }
        return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                data);
                        return 1;
                }
                break;
        case MSR_FAM10H_MMIO_CONF_BASE:
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
                                "0x%llx\n", data);
                        return 1;
                }
                break;
        case MSR_AMD64_NB_CFG:
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_request_guest_time_update(vcpu);
                break;
        }
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return set_msr_mce(vcpu, msr, data);

        /* Performance counters are not protected by a CPUID bit,
         * so we should check all of them in the generic path for the sake of
         * cross vendor migration.
         * Writing a zero into the event select MSRs disables them,
         * which we perfectly emulate ;-). Any other value should be at least
         * reported, some guests depend on them.
         */
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_EVNTSEL3:
                if (data != 0)
                        pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                                "0x%x data 0x%llx\n", msr, data);
                break;
        /* at least RHEL 4 unconditionally writes to the perfctr registers,
         * so we ignore writes to make it happy.
         */
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
        case MSR_K7_PERFCTR3:
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                msr, data);
                        return 1;
                } else {
                        pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
                                msr, data);
                        break;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;
        u64 mcg_cap = vcpu->arch.mcg_cap;
        unsigned bank_num = mcg_cap & 0xff;

        switch (msr) {
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
                data = 0;
                break;
        case MSR_IA32_MCG_CAP:
                data = vcpu->arch.mcg_cap;
                break;
        case MSR_IA32_MCG_CTL:
                if (!(mcg_cap & MCG_CTL_P))
                        return 1;
                data = vcpu->arch.mcg_ctl;
                break;
        case MSR_IA32_MCG_STATUS:
                data = vcpu->arch.mcg_status;
                break;
        default:
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
                        data = vcpu->arch.mce_banks[offset];
                        break;
                }
                return 1;
        }
        *pdata = data;
        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_K8_SYSCFG:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
        case MSR_K8_INT_PENDING_MSG:
        case MSR_AMD64_NB_CFG:
        case MSR_FAM10H_MMIO_CONF_BASE:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_read(vcpu, msr, pdata);
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                        return 1;
                } else {
                        pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
                        data = 0;
                }
                break;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs processed successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
        up_read(&vcpu->kvm->slots_lock);

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs processed successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}

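/*
 * Report which optional KVM capabilities this kernel supports; called
 * for the KVM_CHECK_EXTENSION ioctl.
 */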
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_HLT:
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
        case KVM_CAP_CLOCKSOURCE:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_PIT2:
        case KVM_CAP_PIT_STATE2:
        case KVM_CAP_SET_IDENTITY_MAP_ADDR:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
        case KVM_CAP_NR_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = iommu_found();
                break;
        case KVM_CAP_MCE:
                r = KVM_MAX_MCE_BANKS;
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < msr_list.nmsrs)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SUPPORTED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
                                                      cpuid_arg->entries);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        case KVM_X86_GET_MCE_CAP_SUPPORTED: {
                u64 mce_cap;

                mce_cap = KVM_MCE_CAP_SUPPORTED;
                r = -EFAULT;
                if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
                        goto out;
                r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
        kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

/* Used when an old userspace passes the legacy KVM_SET_CPUID structure
 * to a newer kernel; the entries are widened into kvm_cpuid_entry2 format. */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid *cpuid,
                                    struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;
        kvm_apic_set_version(vcpu);

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                                     struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        return 0;

out:
        return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                                     struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

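/* Fill one cpuid entry by executing CPUID on the host for (function, index). */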
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

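/* F(X) is the mask for feature X86_FEATURE_X within its 32-bit cpuid word. */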
1445 #define F(x) bit(X86_FEATURE_##x)
1446
1447 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1448                          u32 index, int *nent, int maxnent)
1449 {
1450         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1451         unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
1452 #ifdef CONFIG_X86_64
1453         unsigned f_lm = F(LM);
1454 #else
1455         unsigned f_lm = 0;
1456 #endif
1457
1458         /* cpuid 1.edx */
1459         const u32 kvm_supported_word0_x86_features =
1460                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1461                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1462                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1463                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1464                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1465                 0 /* Reserved, DS, ACPI */ | F(MMX) |
1466                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1467                 0 /* HTT, TM, Reserved, PBE */;
1468         /* cpuid 0x80000001.edx */
1469         const u32 kvm_supported_word1_x86_features =
1470                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1471                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1472                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1473                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1474                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1475                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1476                 F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
1477                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1478         /* cpuid 1.ecx */
1479         const u32 kvm_supported_word4_x86_features =
1480                 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1481                 0 /* DS-CPL, VMX, SMX, EST */ |
1482                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1483                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1484                 0 /* Reserved, DCA */ | F(XMM4_1) |
1485                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
1486                 0 /* Reserved, XSAVE, OSXSAVE */;
1487         /* cpuid 0x80000001.ecx */
1488         const u32 kvm_supported_word6_x86_features =
1489                 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1490                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1491                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1492                 0 /* SKINIT */ | 0 /* WDT */;
1493
1494         /* all calls to cpuid_count() should be made on the same cpu */
1495         get_cpu();
1496         do_cpuid_1_ent(entry, function, index);
1497         ++*nent;
1498
1499         switch (function) {
1500         case 0:
1501                 entry->eax = min(entry->eax, (u32)0xb);
1502                 break;
1503         case 1:
1504                 entry->edx &= kvm_supported_word0_x86_features;
1505                 entry->ecx &= kvm_supported_word4_x86_features;
1506                 /* we support x2apic emulation even if host does not support
1507                  * it since we emulate x2apic in software */
1508                 entry->ecx |= F(X2APIC);
1509                 break;
1510         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1511          * may return different values. This forces us to get_cpu() before
1512          * issuing the first command, and also to emulate this annoying behavior
1513          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1514         case 2: {
1515                 int t, times = entry->eax & 0xff;
1516
1517                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1518                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
1519                 for (t = 1; t < times && *nent < maxnent; ++t) {
1520                         do_cpuid_1_ent(&entry[t], function, 0);
1521                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1522                         ++*nent;
1523                 }
1524                 break;
1525         }
1526         /* functions 4 and 0xb have an additional index. */
1527         case 4: {
1528                 int i, cache_type;
1529
1530                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1531                 /* read more entries until cache_type is zero */
1532                 for (i = 1; *nent < maxnent; ++i) {
1533                         cache_type = entry[i - 1].eax & 0x1f;
1534                         if (!cache_type)
1535                                 break;
1536                         do_cpuid_1_ent(&entry[i], function, i);
1537                         entry[i].flags |=
1538                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1539                         ++*nent;
1540                 }
1541                 break;
1542         }
1543         case 0xb: {
1544                 int i, level_type;
1545
1546                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1547                 /* read more entries until level_type is zero */
1548                 for (i = 1; *nent < maxnent; ++i) {
1549                         level_type = entry[i - 1].ecx & 0xff00;
1550                         if (!level_type)
1551                                 break;
1552                         do_cpuid_1_ent(&entry[i], function, i);
1553                         entry[i].flags |=
1554                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1555                         ++*nent;
1556                 }
1557                 break;
1558         }
1559         case 0x80000000:
1560                 entry->eax = min(entry->eax, 0x8000001a);
1561                 break;
1562         case 0x80000001:
1563                 entry->edx &= kvm_supported_word1_x86_features;
1564                 entry->ecx &= kvm_supported_word6_x86_features;
1565                 break;
1566         }
1567         put_cpu();
1568 }
1569
1570 #undef F
1571
1572 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1573                                      struct kvm_cpuid_entry2 __user *entries)
1574 {
1575         struct kvm_cpuid_entry2 *cpuid_entries;
1576         int limit, nent = 0, r = -E2BIG;
1577         u32 func;
1578
1579         if (cpuid->nent < 1)
1580                 goto out;
1581         r = -ENOMEM;
1582         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1583         if (!cpuid_entries)
1584                 goto out;
1585
1586         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1587         limit = cpuid_entries[0].eax;
1588         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1589                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1590                              &nent, cpuid->nent);
1591         r = -E2BIG;
1592         if (nent >= cpuid->nent)
1593                 goto out_free;
1594
1595         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1596         limit = cpuid_entries[nent - 1].eax;
1597         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1598                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1599                              &nent, cpuid->nent);
1600         r = -E2BIG;
1601         if (nent >= cpuid->nent)
1602                 goto out_free;
1603
1604         r = -EFAULT;
1605         if (copy_to_user(entries, cpuid_entries,
1606                          nent * sizeof(struct kvm_cpuid_entry2)))
1607                 goto out_free;
1608         cpuid->nent = nent;
1609         r = 0;
1610
1611 out_free:
1612         vfree(cpuid_entries);
1613 out:
1614         return r;
1615 }
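
/*
 * A minimal userspace sketch of driving the handler above through the
 * KVM_GET_SUPPORTED_CPUID ioctl on /dev/kvm.  Assumptions not taken from
 * this file: kvm_fd = open("/dev/kvm", O_RDWR), headers <linux/kvm.h>,
 * <sys/ioctl.h>, <errno.h>, <err.h>, <stdlib.h>, and the doubling retry
 * policy.  The kernel fails with E2BIG while the entries buffer is too
 * small and sets c->nent to the real count on success.
 *
 *        int n = 64;
 *        struct kvm_cpuid2 *c;
 *
 *        for (;;) {
 *                c = calloc(1, sizeof(*c) + n * sizeof(c->entries[0]));
 *                c->nent = n;
 *                if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, c) == 0)
 *                        break;          // c->nent now holds the count
 *                if (errno != E2BIG)
 *                        err(1, "KVM_GET_SUPPORTED_CPUID");
 *                free(c);                // too small: double and retry
 *                n *= 2;
 *        }
 */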
1616
1617 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1618                                     struct kvm_lapic_state *s)
1619 {
1620         vcpu_load(vcpu);
1621         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
1622         vcpu_put(vcpu);
1623
1624         return 0;
1625 }
1626
1627 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1628                                     struct kvm_lapic_state *s)
1629 {
1630         vcpu_load(vcpu);
1631         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1632         kvm_apic_post_state_restore(vcpu);
1633         update_cr8_intercept(vcpu);
1634         vcpu_put(vcpu);
1635
1636         return 0;
1637 }
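
/*
 * A minimal userspace sketch of the save/restore pairing for the two
 * handlers above (assumed context, not from this file: vcpu_fd is an open
 * vcpu descriptor and <linux/kvm.h> is included; return values are left
 * unchecked for brevity):
 *
 *        struct kvm_lapic_state lapic;
 *
 *        ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic);  // snapshot the APIC page
 *        // ... migrate, reset or inspect the vcpu ...
 *        ioctl(vcpu_fd, KVM_SET_LAPIC, &lapic);  // restore it
 */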
1638
1639 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1640                                     struct kvm_interrupt *irq)
1641 {
1642         if (irq->irq < 0 || irq->irq >= 256)
1643                 return -EINVAL;
1644         if (irqchip_in_kernel(vcpu->kvm))
1645                 return -ENXIO;
1646         vcpu_load(vcpu);
1647
1648         kvm_queue_interrupt(vcpu, irq->irq, false);
1649
1650         vcpu_put(vcpu);
1651
1652         return 0;
1653 }
1654
1655 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
1656 {
1657         vcpu_load(vcpu);
1658         kvm_inject_nmi(vcpu);
1659         vcpu_put(vcpu);
1660
1661         return 0;
1662 }
1663
1664 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1665                                            struct kvm_tpr_access_ctl *tac)
1666 {
1667         if (tac->flags)
1668                 return -EINVAL;
1669         vcpu->arch.tpr_access_reporting = !!tac->enabled;
1670         return 0;
1671 }
1672
1673 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1674                                         u64 mcg_cap)
1675 {
1676         int r;
1677         unsigned bank_num = mcg_cap & 0xff, bank;
1678
1679         r = -EINVAL;
1680         if (!bank_num)
1681                 goto out;
1682         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
1683                 goto out;
1684         r = 0;
1685         vcpu->arch.mcg_cap = mcg_cap;
1686         /* Init IA32_MCG_CTL to all 1s */
1687         if (mcg_cap & MCG_CTL_P)
1688                 vcpu->arch.mcg_ctl = ~(u64)0;
1689         /* Init IA32_MCi_CTL to all 1s */
1690         for (bank = 0; bank < bank_num; bank++)
1691                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1692 out:
1693         return r;
1694 }
1695
1696 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1697                                       struct kvm_x86_mce *mce)
1698 {
1699         u64 mcg_cap = vcpu->arch.mcg_cap;
1700         unsigned bank_num = mcg_cap & 0xff;
1701         u64 *banks = vcpu->arch.mce_banks;
1702
1703         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
1704                 return -EINVAL;
1705         /*
1706          * If IA32_MCG_CTL is not all 1s, uncorrected error
1707          * reporting is disabled.
1708          */
1709         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1710             vcpu->arch.mcg_ctl != ~(u64)0)
1711                 return 0;
1712         banks += 4 * mce->bank;
1713         /*
1714          * If IA32_MCi_CTL is not all 1s, uncorrected error
1715          * reporting is disabled for the bank.
1716          */
1717         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
1718                 return 0;
1719         if (mce->status & MCI_STATUS_UC) {
1720                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
1721                     !(vcpu->arch.cr4 & X86_CR4_MCE)) {
1722                         printk(KERN_DEBUG "kvm: set_mce: "
1723                                "injects mce exception while "
1724                                "previous one is in progress!\n");
1725                         set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1726                         return 0;
1727                 }
1728                 if (banks[1] & MCI_STATUS_VAL)
1729                         mce->status |= MCI_STATUS_OVER;
1730                 banks[2] = mce->addr;
1731                 banks[3] = mce->misc;
1732                 vcpu->arch.mcg_status = mce->mcg_status;
1733                 banks[1] = mce->status;
1734                 kvm_queue_exception(vcpu, MC_VECTOR);
1735         } else if (!(banks[1] & MCI_STATUS_VAL)
1736                    || !(banks[1] & MCI_STATUS_UC)) {
1737                 if (banks[1] & MCI_STATUS_VAL)
1738                         mce->status |= MCI_STATUS_OVER;
1739                 banks[2] = mce->addr;
1740                 banks[3] = mce->misc;
1741                 banks[1] = mce->status;
1742         } else
1743                 banks[1] |= MCI_STATUS_OVER;
1744         return 0;
1745 }
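
/*
 * A hedged userspace sketch of injecting an uncorrected machine check
 * through the two handlers above.  The capability value, bank number,
 * status bits and address are illustrative assumptions; vcpu_fd and
 * guest_paddr are supplied by the caller, and the MCI_ and MCG_ constants
 * come from the architecture headers.
 *
 *        __u64 cap = KVM_MCE_CAP_SUPPORTED | 10;  // MCG_CTL plus 10 banks
 *        struct kvm_x86_mce mce = {
 *                .status     = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN,
 *                .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
 *                .bank       = 9,
 *                .addr       = guest_paddr,
 *        };
 *
 *        ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &cap); // must precede SET_MCE
 *        ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
 */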
1746
1747 long kvm_arch_vcpu_ioctl(struct file *filp,
1748                          unsigned int ioctl, unsigned long arg)
1749 {
1750         struct kvm_vcpu *vcpu = filp->private_data;
1751         void __user *argp = (void __user *)arg;
1752         int r;
1753         struct kvm_lapic_state *lapic = NULL;
1754
1755         switch (ioctl) {
1756         case KVM_GET_LAPIC: {
1757                 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1758
1759                 r = -ENOMEM;
1760                 if (!lapic)
1761                         goto out;
1762                 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
1763                 if (r)
1764                         goto out;
1765                 r = -EFAULT;
1766                 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
1767                         goto out;
1768                 r = 0;
1769                 break;
1770         }
1771         case KVM_SET_LAPIC: {
1772                 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1773                 r = -ENOMEM;
1774                 if (!lapic)
1775                         goto out;
1776                 r = -EFAULT;
1777                 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
1778                         goto out;
1779                 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
1780                 if (r)
1781                         goto out;
1782                 r = 0;
1783                 break;
1784         }
1785         case KVM_INTERRUPT: {
1786                 struct kvm_interrupt irq;
1787
1788                 r = -EFAULT;
1789                 if (copy_from_user(&irq, argp, sizeof irq))
1790                         goto out;
1791                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1792                 if (r)
1793                         goto out;
1794                 r = 0;
1795                 break;
1796         }
1797         case KVM_NMI: {
1798                 r = kvm_vcpu_ioctl_nmi(vcpu);
1799                 if (r)
1800                         goto out;
1801                 r = 0;
1802                 break;
1803         }
1804         case KVM_SET_CPUID: {
1805                 struct kvm_cpuid __user *cpuid_arg = argp;
1806                 struct kvm_cpuid cpuid;
1807
1808                 r = -EFAULT;
1809                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1810                         goto out;
1811                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1812                 if (r)
1813                         goto out;
1814                 break;
1815         }
1816         case KVM_SET_CPUID2: {
1817                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1818                 struct kvm_cpuid2 cpuid;
1819
1820                 r = -EFAULT;
1821                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1822                         goto out;
1823                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1824                                               cpuid_arg->entries);
1825                 if (r)
1826                         goto out;
1827                 break;
1828         }
1829         case KVM_GET_CPUID2: {
1830                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1831                 struct kvm_cpuid2 cpuid;
1832
1833                 r = -EFAULT;
1834                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1835                         goto out;
1836                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1837                                               cpuid_arg->entries);
1838                 if (r)
1839                         goto out;
1840                 r = -EFAULT;
1841                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1842                         goto out;
1843                 r = 0;
1844                 break;
1845         }
1846         case KVM_GET_MSRS:
1847                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1848                 break;
1849         case KVM_SET_MSRS:
1850                 r = msr_io(vcpu, argp, do_set_msr, 0);
1851                 break;
1852         case KVM_TPR_ACCESS_REPORTING: {
1853                 struct kvm_tpr_access_ctl tac;
1854
1855                 r = -EFAULT;
1856                 if (copy_from_user(&tac, argp, sizeof tac))
1857                         goto out;
1858                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
1859                 if (r)
1860                         goto out;
1861                 r = -EFAULT;
1862                 if (copy_to_user(argp, &tac, sizeof tac))
1863                         goto out;
1864                 r = 0;
1865                 break;
1866         }
1867         case KVM_SET_VAPIC_ADDR: {
1868                 struct kvm_vapic_addr va;
1869
1870                 r = -EINVAL;
1871                 if (!irqchip_in_kernel(vcpu->kvm))
1872                         goto out;
1873                 r = -EFAULT;
1874                 if (copy_from_user(&va, argp, sizeof va))
1875                         goto out;
1876                 r = 0;
1877                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
1878                 break;
1879         }
1880         case KVM_X86_SETUP_MCE: {
1881                 u64 mcg_cap;
1882
1883                 r = -EFAULT;
1884                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
1885                         goto out;
1886                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
1887                 break;
1888         }
1889         case KVM_X86_SET_MCE: {
1890                 struct kvm_x86_mce mce;
1891
1892                 r = -EFAULT;
1893                 if (copy_from_user(&mce, argp, sizeof mce))
1894                         goto out;
1895                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1896                 break;
1897         }
1898         default:
1899                 r = -EINVAL;
1900         }
1901 out:
1902         kfree(lapic);
1903         return r;
1904 }
1905
1906 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1907 {
1908         int ret;
1909
1910         if (addr > (unsigned int)(-3 * PAGE_SIZE))
1911                 return -1;
1912         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1913         return ret;
1914 }
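
/*
 * The guard above rejects any address whose three-page TSS region would
 * run past the 4GB boundary (hence -3 * PAGE_SIZE).  A typical userspace
 * call places the region just below 4GB; the exact constant is a
 * conventional choice, not mandated by this file:
 *
 *        ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000UL);
 */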
1915
1916 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
1917                                               u64 ident_addr)
1918 {
1919         kvm->arch.ept_identity_map_addr = ident_addr;
1920         return 0;
1921 }
1922
1923 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1924                                           u32 kvm_nr_mmu_pages)
1925 {
1926         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1927                 return -EINVAL;
1928
1929         down_write(&kvm->slots_lock);
1930         spin_lock(&kvm->mmu_lock);
1931
1932         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
1933         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
1934
1935         spin_unlock(&kvm->mmu_lock);
1936         up_write(&kvm->slots_lock);
1937         return 0;
1938 }
1939
1940 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1941 {
1942         return kvm->arch.n_alloc_mmu_pages;
1943 }
1944
1945 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1946 {
1947         int i;
1948         struct kvm_mem_alias *alias;
1949
1950         for (i = 0; i < kvm->arch.naliases; ++i) {
1951                 alias = &kvm->arch.aliases[i];
1952                 if (gfn >= alias->base_gfn
1953                     && gfn < alias->base_gfn + alias->npages)
1954                         return alias->target_gfn + gfn - alias->base_gfn;
1955         }
1956         return gfn;
1957 }
1958
1959 /*
1960  * Set a new alias region.  Aliases map a portion of physical memory into
1961  * another portion.  This is useful for memory windows, for example the PC
1962  * VGA region.
1963  */
1964 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1965                                          struct kvm_memory_alias *alias)
1966 {
1967         int r, n;
1968         struct kvm_mem_alias *p;
1969
1970         r = -EINVAL;
1971         /* General sanity checks */
1972         if (alias->memory_size & (PAGE_SIZE - 1))
1973                 goto out;
1974         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1975                 goto out;
1976         if (alias->slot >= KVM_ALIAS_SLOTS)
1977                 goto out;
1978         if (alias->guest_phys_addr + alias->memory_size
1979             < alias->guest_phys_addr)
1980                 goto out;
1981         if (alias->target_phys_addr + alias->memory_size
1982             < alias->target_phys_addr)
1983                 goto out;
1984
1985         down_write(&kvm->slots_lock);
1986         spin_lock(&kvm->mmu_lock);
1987
1988         p = &kvm->arch.aliases[alias->slot];
1989         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1990         p->npages = alias->memory_size >> PAGE_SHIFT;
1991         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1992
1993         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
1994                 if (kvm->arch.aliases[n - 1].npages)
1995                         break;
1996         kvm->arch.naliases = n;
1997
1998         spin_unlock(&kvm->mmu_lock);
1999         kvm_mmu_zap_all(kvm);
2000
2001         up_write(&kvm->slots_lock);
2002
2003         return 0;
2004
2005 out:
2006         return r;
2007 }
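
/*
 * A minimal userspace sketch of the alias ioctl handled above, mapping the
 * legacy VGA window onto a hypothetical backing region (the slot choice,
 * sizes and fb_gpa are illustrative; vm_fd is an open VM descriptor):
 *
 *        struct kvm_memory_alias a = {
 *                .slot             = 0,
 *                .guest_phys_addr  = 0xa0000,   // PC VGA window
 *                .memory_size      = 0x20000,   // 128KB, page aligned
 *                .target_phys_addr = fb_gpa,    // hypothetical target
 *        };
 *
 *        ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &a);
 */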
2008
2009 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2010 {
2011         int r;
2012
2013         r = 0;
2014         switch (chip->chip_id) {
2015         case KVM_IRQCHIP_PIC_MASTER:
2016                 memcpy(&chip->chip.pic,
2017                         &pic_irqchip(kvm)->pics[0],
2018                         sizeof(struct kvm_pic_state));
2019                 break;
2020         case KVM_IRQCHIP_PIC_SLAVE:
2021                 memcpy(&chip->chip.pic,
2022                         &pic_irqchip(kvm)->pics[1],
2023                         sizeof(struct kvm_pic_state));
2024                 break;
2025         case KVM_IRQCHIP_IOAPIC:
2026                 memcpy(&chip->chip.ioapic,
2027                         ioapic_irqchip(kvm),
2028                         sizeof(struct kvm_ioapic_state));
2029                 break;
2030         default:
2031                 r = -EINVAL;
2032                 break;
2033         }
2034         return r;
2035 }
2036
2037 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2038 {
2039         int r;
2040
2041         r = 0;
2042         switch (chip->chip_id) {
2043         case KVM_IRQCHIP_PIC_MASTER:
2044                 spin_lock(&pic_irqchip(kvm)->lock);
2045                 memcpy(&pic_irqchip(kvm)->pics[0],
2046                         &chip->chip.pic,
2047                         sizeof(struct kvm_pic_state));
2048                 spin_unlock(&pic_irqchip(kvm)->lock);
2049                 break;
2050         case KVM_IRQCHIP_PIC_SLAVE:
2051                 spin_lock(&pic_irqchip(kvm)->lock);
2052                 memcpy(&pic_irqchip(kvm)->pics[1],
2053                         &chip->chip.pic,
2054                         sizeof(struct kvm_pic_state));
2055                 spin_unlock(&pic_irqchip(kvm)->lock);
2056                 break;
2057         case KVM_IRQCHIP_IOAPIC:
2058                 mutex_lock(&kvm->irq_lock);
2059                 memcpy(ioapic_irqchip(kvm),
2060                         &chip->chip.ioapic,
2061                         sizeof(struct kvm_ioapic_state));
2062                 mutex_unlock(&kvm->irq_lock);
2063                 break;
2064         default:
2065                 r = -EINVAL;
2066                 break;
2067         }
2068         kvm_pic_update_irq(pic_irqchip(kvm));
2069         return r;
2070 }
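
/*
 * A hedged userspace sketch of the read-modify-write pattern the two
 * handlers above enable (the particular bit flipped is illustrative only;
 * vm_fd is an open VM descriptor with an in-kernel irqchip):
 *
 *        struct kvm_irqchip chip = { .chip_id = KVM_IRQCHIP_PIC_MASTER };
 *
 *        ioctl(vm_fd, KVM_GET_IRQCHIP, &chip);
 *        chip.chip.pic.irr |= 1 << 4;           // e.g. latch IRQ4
 *        ioctl(vm_fd, KVM_SET_IRQCHIP, &chip);
 */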
2071
2072 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2073 {
2074         int r = 0;
2075
2076         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2077         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2078         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2079         return r;
2080 }
2081
2082 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2083 {
2084         int r = 0;
2085
2086         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2087         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2088         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2089         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2090         return r;
2091 }
2092
2093 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2094 {
2095         int r = 0;
2096
2097         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2098         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2099                 sizeof(ps->channels));
2100         ps->flags = kvm->arch.vpit->pit_state.flags;
2101         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2102         return r;
2103 }
2104
2105 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2106 {
2107         int r = 0, start = 0;
2108         u32 prev_legacy, cur_legacy;
2109         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2110         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2111         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2112         if (!prev_legacy && cur_legacy)
2113                 start = 1;
2114         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2115                sizeof(kvm->arch.vpit->pit_state.channels));
2116         kvm->arch.vpit->pit_state.flags = ps->flags;
2117         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2118         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2119         return r;
2120 }
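
/*
 * A minimal userspace sketch of toggling HPET legacy mode through the
 * handlers above (vm_fd assumed; per the code above, setting the flag when
 * it was previously clear reloads channel 0 with start == 1):
 *
 *        struct kvm_pit_state2 ps;
 *
 *        ioctl(vm_fd, KVM_GET_PIT2, &ps);
 *        ps.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
 *        ioctl(vm_fd, KVM_SET_PIT2, &ps);
 */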
2121
2122 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2123                                  struct kvm_reinject_control *control)
2124 {
2125         if (!kvm->arch.vpit)
2126                 return -ENXIO;
2127         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2128         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2129         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2130         return 0;
2131 }
2132
2133 /*
2134  * Get (and clear) the dirty memory log for a memory slot.
2135  */
2136 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2137                                       struct kvm_dirty_log *log)
2138 {
2139         int r;
2140         int n;
2141         struct kvm_memory_slot *memslot;
2142         int is_dirty = 0;
2143
2144         down_write(&kvm->slots_lock);
2145
2146         r = kvm_get_dirty_log(kvm, log, &is_dirty);
2147         if (r)
2148                 goto out;
2149
2150         /* If nothing is dirty, don't bother messing with page tables. */
2151         if (is_dirty) {
2152                 spin_lock(&kvm->mmu_lock);
2153                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
2154                 spin_unlock(&kvm->mmu_lock);
2155                 memslot = &kvm->memslots[log->slot];
2156                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2157                 memset(memslot->dirty_bitmap, 0, n);
2158         }
2159         r = 0;
2160 out:
2161         up_write(&kvm->slots_lock);
2162         return r;
2163 }
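
/*
 * A minimal userspace sketch of consuming the dirty log produced above
 * (slot and npages are supplied by the caller; 64-bit longs are assumed
 * for the bitmap sizing, which must cover npages bits):
 *
 *        unsigned long bitmap[(npages + 63) / 64];
 *        struct kvm_dirty_log log = {
 *                .slot         = slot,
 *                .dirty_bitmap = bitmap,
 *        };
 *
 *        ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *        // bit i set: gfn base_gfn + i was written since the previous call
 */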
2164
2165 long kvm_arch_vm_ioctl(struct file *filp,
2166                        unsigned int ioctl, unsigned long arg)
2167 {
2168         struct kvm *kvm = filp->private_data;
2169         void __user *argp = (void __user *)arg;
2170         int r = -EINVAL;
2171         /*
2172          * This union makes it completely explicit to gcc-3.x
2173          * that these variables' stack usage should be
2174          * combined, not added together.
2175          */
2176         union {
2177                 struct kvm_pit_state ps;
2178                 struct kvm_pit_state2 ps2;
2179                 struct kvm_memory_alias alias;
2180                 struct kvm_pit_config pit_config;
2181         } u;
2182
2183         switch (ioctl) {
2184         case KVM_SET_TSS_ADDR:
2185                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2186                 if (r < 0)
2187                         goto out;
2188                 break;
2189         case KVM_SET_IDENTITY_MAP_ADDR: {
2190                 u64 ident_addr;
2191
2192                 r = -EFAULT;
2193                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2194                         goto out;
2195                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2196                 if (r < 0)
2197                         goto out;
2198                 break;
2199         }
2200         case KVM_SET_MEMORY_REGION: {
2201                 struct kvm_memory_region kvm_mem;
2202                 struct kvm_userspace_memory_region kvm_userspace_mem;
2203
2204                 r = -EFAULT;
2205                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2206                         goto out;
2207                 kvm_userspace_mem.slot = kvm_mem.slot;
2208                 kvm_userspace_mem.flags = kvm_mem.flags;
2209                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2210                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2211                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2212                 if (r)
2213                         goto out;
2214                 break;
2215         }
2216         case KVM_SET_NR_MMU_PAGES:
2217                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2218                 if (r)
2219                         goto out;
2220                 break;
2221         case KVM_GET_NR_MMU_PAGES:
2222                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2223                 break;
2224         case KVM_SET_MEMORY_ALIAS:
2225                 r = -EFAULT;
2226                 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
2227                         goto out;
2228                 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
2229                 if (r)
2230                         goto out;
2231                 break;
2232         case KVM_CREATE_IRQCHIP:
2233                 r = -ENOMEM;
2234                 kvm->arch.vpic = kvm_create_pic(kvm);
2235                 if (kvm->arch.vpic) {
2236                         r = kvm_ioapic_init(kvm);
2237                         if (r) {
2238                                 kfree(kvm->arch.vpic);
2239                                 kvm->arch.vpic = NULL;
2240                                 goto out;
2241                         }
2242                 } else
2243                         goto out;
2244                 r = kvm_setup_default_irq_routing(kvm);
2245                 if (r) {
2246                         kfree(kvm->arch.vpic);
2247                         kfree(kvm->arch.vioapic);
2248                         goto out;
2249                 }
2250                 break;
2251         case KVM_CREATE_PIT:
2252                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2253                 goto create_pit;
2254         case KVM_CREATE_PIT2:
2255                 r = -EFAULT;
2256                 if (copy_from_user(&u.pit_config, argp,
2257                                    sizeof(struct kvm_pit_config)))
2258                         goto out;
2259         create_pit:
2260                 down_write(&kvm->slots_lock);
2261                 r = -EEXIST;
2262                 if (kvm->arch.vpit)
2263                         goto create_pit_unlock;
2264                 r = -ENOMEM;
2265                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
2266                 if (kvm->arch.vpit)
2267                         r = 0;
2268         create_pit_unlock:
2269                 up_write(&kvm->slots_lock);
2270                 break;
2271         case KVM_IRQ_LINE_STATUS:
2272         case KVM_IRQ_LINE: {
2273                 struct kvm_irq_level irq_event;
2274
2275                 r = -EFAULT;
2276                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2277                         goto out;
2278                 if (irqchip_in_kernel(kvm)) {
2279                         __s32 status;
2280                         mutex_lock(&kvm->irq_lock);
2281                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2282                                         irq_event.irq, irq_event.level);
2283                         mutex_unlock(&kvm->irq_lock);
2284                         if (ioctl == KVM_IRQ_LINE_STATUS) {
2285                                 irq_event.status = status;
2286                                 if (copy_to_user(argp, &irq_event,
2287                                                         sizeof irq_event))
2288                                         goto out;
2289                         }
2290                         r = 0;
2291                 }
2292                 break;
2293         }
2294         case KVM_GET_IRQCHIP: {
2295                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2296                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
2297
2298                 r = -ENOMEM;
2299                 if (!chip)
2300                         goto out;
2301                 r = -EFAULT;
2302                 if (copy_from_user(chip, argp, sizeof *chip))
2303                         goto get_irqchip_out;
2304                 r = -ENXIO;
2305                 if (!irqchip_in_kernel(kvm))
2306                         goto get_irqchip_out;
2307                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
2308                 if (r)
2309                         goto get_irqchip_out;
2310                 r = -EFAULT;
2311                 if (copy_to_user(argp, chip, sizeof *chip))
2312                         goto get_irqchip_out;
2313                 r = 0;
2314         get_irqchip_out:
2315                 kfree(chip);
2316                 if (r)
2317                         goto out;
2318                 break;
2319         }
2320         case KVM_SET_IRQCHIP: {
2321                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2322                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
2323
2324                 r = -ENOMEM;
2325                 if (!chip)
2326                         goto out;
2327                 r = -EFAULT;
2328                 if (copy_from_user(chip, argp, sizeof *chip))
2329                         goto set_irqchip_out;
2330                 r = -ENXIO;
2331                 if (!irqchip_in_kernel(kvm))
2332                         goto set_irqchip_out;
2333                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
2334                 if (r)
2335                         goto set_irqchip_out;
2336                 r = 0;
2337         set_irqchip_out:
2338                 kfree(chip);
2339                 if (r)
2340                         goto out;
2341                 break;
2342         }
2343         case KVM_GET_PIT: {
2344                 r = -EFAULT;
2345                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
2346                         goto out;
2347                 r = -ENXIO;
2348                 if (!kvm->arch.vpit)
2349                         goto out;
2350                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
2351                 if (r)
2352                         goto out;
2353                 r = -EFAULT;
2354                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
2355                         goto out;
2356                 r = 0;
2357                 break;
2358         }
2359         case KVM_SET_PIT: {
2360                 r = -EFAULT;
2361                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
2362                         goto out;
2363                 r = -ENXIO;
2364                 if (!kvm->arch.vpit)
2365                         goto out;
2366                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
2367                 if (r)
2368                         goto out;
2369                 r = 0;
2370                 break;
2371         }
2372         case KVM_GET_PIT2: {
2373                 r = -ENXIO;
2374                 if (!kvm->arch.vpit)
2375                         goto out;
2376                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2377                 if (r)
2378                         goto out;
2379                 r = -EFAULT;
2380                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2381                         goto out;
2382                 r = 0;
2383                 break;
2384         }
2385         case KVM_SET_PIT2: {
2386                 r = -EFAULT;
2387                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2388                         goto out;
2389                 r = -ENXIO;
2390                 if (!kvm->arch.vpit)
2391                         goto out;
2392                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2393                 if (r)
2394                         goto out;
2395                 r = 0;
2396                 break;
2397         }
2398         case KVM_REINJECT_CONTROL: {
2399                 struct kvm_reinject_control control;
2400                 r =  -EFAULT;
2401                 if (copy_from_user(&control, argp, sizeof(control)))
2402                         goto out;
2403                 r = kvm_vm_ioctl_reinject(kvm, &control);
2404                 if (r)
2405                         goto out;
2406                 r = 0;
2407                 break;
2408         }
2409         default:
2410                 ;
2411         }
2412 out:
2413         return r;
2414 }
2415
2416 static void kvm_init_msr_list(void)
2417 {
2418         u32 dummy[2];
2419         unsigned i, j;
2420
2421         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2422                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2423                         continue;
2424                 if (j < i)
2425                         msrs_to_save[j] = msrs_to_save[i];
2426                 j++;
2427         }
2428         num_msrs_to_save = j;
2429 }
2430
2431 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
2432                            const void *v)
2433 {
2434         if (vcpu->arch.apic &&
2435             !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
2436                 return 0;
2437
2438         return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
2439 }
2440
2441 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2442 {
2443         if (vcpu->arch.apic &&
2444             !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
2445                 return 0;
2446
2447         return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
2448 }
2449
2450 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2451                                struct kvm_vcpu *vcpu)
2452 {
2453         void *data = val;
2454         int r = X86EMUL_CONTINUE;
2455
2456         while (bytes) {
2457                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2458                 unsigned offset = addr & (PAGE_SIZE-1);
2459                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2460                 int ret;
2461
2462                 if (gpa == UNMAPPED_GVA) {
2463                         r = X86EMUL_PROPAGATE_FAULT;
2464                         goto out;
2465                 }
2466                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2467                 if (ret < 0) {
2468                         r = X86EMUL_UNHANDLEABLE;
2469                         goto out;
2470                 }
2471
2472                 bytes -= toread;
2473                 data += toread;
2474                 addr += toread;
2475         }
2476 out:
2477         return r;
2478 }
2479
2480 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2481                                 struct kvm_vcpu *vcpu)
2482 {
2483         void *data = val;
2484         int r = X86EMUL_CONTINUE;
2485
2486         while (bytes) {
2487                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2488                 unsigned offset = addr & (PAGE_SIZE-1);
2489                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2490                 int ret;
2491
2492                 if (gpa == UNMAPPED_GVA) {
2493                         r = X86EMUL_PROPAGATE_FAULT;
2494                         goto out;
2495                 }
2496                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2497                 if (ret < 0) {
2498                         r = X86EMUL_UNHANDLEABLE;
2499                         goto out;
2500                 }
2501
2502                 bytes -= towrite;
2503                 data += towrite;
2504                 addr += towrite;
2505         }
2506 out:
2507         return r;
2508 }
2509
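/*
 * The two helpers above chunk a guest-virtual range page by page.  A worked
 * example with 4KB pages (values illustrative): addr == 0x1ffd, bytes == 6
 * gives offset == 0xffd, so the first pass moves 3 bytes; the loop then
 * resumes at addr == 0x2000 with the remaining 3 bytes after a fresh
 * gva_to_gpa translation.
 */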
2510
2511 static int emulator_read_emulated(unsigned long addr,
2512                                   void *val,
2513                                   unsigned int bytes,
2514                                   struct kvm_vcpu *vcpu)
2515 {
2516         gpa_t                 gpa;
2517
2518         if (vcpu->mmio_read_completed) {
2519                 memcpy(val, vcpu->mmio_data, bytes);
2520                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
2521                                vcpu->mmio_phys_addr, *(u64 *)val);
2522                 vcpu->mmio_read_completed = 0;
2523                 return X86EMUL_CONTINUE;
2524         }
2525
2526         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2527
2528         /* For APIC access vmexit */
2529         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2530                 goto mmio;
2531
2532         if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2533                                 == X86EMUL_CONTINUE)
2534                 return X86EMUL_CONTINUE;
2535         if (gpa == UNMAPPED_GVA)
2536                 return X86EMUL_PROPAGATE_FAULT;
2537
2538 mmio:
2539         /*
2540          * Is this MMIO handled locally?
2541          */
2542         if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
2543                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
2544                 return X86EMUL_CONTINUE;
2545         }
2546
2547         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
2548
2549         vcpu->mmio_needed = 1;
2550         vcpu->mmio_phys_addr = gpa;
2551         vcpu->mmio_size = bytes;
2552         vcpu->mmio_is_write = 0;
2553
2554         return X86EMUL_UNHANDLEABLE;
2555 }
2556
2557 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2558                           const void *val, int bytes)
2559 {
2560         int ret;
2561
2562         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2563         if (ret < 0)
2564                 return 0;
2565         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2566         return 1;
2567 }
2568
2569 static int emulator_write_emulated_onepage(unsigned long addr,
2570                                            const void *val,
2571                                            unsigned int bytes,
2572                                            struct kvm_vcpu *vcpu)
2573 {
2574         gpa_t                 gpa;
2575
2576         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2577
2578         if (gpa == UNMAPPED_GVA) {
2579                 kvm_inject_page_fault(vcpu, addr, 2);
2580                 return X86EMUL_PROPAGATE_FAULT;
2581         }
2582
2583         /* For APIC access vmexit */
2584         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2585                 goto mmio;
2586
2587         if (emulator_write_phys(vcpu, gpa, val, bytes))
2588                 return X86EMUL_CONTINUE;
2589
2590 mmio:
2591         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
2592         /*
2593          * Is this MMIO handled locally?
2594          */
2595         if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
2596                 return X86EMUL_CONTINUE;
2597
2598         vcpu->mmio_needed = 1;
2599         vcpu->mmio_phys_addr = gpa;
2600         vcpu->mmio_size = bytes;
2601         vcpu->mmio_is_write = 1;
2602         memcpy(vcpu->mmio_data, val, bytes);
2603
2604         return X86EMUL_CONTINUE;
2605 }
2606
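/*
 * Worked example for the page-split arithmetic below (4KB pages,
 * illustrative): addr == 0x1ffe with bytes == 4 crosses a boundary, and
 * now (= -addr & ~PAGE_MASK) is 2, so the first onepage call covers the
 * two bytes up to 0x2000 and the tail call covers the remaining two bytes
 * starting at 0x2000.
 */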
2607 int emulator_write_emulated(unsigned long addr,
2608                                    const void *val,
2609                                    unsigned int bytes,
2610                                    struct kvm_vcpu *vcpu)
2611 {
2612         /* Crossing a page boundary? */
2613         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2614                 int rc, now;
2615
2616                 now = -addr & ~PAGE_MASK;
2617                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2618                 if (rc != X86EMUL_CONTINUE)
2619                         return rc;
2620                 addr += now;
2621                 val += now;
2622                 bytes -= now;
2623         }
2624         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2625 }
2626 EXPORT_SYMBOL_GPL(emulator_write_emulated);
2627
2628 static int emulator_cmpxchg_emulated(unsigned long addr,
2629                                      const void *old,
2630                                      const void *new,
2631                                      unsigned int bytes,
2632                                      struct kvm_vcpu *vcpu)
2633 {
2634         static int reported;
2635
2636         if (!reported) {
2637                 reported = 1;
2638                 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2639         }
2640 #ifndef CONFIG_X86_64
2641         /* a guest's cmpxchg8b has to be emulated atomically */
2642         if (bytes == 8) {
2643                 gpa_t gpa;
2644                 struct page *page;
2645                 char *kaddr;
2646                 u64 val;
2647
2648                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2649
2650                 if (gpa == UNMAPPED_GVA ||
2651                    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2652                         goto emul_write;
2653
2654                 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2655                         goto emul_write;
2656
2657                 val = *(u64 *)new;
2658
2659                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2660
2661                 kaddr = kmap_atomic(page, KM_USER0);
2662                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2663                 kunmap_atomic(kaddr, KM_USER0);
2664                 kvm_release_page_dirty(page);
2665         }
2666 emul_write:
2667 #endif
2668
2669         return emulator_write_emulated(addr, new, bytes, vcpu);
2670 }
2671
2672 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2673 {
2674         return kvm_x86_ops->get_segment_base(vcpu, seg);
2675 }
2676
2677 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2678 {
2679         kvm_mmu_invlpg(vcpu, address);
2680         return X86EMUL_CONTINUE;
2681 }
2682
2683 int emulate_clts(struct kvm_vcpu *vcpu)
2684 {
2685         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2686         return X86EMUL_CONTINUE;
2687 }
2688
2689 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2690 {
2691         struct kvm_vcpu *vcpu = ctxt->vcpu;
2692
2693         switch (dr) {
2694         case 0 ... 3:
2695                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2696                 return X86EMUL_CONTINUE;
2697         default:
2698                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2699                 return X86EMUL_UNHANDLEABLE;
2700         }
2701 }
2702
2703 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2704 {
2705         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2706         int exception;
2707
2708         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2709         if (exception) {
2710                 /* FIXME: better handling */
2711                 return X86EMUL_UNHANDLEABLE;
2712         }
2713         return X86EMUL_CONTINUE;
2714 }
2715
2716 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2717 {
2718         u8 opcodes[4];
2719         unsigned long rip = kvm_rip_read(vcpu);
2720         unsigned long rip_linear;
2721
2722         if (!printk_ratelimit())
2723                 return;
2724
2725         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2726
2727         kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2728
2729         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2730                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2731 }
2732 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2733
2734 static struct x86_emulate_ops emulate_ops = {
2735         .read_std            = kvm_read_guest_virt,
2736         .read_emulated       = emulator_read_emulated,
2737         .write_emulated      = emulator_write_emulated,
2738         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
2739 };
2740
2741 static void cache_all_regs(struct kvm_vcpu *vcpu)
2742 {
2743         kvm_register_read(vcpu, VCPU_REGS_RAX);
2744         kvm_register_read(vcpu, VCPU_REGS_RSP);
2745         kvm_register_read(vcpu, VCPU_REGS_RIP);
2746         vcpu->arch.regs_dirty = ~0;
2747 }
2748
2749 int emulate_instruction(struct kvm_vcpu *vcpu,
2750                         struct kvm_run *run,
2751                         unsigned long cr2,
2752                         u16 error_code,
2753                         int emulation_type)
2754 {
2755         int r, shadow_mask;
2756         struct decode_cache *c;
2757
2758         kvm_clear_exception_queue(vcpu);
2759         vcpu->arch.mmio_fault_cr2 = cr2;
2760         /*
2761          * TODO: fix emulate.c to use guest_read/write_register
2762          * instead of direct ->regs accesses; this can save hundreds of
2763          * cycles on Intel for instructions that don't read/change RSP,
2764          * for example.
2765          */
2766         cache_all_regs(vcpu);
2767
2768         vcpu->mmio_is_write = 0;
2769         vcpu->arch.pio.string = 0;
2770
2771         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2772                 int cs_db, cs_l;
2773                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2774
2775                 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2776                 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2777                 vcpu->arch.emulate_ctxt.mode =
2778                         (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2779                         ? X86EMUL_MODE_REAL : cs_l
2780                         ? X86EMUL_MODE_PROT64 : cs_db
2781                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2782
2783                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2784
2785                 /* Only allow emulation of specific instructions on #UD
2786                  * (namely VMMCALL, sysenter, sysexit, syscall) */
2787                 c = &vcpu->arch.emulate_ctxt.decode;
2788                 if (emulation_type & EMULTYPE_TRAP_UD) {
2789                         if (!c->twobyte)
2790                                 return EMULATE_FAIL;
2791                         switch (c->b) {
2792                         case 0x01: /* VMMCALL */
2793                                 if (c->modrm_mod != 3 || c->modrm_rm != 1)
2794                                         return EMULATE_FAIL;
2795                                 break;
2796                         case 0x34: /* sysenter */
2797                         case 0x35: /* sysexit */
2798                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
2799                                         return EMULATE_FAIL;
2800                                 break;
2801                         case 0x05: /* syscall */
2802                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
2803                                         return EMULATE_FAIL;
2804                                 break;
2805                         default:
2806                                 return EMULATE_FAIL;
2807                         }
2808
2809                         if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
2810                                 return EMULATE_FAIL;
2811                 }
2812
2813                 ++vcpu->stat.insn_emulation;
2814                 if (r) {
2815                         ++vcpu->stat.insn_emulation_fail;
2816                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2817                                 return EMULATE_DONE;
2818                         return EMULATE_FAIL;
2819                 }
2820         }
2821
2822         if (emulation_type & EMULTYPE_SKIP) {
2823                 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2824                 return EMULATE_DONE;
2825         }
2826
2827         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2828         shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
2829
2830         if (r == 0)
2831                 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
2832
2833         if (vcpu->arch.pio.string)
2834                 return EMULATE_DO_MMIO;
2835
2836         if ((r || vcpu->mmio_is_write) && run) {
2837                 run->exit_reason = KVM_EXIT_MMIO;
2838                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2839                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2840                 run->mmio.len = vcpu->mmio_size;
2841                 run->mmio.is_write = vcpu->mmio_is_write;
2842         }
2843
2844         if (r) {
2845                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2846                         return EMULATE_DONE;
2847                 if (!vcpu->mmio_needed) {
2848                         kvm_report_emulation_failure(vcpu, "mmio");
2849                         return EMULATE_FAIL;
2850                 }
2851                 return EMULATE_DO_MMIO;
2852         }
2853
2854         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2855
2856         if (vcpu->mmio_is_write) {
2857                 vcpu->mmio_needed = 0;
2858                 return EMULATE_DO_MMIO;
2859         }
2860
2861         return EMULATE_DONE;
2862 }
2863 EXPORT_SYMBOL_GPL(emulate_instruction);
2864
2865 static int pio_copy_data(struct kvm_vcpu *vcpu)
2866 {
2867         void *p = vcpu->arch.pio_data;
2868         gva_t q = vcpu->arch.pio.guest_gva;
2869         unsigned bytes;
2870         int ret;
2871
2872         bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2873         if (vcpu->arch.pio.in)
2874                 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2875         else
2876                 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2877         return ret;
2878 }
2879
2880 int complete_pio(struct kvm_vcpu *vcpu)
2881 {
2882         struct kvm_pio_request *io = &vcpu->arch.pio;
2883         long delta;
2884         int r;
2885         unsigned long val;
2886
2887         if (!io->string) {
2888                 if (io->in) {
2889                         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2890                         memcpy(&val, vcpu->arch.pio_data, io->size);
2891                         kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2892                 }
2893         } else {
2894                 if (io->in) {
2895                         r = pio_copy_data(vcpu);
2896                         if (r)
2897                                 return r;
2898                 }
2899
2900                 delta = 1;
2901                 if (io->rep) {
2902                         delta *= io->cur_count;
2903                         /*
2904                          * The size of the register should really depend on
2905                          * the current address size.
2906                          */
2907                         val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2908                         val -= delta;
2909                         kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2910                 }
2911                 if (io->down)
2912                         delta = -delta;
2913                 delta *= io->size;
2914                 if (io->in) {
2915                         val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2916                         val += delta;
2917                         kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2918                 } else {
2919                         val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2920                         val += delta;
2921                         kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2922                 }
2923         }
2924
2925         io->count -= io->cur_count;
2926         io->cur_count = 0;
2927
2928         return 0;
2929 }
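
/*
 * Register-adjustment example for the code above (illustrative): a
 * "rep outsw" that completed cur_count == 3 words (io->size == 2,
 * io->down == 0) yields delta == 3, so RCX drops by 3; delta is then
 * scaled to 6 bytes and RSI advances by 6.  An "ins" variant would
 * advance RDI instead.
 */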
2930
2931 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
2932 {
2933         /* TODO: String I/O for in-kernel devices */
2934         int r;
2935
2936         if (vcpu->arch.pio.in)
2937                 r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
2938                                     vcpu->arch.pio.size, pd);
2939         else
2940                 r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
2941                                      vcpu->arch.pio.size, pd);
2942         return r;
2943 }
2944
2945 static int pio_string_write(struct kvm_vcpu *vcpu)
2946 {
2947         struct kvm_pio_request *io = &vcpu->arch.pio;
2948         void *pd = vcpu->arch.pio_data;
2949         int i, r = 0;
2950
2951         for (i = 0; i < io->cur_count; i++) {
2952                 if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
2953                                      io->port, io->size, pd)) {
2954                         r = -EOPNOTSUPP;
2955                         break;
2956                 }
2957                 pd += io->size;
2958         }
2959         return r;
2960 }
2961
2962 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2963                   int size, unsigned port)
2964 {
2965         unsigned long val;
2966
2967         vcpu->run->exit_reason = KVM_EXIT_IO;
2968         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2969         vcpu->run->io.size = vcpu->arch.pio.size = size;
2970         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2971         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2972         vcpu->run->io.port = vcpu->arch.pio.port = port;
2973         vcpu->arch.pio.in = in;
2974         vcpu->arch.pio.string = 0;
2975         vcpu->arch.pio.down = 0;
2976         vcpu->arch.pio.rep = 0;
2977
2978         trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
2979                       size, 1);
2980
2981         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2982         memcpy(vcpu->arch.pio_data, &val, 4);
2983
2984         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
2985                 complete_pio(vcpu);
2986                 return 1;
2987         }
2988         return 0;
2989 }
2990 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
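
/*
 * A minimal userspace sketch of the other side of this exit.  Assumed, not
 * from this file: run was mmap()ed from vcpu_fd using the size reported by
 * KVM_GET_VCPU_MMAP_SIZE, and handle_out()/fill_in() are hypothetical
 * device-model helpers.
 *
 *        if (run->exit_reason == KVM_EXIT_IO) {
 *                void *data = (char *)run + run->io.data_offset;
 *                size_t len = (size_t)run->io.size * run->io.count;
 *
 *                if (run->io.direction == KVM_EXIT_IO_OUT)
 *                        handle_out(run->io.port, data, len);
 *                else
 *                        fill_in(run->io.port, data, len);
 *                ioctl(vcpu_fd, KVM_RUN, 0); // resume; complete_pio runs
 *        }
 */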
2991
2992 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2993                   int size, unsigned long count, int down,
2994                   gva_t address, int rep, unsigned port)
2995 {
2996         unsigned now, in_page;
2997         int ret = 0;
2998
2999         vcpu->run->exit_reason = KVM_EXIT_IO;
3000         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3001         vcpu->run->io.size = vcpu->arch.pio.size = size;
3002         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3003         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
3004         vcpu->run->io.port = vcpu->arch.pio.port = port;
3005         vcpu->arch.pio.in = in;
3006         vcpu->arch.pio.string = 1;
3007         vcpu->arch.pio.down = down;
3008         vcpu->arch.pio.rep = rep;
3009
3010         trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3011                       size, count);
3012
3013         if (!count) {
3014                 kvm_x86_ops->skip_emulated_instruction(vcpu);
3015                 return 1;
3016         }
3017
3018         if (!down)
3019                 in_page = PAGE_SIZE - offset_in_page(address);
3020         else
3021                 in_page = offset_in_page(address) + size;
3022         now = min(count, (unsigned long)in_page / size);
3023         if (!now)
3024                 now = 1;
3025         if (down) {
3026                 /*
3027                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
3028                  */
3029                 pr_unimpl(vcpu, "guest string pio down\n");
3030                 kvm_inject_gp(vcpu, 0);
3031                 return 1;
3032         }
3033         vcpu->run->io.count = now;
3034         vcpu->arch.pio.cur_count = now;
3035
3036         if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
3037                 kvm_x86_ops->skip_emulated_instruction(vcpu);
3038
3039         vcpu->arch.pio.guest_gva = address;
3040
3041         if (!vcpu->arch.pio.in) {
3042                 /* string PIO write */
3043                 ret = pio_copy_data(vcpu);
3044                 if (ret == X86EMUL_PROPAGATE_FAULT) {
3045                         kvm_inject_gp(vcpu, 0);
3046                         return 1;
3047                 }
3048                 if (ret == 0 && !pio_string_write(vcpu)) {
3049                         complete_pio(vcpu);
3050                         if (vcpu->arch.pio.count == 0)
3051                                 ret = 1;
3052                 }
3053         }
3054         /* no string PIO read support yet */
3055
3056         return ret;
3057 }
3058 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
3059
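     /*
      * Empty IPI callback: calling it via smp_call_function_single() just
      * kicks the target cpu out of guest mode.
      */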
3060 static void bounce_off(void *info)
3061 {
3062         /* nothing */
3063 }
3064
3065 static unsigned int ref_freq;
3066 static unsigned long tsc_khz_ref;
3067
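     /*
      * cpufreq transition notifier: rescale this cpu's cpu_tsc_khz and ask
      * each vcpu resident on it to refresh kvmclock.  Speedups are applied
      * at PRECHANGE and slowdowns at POSTCHANGE, so the advertised rate
      * never undershoots the real TSC rate and guest time cannot appear
      * to run backwards.
      */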
3068 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
3069                                      void *data)
3070 {
3071         struct cpufreq_freqs *freq = data;
3072         struct kvm *kvm;
3073         struct kvm_vcpu *vcpu;
3074         int i, send_ipi = 0;
3075
3076         if (!ref_freq)
3077                 ref_freq = freq->old;
3078
3079         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
3080                 return 0;
3081         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
3082                 return 0;
3083         per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
3084
3085         spin_lock(&kvm_lock);
3086         list_for_each_entry(kvm, &vm_list, vm_list) {
3087                 kvm_for_each_vcpu(i, vcpu, kvm) {
3088                         if (vcpu->cpu != freq->cpu)
3089                                 continue;
3090                         if (!kvm_request_guest_time_update(vcpu))
3091                                 continue;
3092                         if (vcpu->cpu != smp_processor_id())
3093                                 send_ipi++;
3094                 }
3095         }
3096         spin_unlock(&kvm_lock);
3097
3098         if (freq->old < freq->new && send_ipi) {
3099                 /*
3100                  * We upscale the frequency.  We must make sure the
3101                  * guest doesn't see old kvmclock values while running
3102                  * with the new frequency, otherwise we risk the guest
3103                  * seeing time go backwards.
3104                  *
3105                  * In case we update the frequency for another cpu
3106                  * (which might be in guest context) send an interrupt
3107                  * to kick the cpu out of guest context.  Next time
3108                  * guest context is entered kvmclock will be updated,
3109                  * so the guest will not see stale values.
3110                  */
3111                 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
3112         }
3113         return 0;
3114 }
3115
3116 static struct notifier_block kvmclock_cpufreq_notifier_block = {
3117         .notifier_call  = kvmclock_cpufreq_notifier
3118 };
3119
3120 int kvm_arch_init(void *opaque)
3121 {
3122         int r, cpu;
3123         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
3124
3125         if (kvm_x86_ops) {
3126                 printk(KERN_ERR "kvm: already loaded the other module\n");
3127                 r = -EEXIST;
3128                 goto out;
3129         }
3130
3131         if (!ops->cpu_has_kvm_support()) {
3132                 printk(KERN_ERR "kvm: no hardware support\n");
3133                 r = -EOPNOTSUPP;
3134                 goto out;
3135         }
3136         if (ops->disabled_by_bios()) {
3137                 printk(KERN_ERR "kvm: disabled by bios\n");
3138                 r = -EOPNOTSUPP;
3139                 goto out;
3140         }
3141
3142         r = kvm_mmu_module_init();
3143         if (r)
3144                 goto out;
3145
3146         kvm_init_msr_list();
3147
3148         kvm_x86_ops = ops;
3149         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
3150         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3151         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
3152                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
3153
3154         for_each_possible_cpu(cpu)
3155                 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
3156         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
3157                 tsc_khz_ref = tsc_khz;
3158                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3159                                           CPUFREQ_TRANSITION_NOTIFIER);
3160         }
3161
3162         return 0;
3163
3164 out:
3165         return r;
3166 }
3167
3168 void kvm_arch_exit(void)
3169 {
3170         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3171                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3172                                             CPUFREQ_TRANSITION_NOTIFIER);
3173         kvm_x86_ops = NULL;
3174         kvm_mmu_module_exit();
3175 }
3176
3177 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3178 {
3179         ++vcpu->stat.halt_exits;
3180         if (irqchip_in_kernel(vcpu->kvm)) {
3181                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3182                 return 1;
3183         } else {
3184                 vcpu->run->exit_reason = KVM_EXIT_HLT;
3185                 return 0;
3186         }
3187 }
3188 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3189
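     /*
      * A 64-bit hypercall GPA argument arrives in one register in long
      * mode but split across two 32-bit registers otherwise; reassemble it.
      */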
3190 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3191                            unsigned long a1)
3192 {
3193         if (is_long_mode(vcpu))
3194                 return a0;
3195         else
3196                 return a0 | ((gpa_t)a1 << 32);
3197 }
3198
3199 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3200 {
3201         unsigned long nr, a0, a1, a2, a3, ret;
3202         int r = 1;
3203
3204         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3205         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3206         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3207         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3208         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
3209
3210         trace_kvm_hypercall(nr, a0, a1, a2, a3);
3211
3212         if (!is_long_mode(vcpu)) {
3213                 nr &= 0xFFFFFFFF;
3214                 a0 &= 0xFFFFFFFF;
3215                 a1 &= 0xFFFFFFFF;
3216                 a2 &= 0xFFFFFFFF;
3217                 a3 &= 0xFFFFFFFF;
3218         }
3219
3220         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3221                 ret = -KVM_EPERM;
3222                 goto out;
3223         }
3224
3225         switch (nr) {
3226         case KVM_HC_VAPIC_POLL_IRQ:
3227                 ret = 0;
3228                 break;
3229         case KVM_HC_MMU_OP:
3230                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3231                 break;
3232         default:
3233                 ret = -KVM_ENOSYS;
3234                 break;
3235         }
3236 out:
3237         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
3238         ++vcpu->stat.hypercalls;
3239         return r;
3240 }
3241 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3242
3243 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3244 {
3245         char instruction[3];
3246         int ret = 0;
3247         unsigned long rip = kvm_rip_read(vcpu);
3248
3250         /*
3251          * Blow out the MMU to ensure that no other VCPU has an active mapping
3252          * to ensure that the updated hypercall appears atomically across all
3253          * VCPUs.
3254          */
3255         kvm_mmu_zap_all(vcpu->kvm);
3256
3257         kvm_x86_ops->patch_hypercall(vcpu, instruction);
3258         if (emulator_write_emulated(rip, instruction, 3, vcpu)
3259             != X86EMUL_CONTINUE)
3260                 ret = -EFAULT;
3261
3262         return ret;
3263 }
3264
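     /* Replace the low 32 bits of a control register, preserving the rest. */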
3265 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3266 {
3267         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3268 }
3269
3270 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3271 {
3272         struct descriptor_table dt = { limit, base };
3273
3274         kvm_x86_ops->set_gdt(vcpu, &dt);
3275 }
3276
3277 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3278 {
3279         struct descriptor_table dt = { limit, base };
3280
3281         kvm_x86_ops->set_idt(vcpu, &dt);
3282 }
3283
3284 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3285                    unsigned long *rflags)
3286 {
3287         kvm_lmsw(vcpu, msw);
3288         *rflags = kvm_x86_ops->get_rflags(vcpu);
3289 }
3290
3291 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3292 {
3293         unsigned long value;
3294
3295         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3296         switch (cr) {
3297         case 0:
3298                 value = vcpu->arch.cr0;
3299                 break;
3300         case 2:
3301                 value = vcpu->arch.cr2;
3302                 break;
3303         case 3:
3304                 value = vcpu->arch.cr3;
3305                 break;
3306         case 4:
3307                 value = vcpu->arch.cr4;
3308                 break;
3309         case 8:
3310                 value = kvm_get_cr8(vcpu);
3311                 break;
3312         default:
3313                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3314                 return 0;
3315         }
3316
3317         return value;
3318 }
3319
3320 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3321                      unsigned long *rflags)
3322 {
3323         switch (cr) {
3324         case 0:
3325                 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
3326                 *rflags = kvm_x86_ops->get_rflags(vcpu);
3327                 break;
3328         case 2:
3329                 vcpu->arch.cr2 = val;
3330                 break;
3331         case 3:
3332                 kvm_set_cr3(vcpu, val);
3333                 break;
3334         case 4:
3335                 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
3336                 break;
3337         case 8:
3338                 kvm_set_cr8(vcpu, val & 0xfUL);
3339                 break;
3340         default:
3341                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3342         }
3343 }
3344
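     /*
      * Stateful CPUID leaves (classically leaf 2) return different data on
      * successive reads.  Such leaves occupy several entries sharing one
      * function number; advance the STATE_READ_NEXT mark to the following
      * entry, wrapping around if need be.
      */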
3345 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
3346 {
3347         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
3348         int j, nent = vcpu->arch.cpuid_nent;
3349
3350         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
3351         /* when no next entry is found, the current entry[i] is reselected */
3352         for (j = i + 1; ; j = (j + 1) % nent) {
3353                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
3354                 if (ej->function == e->function) {
3355                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3356                         return j;
3357                 }
3358         }
3359         return 0; /* silence gcc, even though control never reaches here */
3360 }
3361
3362 /* find an entry with matching function, matching index (if needed), and that
3363  * should be read next (if it's stateful) */
3364 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3365         u32 function, u32 index)
3366 {
3367         if (e->function != function)
3368                 return 0;
3369         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3370                 return 0;
3371         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
3372             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
3373                 return 0;
3374         return 1;
3375 }
3376
3377 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3378                                               u32 function, u32 index)
3379 {
3380         int i;
3381         struct kvm_cpuid_entry2 *best = NULL;
3382
3383         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
3384                 struct kvm_cpuid_entry2 *e;
3385
3386                 e = &vcpu->arch.cpuid_entries[i];
3387                 if (is_matching_cpuid_entry(e, function, index)) {
3388                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3389                                 move_to_next_stateful_cpuid_entry(vcpu, i);
3390                         best = e;
3391                         break;
3392                 }
3393                 /*
3394                  * Both basic or both extended?
3395                  */
3396                 if (((e->function ^ function) & 0x80000000) == 0)
3397                         if (!best || e->function > best->function)
3398                                 best = e;
3399         }
3400         return best;
3401 }
3402
3403 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3404 {
3405         struct kvm_cpuid_entry2 *best;
3406
3407         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3408         if (best)
3409                 return best->eax & 0xff;
3410         return 36;
3411 }
3412
3413 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3414 {
3415         u32 function, index;
3416         struct kvm_cpuid_entry2 *best;
3417
3418         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3419         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3420         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3421         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3422         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3423         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3424         best = kvm_find_cpuid_entry(vcpu, function, index);
3425         if (best) {
3426                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3427                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3428                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3429                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
3430         }
3431         kvm_x86_ops->skip_emulated_instruction(vcpu);
3432         trace_kvm_cpuid(function,
3433                         kvm_register_read(vcpu, VCPU_REGS_RAX),
3434                         kvm_register_read(vcpu, VCPU_REGS_RBX),
3435                         kvm_register_read(vcpu, VCPU_REGS_RCX),
3436                         kvm_register_read(vcpu, VCPU_REGS_RDX));
3437 }
3438 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
3439
3440 /*
3441  * Check if userspace requested an interrupt window, and that the
3442  * interrupt window is open.
3443  *
3444  * No need to exit to userspace if we already have an interrupt queued.
3445  */
3446 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3447                                           struct kvm_run *kvm_run)
3448 {
3449         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
3450                 kvm_run->request_interrupt_window &&
3451                 kvm_arch_interrupt_allowed(vcpu));
3452 }
3453
3454 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3455                               struct kvm_run *kvm_run)
3456 {
3457         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
3458         kvm_run->cr8 = kvm_get_cr8(vcpu);
3459         kvm_run->apic_base = kvm_get_apic_base(vcpu);
3460         if (irqchip_in_kernel(vcpu->kvm))
3461                 kvm_run->ready_for_interrupt_injection = 1;
3462         else
3463                 kvm_run->ready_for_interrupt_injection =
3464                         kvm_arch_interrupt_allowed(vcpu) &&
3465                         !kvm_cpu_has_interrupt(vcpu) &&
3466                         !kvm_event_needs_reinjection(vcpu);
3467 }
3468
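     /*
      * Pin the page backing the guest's virtual-APIC area for the duration
      * of the run loop; vapic_exit() releases it and marks it dirty.
      */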
3469 static void vapic_enter(struct kvm_vcpu *vcpu)
3470 {
3471         struct kvm_lapic *apic = vcpu->arch.apic;
3472         struct page *page;
3473
3474         if (!apic || !apic->vapic_addr)
3475                 return;
3476
3477         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3478
3479         vcpu->arch.apic->vapic_page = page;
3480 }
3481
3482 static void vapic_exit(struct kvm_vcpu *vcpu)
3483 {
3484         struct kvm_lapic *apic = vcpu->arch.apic;
3485
3486         if (!apic || !apic->vapic_addr)
3487                 return;
3488
3489         down_read(&vcpu->kvm->slots_lock);
3490         kvm_release_page_dirty(apic->vapic_page);
3491         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3492         up_read(&vcpu->kvm->slots_lock);
3493 }
3494
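     /*
      * Feed the current TPR and the priority class of the highest pending
      * interrupt (-1 when a vapic page keeps the TPR in sync) to the
      * vendor hook that tunes CR8/TPR write intercepts.
      */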
3495 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3496 {
3497         int max_irr, tpr;
3498
3499         if (!kvm_x86_ops->update_cr8_intercept)
3500                 return;
3501
3502         if (!vcpu->arch.apic)
3503                 return;
3504
3505         if (!vcpu->arch.apic->vapic_addr)
3506                 max_irr = kvm_lapic_find_highest_irr(vcpu);
3507         else
3508                 max_irr = -1;
3509
3510         if (max_irr != -1)
3511                 max_irr >>= 4;
3512
3513         tpr = kvm_lapic_get_cr8(vcpu);
3514
3515         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3516 }
3517
3518 static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3519 {
3520         /* try to reinject previous events if any */
3521         if (vcpu->arch.exception.pending) {
3522                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
3523                                           vcpu->arch.exception.has_error_code,
3524                                           vcpu->arch.exception.error_code);
3525                 return;
3526         }
3527
3528         if (vcpu->arch.nmi_injected) {
3529                 kvm_x86_ops->set_nmi(vcpu);
3530                 return;
3531         }
3532
3533         if (vcpu->arch.interrupt.pending) {
3534                 kvm_x86_ops->set_irq(vcpu);
3535                 return;
3536         }
3537
3538         /* try to inject new event if pending */
3539         if (vcpu->arch.nmi_pending) {
3540                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3541                         vcpu->arch.nmi_pending = false;
3542                         vcpu->arch.nmi_injected = true;
3543                         kvm_x86_ops->set_nmi(vcpu);
3544                 }
3545         } else if (kvm_cpu_has_interrupt(vcpu)) {
3546                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
3547                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
3548                                             false);
3549                         kvm_x86_ops->set_irq(vcpu);
3550                 }
3551         }
3552 }
3553
3554 static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3555 {
3556         int r;
3557         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3558                 kvm_run->request_interrupt_window;
3559
3560         if (vcpu->requests)
3561                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3562                         kvm_mmu_unload(vcpu);
3563
3564         r = kvm_mmu_reload(vcpu);
3565         if (unlikely(r))
3566                 goto out;
3567
3568         if (vcpu->requests) {
3569                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
3570                         __kvm_migrate_timers(vcpu);
3571                 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3572                         kvm_write_guest_time(vcpu);
3573                 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3574                         kvm_mmu_sync_roots(vcpu);
3575                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3576                         kvm_x86_ops->tlb_flush(vcpu);
3577                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3578                                        &vcpu->requests)) {
3579                         kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3580                         r = 0;
3581                         goto out;
3582                 }
3583                 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3584                         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3585                         r = 0;
3586                         goto out;
3587                 }
3588         }
3589
3590         preempt_disable();
3591
3592         kvm_x86_ops->prepare_guest_switch(vcpu);
3593         kvm_load_guest_fpu(vcpu);
3594
3595         local_irq_disable();
3596
3597         clear_bit(KVM_REQ_KICK, &vcpu->requests);
3598         smp_mb__after_clear_bit();
3599
3600         if (vcpu->requests || need_resched() || signal_pending(current)) {
3601                 set_bit(KVM_REQ_KICK, &vcpu->requests);
3602                 local_irq_enable();
3603                 preempt_enable();
3604                 r = 1;
3605                 goto out;
3606         }
3607
3608         inject_pending_event(vcpu, kvm_run);
3609
3610         /* enable NMI/IRQ window open exits if needed */
3611         if (vcpu->arch.nmi_pending)
3612                 kvm_x86_ops->enable_nmi_window(vcpu);
3613         else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3614                 kvm_x86_ops->enable_irq_window(vcpu);
3615
3616         if (kvm_lapic_enabled(vcpu)) {
3617                 update_cr8_intercept(vcpu);
3618                 kvm_lapic_sync_to_vapic(vcpu);
3619         }
3620
3621         up_read(&vcpu->kvm->slots_lock);
3622
3623         kvm_guest_enter();
3624
3625         if (unlikely(vcpu->arch.switch_db_regs)) {
3626                 set_debugreg(0, 7);
3627                 set_debugreg(vcpu->arch.eff_db[0], 0);
3628                 set_debugreg(vcpu->arch.eff_db[1], 1);
3629                 set_debugreg(vcpu->arch.eff_db[2], 2);
3630                 set_debugreg(vcpu->arch.eff_db[3], 3);
3631         }
3632
3633         trace_kvm_entry(vcpu->vcpu_id);
3634         kvm_x86_ops->run(vcpu, kvm_run);
3635
3636         if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
3637                 set_debugreg(current->thread.debugreg0, 0);
3638                 set_debugreg(current->thread.debugreg1, 1);
3639                 set_debugreg(current->thread.debugreg2, 2);
3640                 set_debugreg(current->thread.debugreg3, 3);
3641                 set_debugreg(current->thread.debugreg6, 6);
3642                 set_debugreg(current->thread.debugreg7, 7);
3643         }
3644
3645         set_bit(KVM_REQ_KICK, &vcpu->requests);
3646         local_irq_enable();
3647
3648         ++vcpu->stat.exits;
3649
3650         /*
3651          * We must have an instruction between local_irq_enable() and
3652          * kvm_guest_exit(), so the timer interrupt isn't delayed by
3653          * the interrupt shadow.  The stat.exits increment will do nicely.
3654          * But we need to prevent reordering, hence this barrier():
3655          */
3656         barrier();
3657
3658         kvm_guest_exit();
3659
3660         preempt_enable();
3661
3662         down_read(&vcpu->kvm->slots_lock);
3663
3664         /*
3665          * Profile KVM exit RIPs:
3666          */
3667         if (unlikely(prof_on == KVM_PROFILING)) {
3668                 unsigned long rip = kvm_rip_read(vcpu);
3669                 profile_hit(KVM_PROFILING, (void *)rip);
3670         }
3671
3673         kvm_lapic_sync_from_vapic(vcpu);
3674
3675         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
3676 out:
3677         return r;
3678 }
3679
3681 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3682 {
3683         int r;
3684
3685         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3686                 pr_debug("vcpu %d received sipi with vector # %x\n",
3687                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
3688                 kvm_lapic_reset(vcpu);
3689                 r = kvm_arch_vcpu_reset(vcpu);
3690                 if (r)
3691                         return r;
3692                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3693         }
3694
3695         down_read(&vcpu->kvm->slots_lock);
3696         vapic_enter(vcpu);
3697
3698         r = 1;
3699         while (r > 0) {
3700                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3701                         r = vcpu_enter_guest(vcpu, kvm_run);
3702                 else {
3703                         up_read(&vcpu->kvm->slots_lock);
3704                         kvm_vcpu_block(vcpu);
3705                         down_read(&vcpu->kvm->slots_lock);
3706                         if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
3707                                 switch (vcpu->arch.mp_state) {
3708                                 case KVM_MP_STATE_HALTED:
3709                                         vcpu->arch.mp_state =
3710                                                 KVM_MP_STATE_RUNNABLE;
3711                                         /* fall through */
3712                                 case KVM_MP_STATE_RUNNABLE:
3713                                         break;
3714                                 case KVM_MP_STATE_SIPI_RECEIVED:
3715                                 default:
3716                                         r = -EINTR;
3717                                         break;
3718                                 }
3719                         }
3720                 }
3721
3722                 if (r <= 0)
3723                         break;
3724
3725                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3726                 if (kvm_cpu_has_pending_timer(vcpu))
3727                         kvm_inject_pending_timer_irqs(vcpu);
3728
3729                 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3730                         r = -EINTR;
3731                         kvm_run->exit_reason = KVM_EXIT_INTR;
3732                         ++vcpu->stat.request_irq_exits;
3733                 }
3734                 if (signal_pending(current)) {
3735                         r = -EINTR;
3736                         kvm_run->exit_reason = KVM_EXIT_INTR;
3737                         ++vcpu->stat.signal_exits;
3738                 }
3739                 if (need_resched()) {
3740                         up_read(&vcpu->kvm->slots_lock);
3741                         kvm_resched(vcpu);
3742                         down_read(&vcpu->kvm->slots_lock);
3743                 }
3744         }
3745
3746         up_read(&vcpu->kvm->slots_lock);
3747         post_kvm_run_save(vcpu, kvm_run);
3748
3749         vapic_exit(vcpu);
3750
3751         return r;
3752 }
3753
3754 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3755 {
3756         int r;
3757         sigset_t sigsaved;
3758
3759         vcpu_load(vcpu);
3760
3761         if (vcpu->sigset_active)
3762                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3763
3764         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
3765                 kvm_vcpu_block(vcpu);
3766                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3767                 r = -EAGAIN;
3768                 goto out;
3769         }
3770
3771         /* re-sync apic's tpr */
3772         if (!irqchip_in_kernel(vcpu->kvm))
3773                 kvm_set_cr8(vcpu, kvm_run->cr8);
3774
3775         if (vcpu->arch.pio.cur_count) {
3776                 r = complete_pio(vcpu);
3777                 if (r)
3778                         goto out;
3779         }
3780 #ifdef CONFIG_HAS_IOMEM
3781         if (vcpu->mmio_needed) {
3782                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3783                 vcpu->mmio_read_completed = 1;
3784                 vcpu->mmio_needed = 0;
3785
3786                 down_read(&vcpu->kvm->slots_lock);
3787                 r = emulate_instruction(vcpu, kvm_run,
3788                                         vcpu->arch.mmio_fault_cr2, 0,
3789                                         EMULTYPE_NO_DECODE);
3790                 up_read(&vcpu->kvm->slots_lock);
3791                 if (r == EMULATE_DO_MMIO) {
3792                         /*
3793                          * Read-modify-write.  Back to userspace.
3794                          */
3795                         r = 0;
3796                         goto out;
3797                 }
3798         }
3799 #endif
3800         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3801                 kvm_register_write(vcpu, VCPU_REGS_RAX,
3802                                      kvm_run->hypercall.ret);
3803
3804         r = __vcpu_run(vcpu, kvm_run);
3805
3806 out:
3807         if (vcpu->sigset_active)
3808                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3809
3810         vcpu_put(vcpu);
3811         return r;
3812 }
3813
3814 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3815 {
3816         vcpu_load(vcpu);
3817
3818         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3819         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3820         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3821         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3822         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3823         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3824         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3825         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3826 #ifdef CONFIG_X86_64
3827         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3828         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3829         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3830         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3831         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3832         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3833         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3834         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3835 #endif
3836
3837         regs->rip = kvm_rip_read(vcpu);
3838         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3839
3840         /*
3841          * Don't leak debug flags in case they were set for guest debugging
3842          */
3843         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3844                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3845
3846         vcpu_put(vcpu);
3847
3848         return 0;
3849 }
3850
3851 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3852 {
3853         vcpu_load(vcpu);
3854
3855         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3856         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3857         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3858         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3859         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3860         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3861         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3862         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
3863 #ifdef CONFIG_X86_64
3864         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3865         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3866         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3867         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3868         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3869         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3870         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3871         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3873 #endif
3874
3875         kvm_rip_write(vcpu, regs->rip);
3876         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3877
3879         vcpu->arch.exception.pending = false;
3880
3881         vcpu_put(vcpu);
3882
3883         return 0;
3884 }
3885
3886 void kvm_get_segment(struct kvm_vcpu *vcpu,
3887                      struct kvm_segment *var, int seg)
3888 {
3889         kvm_x86_ops->get_segment(vcpu, var, seg);
3890 }
3891
3892 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3893 {
3894         struct kvm_segment cs;
3895
3896         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3897         *db = cs.db;
3898         *l = cs.l;
3899 }
3900 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3901
3902 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3903                                   struct kvm_sregs *sregs)
3904 {
3905         struct descriptor_table dt;
3906
3907         vcpu_load(vcpu);
3908
3909         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3910         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3911         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3912         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3913         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3914         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3915
3916         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3917         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3918
3919         kvm_x86_ops->get_idt(vcpu, &dt);
3920         sregs->idt.limit = dt.limit;
3921         sregs->idt.base = dt.base;
3922         kvm_x86_ops->get_gdt(vcpu, &dt);
3923         sregs->gdt.limit = dt.limit;
3924         sregs->gdt.base = dt.base;
3925
3926         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3927         sregs->cr0 = vcpu->arch.cr0;
3928         sregs->cr2 = vcpu->arch.cr2;
3929         sregs->cr3 = vcpu->arch.cr3;
3930         sregs->cr4 = vcpu->arch.cr4;
3931         sregs->cr8 = kvm_get_cr8(vcpu);
3932         sregs->efer = vcpu->arch.shadow_efer;
3933         sregs->apic_base = kvm_get_apic_base(vcpu);
3934
3935         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
3936
3937         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
3938                 set_bit(vcpu->arch.interrupt.nr,
3939                         (unsigned long *)sregs->interrupt_bitmap);
3940
3941         vcpu_put(vcpu);
3942
3943         return 0;
3944 }
3945
3946 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3947                                     struct kvm_mp_state *mp_state)
3948 {
3949         vcpu_load(vcpu);
3950         mp_state->mp_state = vcpu->arch.mp_state;
3951         vcpu_put(vcpu);
3952         return 0;
3953 }
3954
3955 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3956                                     struct kvm_mp_state *mp_state)
3957 {
3958         vcpu_load(vcpu);
3959         vcpu->arch.mp_state = mp_state->mp_state;
3960         vcpu_put(vcpu);
3961         return 0;
3962 }
3963
3964 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3965                         struct kvm_segment *var, int seg)
3966 {
3967         kvm_x86_ops->set_segment(vcpu, var, seg);
3968 }
3969
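     /*
      * Unpack an architectural segment descriptor into kvm_segment form.
      * With G=1 the limit is in 4K units, so scale it to bytes; a null
      * selector marks the segment unusable.
      */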
3970 static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3971                                    struct kvm_segment *kvm_desct)
3972 {
3973         kvm_desct->base = get_desc_base(seg_desc);
3974         kvm_desct->limit = get_desc_limit(seg_desc);
3975         if (seg_desc->g) {
3976                 kvm_desct->limit <<= 12;
3977                 kvm_desct->limit |= 0xfff;
3978         }
3979         kvm_desct->selector = selector;
3980         kvm_desct->type = seg_desc->type;
3981         kvm_desct->present = seg_desc->p;
3982         kvm_desct->dpl = seg_desc->dpl;
3983         kvm_desct->db = seg_desc->d;
3984         kvm_desct->s = seg_desc->s;
3985         kvm_desct->l = seg_desc->l;
3986         kvm_desct->g = seg_desc->g;
3987         kvm_desct->avl = seg_desc->avl;
3988         if (!selector)
3989                 kvm_desct->unusable = 1;
3990         else
3991                 kvm_desct->unusable = 0;
3992         kvm_desct->padding = 0;
3993 }
3994
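     /*
      * Bit 2 of a selector is the table indicator: set means the LDT,
      * clear means the GDT.  Return the bounds of whichever table the
      * selector indexes.
      */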
3995 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3996                                           u16 selector,
3997                                           struct descriptor_table *dtable)
3998 {
3999         if (selector & (1 << 2)) {
4000                 struct kvm_segment kvm_seg;
4001
4002                 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
4003
4004                 if (kvm_seg.unusable)
4005                         dtable->limit = 0;
4006                 else
4007                         dtable->limit = kvm_seg.limit;
4008                 dtable->base = kvm_seg.base;
4009         } else
4011                 kvm_x86_ops->get_gdt(vcpu, dtable);
4012 }
4013
4014 /* allowed just for 8-byte segment descriptors */
4015 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4016                                          struct desc_struct *seg_desc)
4017 {
4018         struct descriptor_table dtable;
4019         u16 index = selector >> 3;
4020
4021         get_segment_descriptor_dtable(vcpu, selector, &dtable);
4022
4023         if (dtable.limit < index * 8 + 7) {
4024                 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
4025                 return 1;
4026         }
4027         return kvm_read_guest_virt(dtable.base + index * 8, seg_desc, sizeof(*seg_desc), vcpu);
4028 }
4029
4030 /* allowed just for 8-byte segment descriptors */
4031 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4032                                          struct desc_struct *seg_desc)
4033 {
4034         struct descriptor_table dtable;
4035         u16 index = selector >> 3;
4036
4037         get_segment_descriptor_dtable(vcpu, selector, &dtable);
4038
4039         if (dtable.limit < index * 8 + 7)
4040                 return 1;
4041         return kvm_write_guest_virt(dtable.base + index * 8, seg_desc, sizeof(*seg_desc), vcpu);
4042 }
4043
4044 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
4045                              struct desc_struct *seg_desc)
4046 {
4047         u32 base_addr = get_desc_base(seg_desc);
4048
4049         return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
4050 }
4051
4052 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4053 {
4054         struct kvm_segment kvm_seg;
4055
4056         kvm_get_segment(vcpu, &kvm_seg, seg);
4057         return kvm_seg.selector;
4058 }
4059
4060 static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
4061                                                 u16 selector,
4062                                                 struct kvm_segment *kvm_seg)
4063 {
4064         struct desc_struct seg_desc;
4065
4066         if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
4067                 return 1;
4068         seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
4069         return 0;
4070 }
4071
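     /*
      * Real-mode segment load: no descriptor fetch, the base is simply
      * selector << 4 with a 64K limit and writable-data attributes.
      */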
4072 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
4073 {
4074         struct kvm_segment segvar = {
4075                 .base = selector << 4,
4076                 .limit = 0xffff,
4077                 .selector = selector,
4078                 .type = 3,
4079                 .present = 1,
4080                 .dpl = 3,
4081                 .db = 0,
4082                 .s = 1,
4083                 .l = 0,
4084                 .g = 0,
4085                 .avl = 0,
4086                 .unusable = 0,
4087         };
4088         kvm_x86_ops->set_segment(vcpu, &segvar, seg);
4089         return 0;
4090 }
4091
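     /*
      * In virtual-8086 mode every segment except TR and LDTR is loaded
      * real-mode style rather than through a descriptor.
      */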
4092 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4093 {
4094         return (seg != VCPU_SREG_LDTR) &&
4095                 (seg != VCPU_SREG_TR) &&
4096                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
4097 }
4098
4099 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4100                                 int type_bits, int seg)
4101 {
4102         struct kvm_segment kvm_seg;
4103
4104         if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
4105                 return kvm_load_realmode_segment(vcpu, selector, seg);
4106         if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
4107                 return 1;
4108         kvm_seg.type |= type_bits;
4109
4110         if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
4111             seg != VCPU_SREG_LDTR)
4112                 if (!kvm_seg.s)
4113                         kvm_seg.unusable = 1;
4114
4115         kvm_set_segment(vcpu, &kvm_seg, seg);
4116         return 0;
4117 }
4118
4119 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4120                                 struct tss_segment_32 *tss)
4121 {
4122         tss->cr3 = vcpu->arch.cr3;
4123         tss->eip = kvm_rip_read(vcpu);
4124         tss->eflags = kvm_x86_ops->get_rflags(vcpu);
4125         tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4126         tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4127         tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4128         tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4129         tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4130         tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4131         tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4132         tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4133         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4134         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4135         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4136         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4137         tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4138         tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4139         tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4140 }
4141
4142 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4143                                   struct tss_segment_32 *tss)
4144 {
4145         kvm_set_cr3(vcpu, tss->cr3);
4146
4147         kvm_rip_write(vcpu, tss->eip);
4148         kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
4149
4150         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4151         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4152         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4153         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4154         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4155         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4156         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4157         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
4158
4159         if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
4160                 return 1;
4161
4162         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
4163                 return 1;
4164
4165         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
4166                 return 1;
4167
4168         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
4169                 return 1;
4170
4171         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
4172                 return 1;
4173
4174         if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
4175                 return 1;
4176
4177         if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
4178                 return 1;
4179         return 0;
4180 }
4181
4182 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
4183                                 struct tss_segment_16 *tss)
4184 {
4185         tss->ip = kvm_rip_read(vcpu);
4186         tss->flag = kvm_x86_ops->get_rflags(vcpu);
4187         tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4188         tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4189         tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4190         tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4191         tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4192         tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4193         tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
4194         tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
4195
4196         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4197         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4198         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4199         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4200         tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4201         tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
4202 }
4203
4204 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4205                                  struct tss_segment_16 *tss)
4206 {
4207         kvm_rip_write(vcpu, tss->ip);
4208         kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
4209         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
4210         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
4211         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
4212         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
4213         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
4214         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
4215         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4216         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
4217
4218         if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
4219                 return 1;
4220
4221         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
4222                 return 1;
4223
4224         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
4225                 return 1;
4226
4227         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
4228                 return 1;
4229
4230         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
4231                 return 1;
4232         return 0;
4233 }
4234
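     /*
      * Memory side of a 16-bit task switch: save the live state into the
      * old TSS, fetch the new one, chain prev_task_link when nesting
      * (old_tss_sel != 0xffff), then load the incoming state.  Returns 1
      * on success, 0 if a guest memory access failed.  kvm_task_switch_32()
      * below is the 32-bit counterpart.
      */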
4235 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
4236                               u16 old_tss_sel, u32 old_tss_base,
4237                               struct desc_struct *nseg_desc)
4238 {
4239         struct tss_segment_16 tss_segment_16;
4240         int ret = 0;
4241
4242         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4243                            sizeof tss_segment_16))
4244                 goto out;
4245
4246         save_state_to_tss16(vcpu, &tss_segment_16);
4247
4248         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4249                             sizeof tss_segment_16))
4250                 goto out;
4251
4252         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4253                            &tss_segment_16, sizeof tss_segment_16))
4254                 goto out;
4255
4256         if (old_tss_sel != 0xffff) {
4257                 tss_segment_16.prev_task_link = old_tss_sel;
4258
4259                 if (kvm_write_guest(vcpu->kvm,
4260                                     get_tss_base_addr(vcpu, nseg_desc),
4261                                     &tss_segment_16.prev_task_link,
4262                                     sizeof tss_segment_16.prev_task_link))
4263                         goto out;
4264         }
4265
4266         if (load_state_from_tss16(vcpu, &tss_segment_16))
4267                 goto out;
4268
4269         ret = 1;
4270 out:
4271         return ret;
4272 }
4273
4274 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
4275                        u16 old_tss_sel, u32 old_tss_base,
4276                        struct desc_struct *nseg_desc)
4277 {
4278         struct tss_segment_32 tss_segment_32;
4279         int ret = 0;
4280
4281         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4282                            sizeof tss_segment_32))
4283                 goto out;
4284
4285         save_state_to_tss32(vcpu, &tss_segment_32);
4286
4287         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4288                             sizeof tss_segment_32))
4289                 goto out;
4290
4291         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4292                            &tss_segment_32, sizeof tss_segment_32))
4293                 goto out;
4294
4295         if (old_tss_sel != 0xffff) {
4296                 tss_segment_32.prev_task_link = old_tss_sel;
4297
4298                 if (kvm_write_guest(vcpu->kvm,
4299                                     get_tss_base_addr(vcpu, nseg_desc),
4300                                     &tss_segment_32.prev_task_link,
4301                                     sizeof tss_segment_32.prev_task_link))
4302                         goto out;
4303         }
4304
4305         if (load_state_from_tss32(vcpu, &tss_segment_32))
4306                 goto out;
4307
4308         ret = 1;
4309 out:
4310         return ret;
4311 }
4312
4313 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4314 {
4315         struct kvm_segment tr_seg;
4316         struct desc_struct cseg_desc;
4317         struct desc_struct nseg_desc;
4318         int ret = 0;
4319         u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
4320         u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
4321
4322         old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
4323
4324         /* FIXME: Handle errors. Failure to read either TSS or their
4325          * descriptors should generate a page fault.
4326          */
4327         if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
4328                 goto out;
4329
4330         if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
4331                 goto out;
4332
4333         if (reason != TASK_SWITCH_IRET) {
4334                 int cpl;
4335
4336                 cpl = kvm_x86_ops->get_cpl(vcpu);
4337                 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
4338                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4339                         return 1;
4340                 }
4341         }
4342
4343         if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
4344                 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
4345                 return 1;
4346         }
4347
4348         if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
4349                 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
4350                 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
4351         }
4352
4353         if (reason == TASK_SWITCH_IRET) {
4354                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
4355                 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
4356         }
4357
4358         /* set back link to prev task only if NT bit is set in eflags
4359            note that old_tss_sel is not used after this point */
4360         if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
4361                 old_tss_sel = 0xffff;
4362
4368         if (nseg_desc.type & 8)
4369                 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
4370                                          old_tss_base, &nseg_desc);
4371         else
4372                 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
4373                                          old_tss_base, &nseg_desc);
4374
4375         if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
4376                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
4377                 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
4378         }
4379
4380         if (reason != TASK_SWITCH_IRET) {
4381                 nseg_desc.type |= (1 << 1);
4382                 save_guest_segment_descriptor(vcpu, tss_selector,
4383                                               &nseg_desc);
4384         }
4385
4386         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
4387         seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4388         tr_seg.type = 11;
4389         kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
4390 out:
4391         return ret;
4392 }
4393 EXPORT_SYMBOL_GPL(kvm_task_switch);

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int mmu_reset_needed = 0;
        int pending_vec, max_bits;
        struct descriptor_table dt;

        vcpu_load(vcpu);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);

        vcpu->arch.cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;

        kvm_set_cr8(vcpu, sregs->cr8);

        mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
        vcpu->arch.cr0 = sregs->cr0;

        mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->arch.cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        max_bits = (sizeof sregs->interrupt_bitmap) << 3;
        pending_vec = find_first_bit(
                (const unsigned long *)sregs->interrupt_bitmap, max_bits);
        if (pending_vec < max_bits) {
                kvm_queue_interrupt(vcpu, pending_vec, false);
                pr_debug("Set back pending irq %d\n", pending_vec);
                if (irqchip_in_kernel(vcpu->kvm))
                        kvm_pic_clear_isr_ack(vcpu->kvm);
        }

        kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        update_cr8_intercept(vcpu);

        /* Older userspace won't unhalt the vcpu on reset. */
        if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
            !(vcpu->arch.cr0 & X86_CR0_PE))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

        vcpu_put(vcpu);

        return 0;
}
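
/*
 * Userspace view, as a rough sketch: this handler backs the KVM_SET_SREGS
 * vcpu ioctl, and is usually paired with KVM_GET_SREGS so the VMM only
 * rewrites the fields it cares about.  For example, a minimal VMM that
 * flips a guest into protected mode might do (vcpu_fd being a
 * hypothetical vcpu file descriptor):
 *
 *	struct kvm_sregs sregs;
 *
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.cr0 |= X86_CR0_PE;
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 *
 * Error handling is omitted; both ioctls return 0 on success and -1 with
 * errno set on failure.
 */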

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int i, r;

        vcpu_load(vcpu);

        if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
            (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
                for (i = 0; i < KVM_NR_DB_REGS; ++i)
                        vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
                vcpu->arch.switch_db_regs =
                        (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
        } else {
                for (i = 0; i < KVM_NR_DB_REGS; i++)
                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
                vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
        }

        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

        if (dbg->control & KVM_GUESTDBG_INJECT_DB)
                kvm_queue_exception(vcpu, DB_VECTOR);
        else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
                kvm_queue_exception(vcpu, BP_VECTOR);

        vcpu_put(vcpu);

        return r;
}
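
/*
 * Sketch of the corresponding KVM_SET_GUEST_DEBUG call from userspace,
 * arming one hardware breakpoint (the address and DR7 value are
 * illustrative; vcpu_fd is a hypothetical vcpu file descriptor):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.debugreg[0] = guest_breakpoint_address;
 *	dbg.arch.debugreg[7] = 0x1;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * DR7 bit 0 (L0) locally enables breakpoint 0; with the len/type bits
 * left at zero it is a one-byte execute breakpoint, the usual debugger
 * configuration.
 */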

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
#else
        u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
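
/*
 * The field offsets must match the hardware FXSAVE image, since this
 * struct is overlaid on the raw save area.  A compile-time sanity check
 * along these lines (shown only as an illustration, not taken from this
 * file) would catch accidental padding:
 *
 *	BUILD_BUG_ON(offsetof(struct fxsave, mxcsr) != 24);
 *	BUILD_BUG_ON(offsetof(struct fxsave, st_space) != 32);
 *
 * Offsets 24 and 32 are the architectural positions of MXCSR and the
 * x87 register area in the FXSAVE format.
 */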

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;

        vcpu_load(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
        up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        vcpu_put(vcpu);

        return 0;
}
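
/*
 * This is the backend of the KVM_TRANSLATE vcpu ioctl.  A debugging tool
 * could walk guest translations through it roughly like this (vcpu_fd is
 * a hypothetical vcpu file descriptor):
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *
 *	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *		printf("gva %llx -> gpa %llx\n", gva, tr.physical_address);
 *
 * Note that writeable/usermode are hardwired above, so only .valid and
 * .physical_address carry real information.
 */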

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
        fpu->fsw = fxsave->swd;
        fpu->ftwx = fxsave->twd;
        fpu->last_opcode = fxsave->fop;
        fpu->last_ip = fxsave->rip;
        fpu->last_dp = fxsave->rdp;
        memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
        fxsave->swd = fpu->fsw;
        fxsave->twd = fpu->ftwx;
        fxsave->fop = fpu->last_opcode;
        fxsave->rip = fpu->last_ip;
        fxsave->rdp = fpu->last_dp;
        memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}
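
/*
 * These two handlers back the KVM_GET_FPU and KVM_SET_FPU vcpu ioctls,
 * which VMMs use for save/restore and migration.  A rough round-trip
 * sketch (vcpu_fd is a hypothetical vcpu file descriptor):
 *
 *	struct kvm_fpu fpu;
 *
 *	ioctl(vcpu_fd, KVM_GET_FPU, &fpu);
 *	... serialize fpu, ship it to the target vcpu, then there: ...
 *	ioctl(vcpu_fd, KVM_SET_FPU, &fpu);
 *
 * Both calls return 0 on success and -1 with errno set on failure.
 */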

void fx_init(struct kvm_vcpu *vcpu)
{
        unsigned after_mxcsr_mask;

        /*
         * Touch the fpu the first time in non-atomic context: if this
         * is the task's first fpu instruction, the exception handler
         * fires before the instruction completes and may have to
         * allocate RAM with GFP_KERNEL, which can sleep.
         */
        if (!used_math())
                kvm_fx_save(&vcpu->arch.host_fx_image);

        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_finit();
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        preempt_enable();

        vcpu->arch.cr0 |= X86_CR0_ET;
        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
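
/*
 * 0x1f80 is the architectural MXCSR reset default: the six SIMD
 * floating-point exception classes are masked (bits 7-12 set), no flags
 * are pending, and rounding is to nearest.  As a sketch:
 *
 *	#define MXCSR_DEFAULT	0x1f80
 *
 * so a freshly initialized guest FPU raises no SIMD exceptions until the
 * guest itself reprograms MXCSR.
 */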

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 0;
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
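
/*
 * The load/put pair above implements lazy FPU switching: guest FPU state
 * is installed only when the guest actually needs it, and the host image
 * is restored on the way out.  A caller would bracket guest entry roughly
 * like this (sketch only):
 *
 *	kvm_load_guest_fpu(vcpu);
 *	... enter guest mode, run until the next exit ...
 *	kvm_put_guest_fpu(vcpu);
 *
 * The guest_fpu_loaded flag makes both calls idempotent, so the pair is
 * cheap when the guest never touched the FPU.
 */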

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.time_page) {
                kvm_release_page_dirty(vcpu->arch.time_page);
                vcpu->arch.time_page = NULL;
        }

        kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int r;

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

        vcpu->arch.mtrr_state.have_fixed = 1;
        vcpu_load(vcpu);
        r = kvm_arch_vcpu_reset(vcpu);
        if (r == 0)
                r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        return 0;
free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);

        kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = false;
        vcpu->arch.nmi_injected = false;

        vcpu->arch.switch_db_regs = 0;
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;

        return kvm_x86_ops->vcpu_reset(vcpu);
}
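
/*
 * DR6_FIXED_1 and DR7_FIXED_1 are the bits that architecturally read as
 * one, so resetting to them leaves the debug registers in their power-on
 * state.  For reference:
 *
 *	DR6 reset value: 0xffff0ff0
 *	DR7 reset value: 0x00000400
 */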

void kvm_arch_hardware_enable(void *garbage)
{
        kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
        kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
        return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
        kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
        kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct page *page;
        struct kvm *kvm;
        int r;

        BUG_ON(vcpu->kvm == NULL);
        kvm = vcpu->kvm;

        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->arch.pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        if (irqchip_in_kernel(kvm)) {
                r = kvm_create_lapic(vcpu);
                if (r < 0)
                        goto fail_mmu_destroy;
        }

        vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
                                       GFP_KERNEL);
        if (!vcpu->arch.mce_banks) {
                r = -ENOMEM;
                goto fail_free_lapic;
        }
        vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

        return 0;

fail_free_lapic:
        kvm_free_lapic(vcpu);
fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
fail_free_pio_data:
        free_page((unsigned long)vcpu->arch.pio_data);
fail:
        return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_free_lapic(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        kvm_mmu_destroy(vcpu);
        up_read(&vcpu->kvm->slots_lock);
        free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

        rdtscll(kvm->arch.vm_init_tsc);

        return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        /*
         * Unpin any mmu pages first.
         */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_unload_vcpu_mmu(vcpu);
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
        kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_iommu_unmap_guest(kvm);
        kvm_free_pit(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
        kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

        /* To keep backward compatibility with older userspace,
         * x86 needs to handle the !user_alloc case.
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
                        unsigned long userspace_addr;

                        down_write(&current->mm->mmap_sem);
                        userspace_addr = do_mmap(NULL, 0,
                                                 npages * PAGE_SIZE,
                                                 PROT_READ | PROT_WRITE,
                                                 MAP_PRIVATE | MAP_ANONYMOUS,
                                                 0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)userspace_addr))
                                return PTR_ERR((void *)userspace_addr);

                        /* set userspace_addr atomically for kvm_hva_to_rmapp */
                        spin_lock(&kvm->mmu_lock);
                        memslot->userspace_addr = userspace_addr;
                        spin_unlock(&kvm->mmu_lock);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;

                                down_write(&current->mm->mmap_sem);
                                ret = do_munmap(current->mm, old.userspace_addr,
                                                old.npages * PAGE_SIZE);
                                up_write(&current->mm->mmap_sem);
                                if (ret < 0)
                                        printk(KERN_WARNING
                                               "kvm_vm_ioctl_set_memory_region: "
                                               "failed to munmap memory\n");
                        }
                }
        }

        spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        spin_unlock(&kvm->mmu_lock);

        return 0;
}
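
/*
 * Userspace reaches this through the KVM_SET_USER_MEMORY_REGION vm ioctl.
 * A typical VMM registers guest RAM it allocated itself, so user_alloc is
 * set and the do_mmap() compatibility path above is skipped.  Sketch
 * (vm_fd and ram are hypothetical):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (__u64)ram,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * memory_size must be page-aligned; passing zero deletes the slot.
 */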

void kvm_arch_flush_shadow(struct kvm *kvm)
{
        kvm_mmu_zap_all(kvm);
        kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
                || vcpu->arch.nmi_pending
                || (kvm_arch_interrupt_allowed(vcpu) &&
                    kvm_cpu_has_interrupt(vcpu));
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int me;
        int cpu = vcpu->cpu;

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                ++vcpu->stat.halt_wakeup;
        }

        me = get_cpu();
        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
                        smp_send_reschedule(cpu);
        put_cpu();
}
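
/*
 * KVM_REQ_KICK plus smp_send_reschedule() is how a remote vcpu that is
 * currently in guest mode gets yanked out: the IPI forces a VM exit, and
 * the request bit keeps redundant IPIs from being sent while one is
 * already in flight.  The waitqueue branch handles the other case, a
 * vcpu sleeping in halt.
 */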

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops->interrupt_allowed(vcpu);
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);