KVM: remove pre_task_link setting in save_state_to_tss16
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall by default, because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;

        if (selector == 0)
                return 0;

        kvm_get_gdt(&gdt);
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector = kvm_read_ldt();

                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
        v = get_desc_base(d);
#ifdef CONFIG_X86_64
        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);
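
/*
 * Illustrative sketch (editor's addition, not part of the driver): how a
 * segment selector is decomposed before the descriptor-table walk in
 * segment_base() above.  The helper name is hypothetical; it only restates
 * the arithmetic already used there.
 */
#if 0
static unsigned long descriptor_addr(unsigned long table_base, u16 selector)
{
        unsigned rpl   = selector & 3;          /* bits 1:0, requested PL    */
        unsigned ti    = (selector >> 2) & 1;   /* bit 2, 1 => LDT, 0 => GDT */
        unsigned index = selector >> 3;         /* bits 15:3                 */

        /* Descriptors are 8 bytes: base + index * 8 == base + (selector & ~7). */
        (void)rpl;
        (void)ti;
        return table_base + index * 8;
}
#endif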

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        /* Same value whether the irqchip is in kernel or in userspace. */
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = false;
        vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;

        if (vcpu->arch.exception.pending) {
                switch(vcpu->arch.exception.nr) {
                case DF_VECTOR:
                        /* triple fault -> shutdown */
                        set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                        return;
                case PF_VECTOR:
                        vcpu->arch.exception.nr = DF_VECTOR;
                        vcpu->arch.exception.error_code = 0;
                        return;
                default:
                        /* replace the previous exception with a new one in
                           the hope that instruction re-execution will
                           regenerate the lost exception */
                        vcpu->arch.exception.pending = false;
                        break;
                }
        }
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
        if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
                return true;
        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
        return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * Load the PAE pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if (is_present_gpte(pdpte[i]) &&
                    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_dirty);
out:

        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
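
/*
 * Editor's note (sketch, not driver code): in PAE mode CR3 points at a
 * 32-byte-aligned table of four 8-byte pdptrs.  The shifts above express
 * its offset within the page in u64-sized units; equivalent byte
 * arithmetic:
 */
#if 0
        gpa_t    pdpt_gpa = cr3 & ~31ul;                  /* 32-byte aligned   */
        gfn_t    pdpt_gfn = pdpt_gpa >> PAGE_SHIFT;       /* page holding PDPT */
        unsigned byte_off = pdpt_gpa & (PAGE_SIZE - 1);   /* == offset * 8     */
#endif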

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

        return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;

                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }

        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;

        kvm_mmu_reset_context(vcpu);
        return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = vcpu->arch.cr4;
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                kvm_inject_gp(vcpu, 0);
        else {
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}
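
/*
 * Editor's sketch: bit() is used with the X86_FEATURE_* numbers, which
 * count bits across successive 32-bit cpuid words, hence the "& 31".
 * For example (mirroring the checks in set_efer() below; the flag name
 * is hypothetical):
 */
#if 0
        if (feat->ecx & bit(X86_FEATURE_SVM))
                svm_is_advertised = true;
#endif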

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;

        vcpu->arch.shadow_efer = efer;

        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
       efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        static int version;
        struct pvclock_wall_clock wc;
        struct timespec now, sys, boot;

        if (!wall_clock)
                return;

        version++;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        now = current_kernel_time();
        ktime_get_ts(&sys);
        boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}
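
/*
 * Editor's sketch (not driver code): the same value computed portably
 * with 64-bit arithmetic.  "divl %4" divides the 64-bit edx:eax pair,
 * i.e. (dividend << 32) with eax zeroed, by the divisor and leaves the
 * quotient in eax.
 */
#if 0
static uint32_t div_frac_portable(uint32_t dividend, uint32_t divisor)
{
        return (uint32_t)(((uint64_t)dividend << 32) / divisor);
}
#endif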

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}
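
/*
 * Editor's sketch of the consumer side: a pvclock guest converts a TSC
 * delta to nanoseconds with the shift and multiplier computed above,
 * roughly as below (simplified; real guests keep a wider intermediate
 * for the multiply).
 */
#if 0
static uint64_t pvclock_tsc_delta_to_ns(struct pvclock_vcpu_time_info *hv,
                                        uint64_t delta)
{
        if (hv->tsc_shift >= 0)
                delta <<= hv->tsc_shift;
        else
                delta >>= -hv->tsc_shift;
        return (delta * hv->tsc_to_system_mul) >> 32;
}
#endif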

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;

        if (!vcpu->time_page)
                return;

        this_tsc_khz = get_cpu_var(cpu_tsc_khz);
        if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
                kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = this_tsc_khz;
        }
        put_cpu_var(cpu_tsc_khz);

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec);
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
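
/*
 * Editor's sketch of why hv_clock.version must end up even: guests do a
 * seqlock-style read, retrying while the version is odd or changes under
 * them (simplified pseudo-C; barriers and field access details omitted):
 */
#if 0
        do {
                version = hv_clock->version;
                rmb();
                system_time = hv_clock->system_time;    /* plus the tsc math */
                rmb();
        } while ((version & 1) || (version != hv_clock->version));
#endif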

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
        struct kvm_vcpu_arch *vcpu = &v->arch;

        if (!vcpu->time_page)
                return 0;
        set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
        return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static bool valid_pat_type(unsigned t)
{
        return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
        return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
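
/*
 * Editor's note: the masks above are bitmaps over the architecturally
 * defined memory types: 0x73 == (1<<0)|(1<<1)|(1<<4)|(1<<5)|(1<<6) for
 * UC, WC, WT, WP and WB; PAT additionally allows type 7 (UC-), which
 * adds (1<<7) and gives 0xf3.
 */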

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int i;

        if (!msr_mtrr_valid(msr))
                return false;

        if (msr == MSR_IA32_CR_PAT) {
                for (i = 0; i < 8; i++)
                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
                return valid_mtrr_type(data & 0xff);
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
                for (i = 0; i < 8 ; i++)
                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        }

        /* variable MTRRs */
        return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!mtrr_valid(vcpu, msr, data))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        kvm_mmu_reset_context(vcpu);
        return 0;
}
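
/*
 * Editor's sketch: variable-range MTRRs come as PHYSBASEn/PHYSMASKn MSR
 * pairs starting at 0x200, so the index math above is equivalent to:
 *
 *      0x200 -> var_ranges[0].base   0x201 -> var_ranges[0].mask
 *      0x202 -> var_ranges[1].base   0x203 -> var_ranges[1].mask  ...
 */
#if 0
        int idx          = (msr - 0x200) / 2;   /* which base/mask pair  */
        int is_mtrr_mask = (msr - 0x200) & 1;   /* 0 => base, 1 => mask  */
#endif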

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 mcg_cap = vcpu->arch.mcg_cap;
        unsigned bank_num = mcg_cap & 0xff;

        switch (msr) {
        case MSR_IA32_MCG_STATUS:
                vcpu->arch.mcg_status = data;
                break;
        case MSR_IA32_MCG_CTL:
                if (!(mcg_cap & MCG_CTL_P))
                        return 1;
                if (data != 0 && data != ~(u64)0)
                        return -1;
                vcpu->arch.mcg_ctl = data;
                break;
        default:
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
                        /* only 0 or all 1s can be written to IA32_MCi_CTL */
                        if ((offset & 0x3) == 0 &&
                            data != 0 && data != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
                }
                return 1;
        }
        return 0;
}
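
/*
 * Editor's note: each MCE bank spans four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL (CTL, STATUS, ADDR, MISC), which is what the
 * "offset & 0x3" test above relies on.  Sketch of the decomposition:
 */
#if 0
        u32 bank = (msr - MSR_IA32_MC0_CTL) / 4;
        u32 reg  = (msr - MSR_IA32_MC0_CTL) & 3; /* 0=CTL 1=STATUS 2=ADDR 3=MISC */
#endif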

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                data);
                        return 1;
                }
                break;
        case MSR_FAM10H_MMIO_CONF_BASE:
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
                                "0x%llx\n", data);
                        return 1;
                }
                break;
        case MSR_AMD64_NB_CFG:
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_request_guest_time_update(vcpu);
                break;
        }
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return set_msr_mce(vcpu, msr, data);

        /* Performance counters are not protected by a CPUID bit,
         * so we should check all of them in the generic path for the sake of
         * cross vendor migration.
         * Writing a zero into the event select MSRs disables them,
         * which we perfectly emulate ;-). Any other value should be at least
         * reported, some guests depend on them.
         */
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_EVNTSEL3:
                if (data != 0)
                        pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                                "0x%x data 0x%llx\n", msr, data);
                break;
        /* at least RHEL 4 unconditionally writes to the perfctr registers,
         * so we ignore writes to make it happy.
         */
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
        case MSR_K7_PERFCTR3:
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                msr, data);
                        return 1;
                } else {
                        pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
                                msr, data);
                        break;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;
        u64 mcg_cap = vcpu->arch.mcg_cap;
        unsigned bank_num = mcg_cap & 0xff;

        switch (msr) {
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
                data = 0;
                break;
        case MSR_IA32_MCG_CAP:
                data = vcpu->arch.mcg_cap;
                break;
        case MSR_IA32_MCG_CTL:
                if (!(mcg_cap & MCG_CTL_P))
                        return 1;
                data = vcpu->arch.mcg_ctl;
                break;
        case MSR_IA32_MCG_STATUS:
                data = vcpu->arch.mcg_status;
                break;
        default:
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
                        data = vcpu->arch.mce_banks[offset];
                        break;
                }
                return 1;
        }
        *pdata = data;
        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_K8_SYSCFG:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
        case MSR_K8_INT_PENDING_MSG:
        case MSR_AMD64_NB_CFG:
        case MSR_FAM10H_MMIO_CONF_BASE:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_read(vcpu, msr, pdata);
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                        return 1;
                } else {
                        pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
                        data = 0;
                }
                break;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
        up_read(&vcpu->kvm->slots_lock);

        vcpu_put(vcpu);

        return i;
}
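
/*
 * Editor's sketch of the userspace side served by msr_io() below: one
 * MSR read via KVM_GET_MSRS on a vcpu fd (error handling omitted; the
 * trailing-entry struct is the usual idiom for the flexible
 * kvm_msrs::entries array).  KVM_GET_MSRS returns the number of MSRs
 * processed.
 */
#if 0
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } one_msr = {
                .hdr.nmsrs   = 1,
                .entry.index = MSR_IA32_MISC_ENABLE,
        };

        if (ioctl(vcpu_fd, KVM_GET_MSRS, &one_msr) == 1)
                printf("MISC_ENABLE = %#llx\n",
                       (unsigned long long)one_msr.entry.data);
#endif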

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_HLT:
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
        case KVM_CAP_CLOCKSOURCE:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_PIT2:
        case KVM_CAP_PIT_STATE2:
        case KVM_CAP_SET_IDENTITY_MAP_ADDR:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
        case KVM_CAP_NR_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = iommu_found();
                break;
        case KVM_CAP_MCE:
                r = KVM_MAX_MCE_BANKS;
                break;
        default:
                r = 0;
                break;
        }
        return r;

}
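
/*
 * Editor's sketch: userspace probes these capabilities with
 * KVM_CHECK_EXTENSION on the /dev/kvm fd, e.g.:
 */
#if 0
        int kvm_fd = open("/dev/kvm", O_RDWR);
        int r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT2);
        /* r == 1 here would mean KVM_CREATE_PIT2 is available. */
#endif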

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < msr_list.nmsrs)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SUPPORTED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
                                                      cpuid_arg->entries);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        case KVM_X86_GET_MCE_CAP_SUPPORTED: {
                u64 mce_cap;

                mce_cap = KVM_MCE_CAP_SUPPORTED;
                r = -EFAULT;
                if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
                        goto out;
                r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
        if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0))
                per_cpu(cpu_tsc_khz, cpu) = cpufreq_quick_get(cpu);
        kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}
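
/*
 * Editor's note: bit 20 of CPUID.80000001H:EDX is the NX flag, so the
 * mask above is equivalent to clearing bit(X86_FEATURE_NX); hiding it
 * keeps a guest from trying to set EFER.NX on a host without NX.
 */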
1366
1367 /* when an old userspace process fills a new kernel module */
1368 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1369                                     struct kvm_cpuid *cpuid,
1370                                     struct kvm_cpuid_entry __user *entries)
1371 {
1372         int r, i;
1373         struct kvm_cpuid_entry *cpuid_entries;
1374
1375         r = -E2BIG;
1376         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1377                 goto out;
1378         r = -ENOMEM;
1379         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1380         if (!cpuid_entries)
1381                 goto out;
1382         r = -EFAULT;
1383         if (copy_from_user(cpuid_entries, entries,
1384                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1385                 goto out_free;
1386         for (i = 0; i < cpuid->nent; i++) {
1387                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1388                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1389                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1390                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1391                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1392                 vcpu->arch.cpuid_entries[i].index = 0;
1393                 vcpu->arch.cpuid_entries[i].flags = 0;
1394                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1395                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1396                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
1397         }
1398         vcpu->arch.cpuid_nent = cpuid->nent;
1399         cpuid_fix_nx_cap(vcpu);
1400         r = 0;
1401         kvm_apic_set_version(vcpu);
1402
1403 out_free:
1404         vfree(cpuid_entries);
1405 out:
1406         return r;
1407 }
1408
1409 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1410                                      struct kvm_cpuid2 *cpuid,
1411                                      struct kvm_cpuid_entry2 __user *entries)
1412 {
1413         int r;
1414
1415         r = -E2BIG;
1416         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1417                 goto out;
1418         r = -EFAULT;
1419         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
1420                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
1421                 goto out;
1422         vcpu->arch.cpuid_nent = cpuid->nent;
1423         kvm_apic_set_version(vcpu);
1424         return 0;
1425
1426 out:
1427         return r;
1428 }
1429
1430 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1431                                      struct kvm_cpuid2 *cpuid,
1432                                      struct kvm_cpuid_entry2 __user *entries)
1433 {
1434         int r;
1435
1436         r = -E2BIG;
1437         if (cpuid->nent < vcpu->arch.cpuid_nent)
1438                 goto out;
1439         r = -EFAULT;
1440         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
1441                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
1442                 goto out;
1443         return 0;
1444
1445 out:
1446         cpuid->nent = vcpu->arch.cpuid_nent;
1447         return r;
1448 }
1449
1450 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1451                            u32 index)
1452 {
1453         entry->function = function;
1454         entry->index = index;
1455         cpuid_count(entry->function, entry->index,
1456                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1457         entry->flags = 0;
1458 }
1459
1460 #define F(x) bit(X86_FEATURE_##x)
1461
1462 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1463                          u32 index, int *nent, int maxnent)
1464 {
1465         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1466         unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
1467 #ifdef CONFIG_X86_64
1468         unsigned f_lm = F(LM);
1469 #else
1470         unsigned f_lm = 0;
1471 #endif
1472
1473         /* cpuid 1.edx */
1474         const u32 kvm_supported_word0_x86_features =
1475                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1476                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1477                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1478                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1479                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1480                 0 /* Reserved, DS, ACPI */ | F(MMX) |
1481                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1482                 0 /* HTT, TM, Reserved, PBE */;
1483         /* cpuid 0x80000001.edx */
1484         const u32 kvm_supported_word1_x86_features =
1485                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1486                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1487                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1488                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1489                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1490                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1491                 F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
1492                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1493         /* cpuid 1.ecx */
1494         const u32 kvm_supported_word4_x86_features =
1495                 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1496                 0 /* DS-CPL, VMX, SMX, EST */ |
1497                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1498                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1499                 0 /* Reserved, DCA */ | F(XMM4_1) |
1500                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
1501                 0 /* Reserved, XSAVE, OSXSAVE */;
1502         /* cpuid 0x80000001.ecx */
1503         const u32 kvm_supported_word6_x86_features =
1504                 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1505                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1506                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1507                 0 /* SKINIT */ | 0 /* WDT */;
1508
1509         /* all calls to cpuid_count() should be made on the same cpu */
1510         get_cpu();
1511         do_cpuid_1_ent(entry, function, index);
1512         ++*nent;
1513
1514         switch (function) {
1515         case 0:
1516                 entry->eax = min(entry->eax, (u32)0xb);
1517                 break;
1518         case 1:
1519                 entry->edx &= kvm_supported_word0_x86_features;
1520                 entry->ecx &= kvm_supported_word4_x86_features;
1521                 /* we support x2apic emulation even if the host does not
1522                  * support it, since we emulate x2apic in software */
1523                 entry->ecx |= F(X2APIC);
1524                 break;
1525         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1526          * may return different values. This forces us to get_cpu() before
1527          * issuing the first command, and also to emulate this annoying behavior
1528          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1529         case 2: {
1530                 int t, times = entry->eax & 0xff;
1531
1532                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1533                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
1534                 for (t = 1; t < times && *nent < maxnent; ++t) {
1535                         do_cpuid_1_ent(&entry[t], function, 0);
1536                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1537                         ++*nent;
1538                 }
1539                 break;
1540         }
1541         /* functions 4 and 0xb have an additional index. */
1542         case 4: {
1543                 int i, cache_type;
1544
1545                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1546                 /* read more entries until cache_type is zero */
1547                 for (i = 1; *nent < maxnent; ++i) {
1548                         cache_type = entry[i - 1].eax & 0x1f;
1549                         if (!cache_type)
1550                                 break;
1551                         do_cpuid_1_ent(&entry[i], function, i);
1552                         entry[i].flags |=
1553                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1554                         ++*nent;
1555                 }
1556                 break;
1557         }
1558         case 0xb: {
1559                 int i, level_type;
1560
1561                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1562                 /* read more entries until level_type is zero */
1563                 for (i = 1; *nent < maxnent; ++i) {
1564                         level_type = entry[i - 1].ecx & 0xff00;
1565                         if (!level_type)
1566                                 break;
1567                         do_cpuid_1_ent(&entry[i], function, i);
1568                         entry[i].flags |=
1569                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1570                         ++*nent;
1571                 }
1572                 break;
1573         }
1574         case 0x80000000:
1575                 entry->eax = min(entry->eax, 0x8000001a);
1576                 break;
1577         case 0x80000001:
1578                 entry->edx &= kvm_supported_word1_x86_features;
1579                 entry->ecx &= kvm_supported_word6_x86_features;
1580                 break;
1581         }
1582         put_cpu();
1583 }
1584
1585 #undef F
1586
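/*
 * KVM_GET_SUPPORTED_CPUID: enumerate the standard leaves (0 up to the
 * limit reported by leaf 0) and the extended leaves (0x80000000 up to
 * its reported limit) into a temporary table, then copy the result to
 * userspace.  Returns -E2BIG if the caller's table is too small.
 */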
1587 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1588                                      struct kvm_cpuid_entry2 __user *entries)
1589 {
1590         struct kvm_cpuid_entry2 *cpuid_entries;
1591         int limit, nent = 0, r = -E2BIG;
1592         u32 func;
1593
1594         if (cpuid->nent < 1)
1595                 goto out;
1596         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1597                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1598         r = -ENOMEM;
1599         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1600         if (!cpuid_entries)
1601                 goto out;
1602
1603         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1604         limit = cpuid_entries[0].eax;
1605         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1606                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1607                              &nent, cpuid->nent);
1608         r = -E2BIG;
1609         if (nent >= cpuid->nent)
1610                 goto out_free;
1611
1612         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1613         limit = cpuid_entries[nent - 1].eax;
1614         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1615                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1616                              &nent, cpuid->nent);
1617         r = -E2BIG;
1618         if (nent >= cpuid->nent)
1619                 goto out_free;
1620
1621         r = -EFAULT;
1622         if (copy_to_user(entries, cpuid_entries,
1623                          nent * sizeof(struct kvm_cpuid_entry2)))
1624                 goto out_free;
1625         cpuid->nent = nent;
1626         r = 0;
1627
1628 out_free:
1629         vfree(cpuid_entries);
1630 out:
1631         return r;
1632 }
1633
1634 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1635                                     struct kvm_lapic_state *s)
1636 {
1637         vcpu_load(vcpu);
1638         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
1639         vcpu_put(vcpu);
1640
1641         return 0;
1642 }
1643
1644 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1645                                     struct kvm_lapic_state *s)
1646 {
1647         vcpu_load(vcpu);
1648         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1649         kvm_apic_post_state_restore(vcpu);
1650         update_cr8_intercept(vcpu);
1651         vcpu_put(vcpu);
1652
1653         return 0;
1654 }
1655
1656 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1657                                     struct kvm_interrupt *irq)
1658 {
1659         if (irq->irq < 0 || irq->irq >= 256)
1660                 return -EINVAL;
1661         if (irqchip_in_kernel(vcpu->kvm))
1662                 return -ENXIO;
1663         vcpu_load(vcpu);
1664
1665         kvm_queue_interrupt(vcpu, irq->irq, false);
1666
1667         vcpu_put(vcpu);
1668
1669         return 0;
1670 }
1671
1672 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
1673 {
1674         vcpu_load(vcpu);
1675         kvm_inject_nmi(vcpu);
1676         vcpu_put(vcpu);
1677
1678         return 0;
1679 }
1680
1681 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1682                                            struct kvm_tpr_access_ctl *tac)
1683 {
1684         if (tac->flags)
1685                 return -EINVAL;
1686         vcpu->arch.tpr_access_reporting = !!tac->enabled;
1687         return 0;
1688 }
1689
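/*
 * KVM_X86_SETUP_MCE: configure machine-check emulation for a vcpu.
 * Bits 7:0 of mcg_cap carry the bank count; anything outside the
 * supported capability bits is rejected.  A sketch of the userspace
 * side (vcpu_fd is assumed to be an open vcpu file descriptor):
 *
 *	u64 mcg_cap = MCG_CTL_P | 10;	(10 banks, MCG_CTL present)
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 */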
1690 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1691                                         u64 mcg_cap)
1692 {
1693         int r;
1694         unsigned bank_num = mcg_cap & 0xff, bank;
1695
1696         r = -EINVAL;
1697         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
1698                 goto out;
1699         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
1700                 goto out;
1701         r = 0;
1702         vcpu->arch.mcg_cap = mcg_cap;
1703         /* Init IA32_MCG_CTL to all 1s */
1704         if (mcg_cap & MCG_CTL_P)
1705                 vcpu->arch.mcg_ctl = ~(u64)0;
1706         /* Init IA32_MCi_CTL to all 1s */
1707         for (bank = 0; bank < bank_num; bank++)
1708                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1709 out:
1710         return r;
1711 }
1712
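/*
 * KVM_X86_SET_MCE: inject a machine check into the guest.  Uncorrected
 * errors are dropped when MCG_CTL/MCi_CTL are not all 1s, escalate to
 * a triple fault when a previous MCE is still in progress or CR4.MCE
 * is clear, and otherwise raise #MC; corrected errors just update the
 * bank registers or set the overflow bit.
 */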
1713 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1714                                       struct kvm_x86_mce *mce)
1715 {
1716         u64 mcg_cap = vcpu->arch.mcg_cap;
1717         unsigned bank_num = mcg_cap & 0xff;
1718         u64 *banks = vcpu->arch.mce_banks;
1719
1720         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
1721                 return -EINVAL;
1722         /*
1723          * If IA32_MCG_CTL is not all 1s, uncorrected error
1724          * reporting is disabled.
1725          */
1726         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1727             vcpu->arch.mcg_ctl != ~(u64)0)
1728                 return 0;
1729         banks += 4 * mce->bank;
1730         /*
1731          * If IA32_MCi_CTL is not all 1s, uncorrected error
1732          * reporting is disabled for the bank.
1733          */
1734         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
1735                 return 0;
1736         if (mce->status & MCI_STATUS_UC) {
1737                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
1738                     !(vcpu->arch.cr4 & X86_CR4_MCE)) {
1739                         printk(KERN_DEBUG "kvm: set_mce: "
1740                                "injects mce exception while "
1741                                "previous one is in progress!\n");
1742                         set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1743                         return 0;
1744                 }
1745                 if (banks[1] & MCI_STATUS_VAL)
1746                         mce->status |= MCI_STATUS_OVER;
1747                 banks[2] = mce->addr;
1748                 banks[3] = mce->misc;
1749                 vcpu->arch.mcg_status = mce->mcg_status;
1750                 banks[1] = mce->status;
1751                 kvm_queue_exception(vcpu, MC_VECTOR);
1752         } else if (!(banks[1] & MCI_STATUS_VAL)
1753                    || !(banks[1] & MCI_STATUS_UC)) {
1754                 if (banks[1] & MCI_STATUS_VAL)
1755                         mce->status |= MCI_STATUS_OVER;
1756                 banks[2] = mce->addr;
1757                 banks[3] = mce->misc;
1758                 banks[1] = mce->status;
1759         } else
1760                 banks[1] |= MCI_STATUS_OVER;
1761         return 0;
1762 }
1763
1764 long kvm_arch_vcpu_ioctl(struct file *filp,
1765                          unsigned int ioctl, unsigned long arg)
1766 {
1767         struct kvm_vcpu *vcpu = filp->private_data;
1768         void __user *argp = (void __user *)arg;
1769         int r;
1770         struct kvm_lapic_state *lapic = NULL;
1771
1772         switch (ioctl) {
1773         case KVM_GET_LAPIC: {
1774                 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1775
1776                 r = -ENOMEM;
1777                 if (!lapic)
1778                         goto out;
1779                 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
1780                 if (r)
1781                         goto out;
1782                 r = -EFAULT;
1783                 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
1784                         goto out;
1785                 r = 0;
1786                 break;
1787         }
1788         case KVM_SET_LAPIC: {
1789                 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1790                 r = -ENOMEM;
1791                 if (!lapic)
1792                         goto out;
1793                 r = -EFAULT;
1794                 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
1795                         goto out;
1796                 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
1797                 if (r)
1798                         goto out;
1799                 r = 0;
1800                 break;
1801         }
1802         case KVM_INTERRUPT: {
1803                 struct kvm_interrupt irq;
1804
1805                 r = -EFAULT;
1806                 if (copy_from_user(&irq, argp, sizeof irq))
1807                         goto out;
1808                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1809                 if (r)
1810                         goto out;
1811                 r = 0;
1812                 break;
1813         }
1814         case KVM_NMI: {
1815                 r = kvm_vcpu_ioctl_nmi(vcpu);
1816                 if (r)
1817                         goto out;
1818                 r = 0;
1819                 break;
1820         }
1821         case KVM_SET_CPUID: {
1822                 struct kvm_cpuid __user *cpuid_arg = argp;
1823                 struct kvm_cpuid cpuid;
1824
1825                 r = -EFAULT;
1826                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1827                         goto out;
1828                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1829                 if (r)
1830                         goto out;
1831                 break;
1832         }
1833         case KVM_SET_CPUID2: {
1834                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1835                 struct kvm_cpuid2 cpuid;
1836
1837                 r = -EFAULT;
1838                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1839                         goto out;
1840                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1841                                               cpuid_arg->entries);
1842                 if (r)
1843                         goto out;
1844                 break;
1845         }
1846         case KVM_GET_CPUID2: {
1847                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1848                 struct kvm_cpuid2 cpuid;
1849
1850                 r = -EFAULT;
1851                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1852                         goto out;
1853                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1854                                               cpuid_arg->entries);
1855                 if (r)
1856                         goto out;
1857                 r = -EFAULT;
1858                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1859                         goto out;
1860                 r = 0;
1861                 break;
1862         }
1863         case KVM_GET_MSRS:
1864                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1865                 break;
1866         case KVM_SET_MSRS:
1867                 r = msr_io(vcpu, argp, do_set_msr, 0);
1868                 break;
1869         case KVM_TPR_ACCESS_REPORTING: {
1870                 struct kvm_tpr_access_ctl tac;
1871
1872                 r = -EFAULT;
1873                 if (copy_from_user(&tac, argp, sizeof tac))
1874                         goto out;
1875                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
1876                 if (r)
1877                         goto out;
1878                 r = -EFAULT;
1879                 if (copy_to_user(argp, &tac, sizeof tac))
1880                         goto out;
1881                 r = 0;
1882                 break;
1883         }
1884         case KVM_SET_VAPIC_ADDR: {
1885                 struct kvm_vapic_addr va;
1886
1887                 r = -EINVAL;
1888                 if (!irqchip_in_kernel(vcpu->kvm))
1889                         goto out;
1890                 r = -EFAULT;
1891                 if (copy_from_user(&va, argp, sizeof va))
1892                         goto out;
1893                 r = 0;
1894                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
1895                 break;
1896         }
1897         case KVM_X86_SETUP_MCE: {
1898                 u64 mcg_cap;
1899
1900                 r = -EFAULT;
1901                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
1902                         goto out;
1903                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
1904                 break;
1905         }
1906         case KVM_X86_SET_MCE: {
1907                 struct kvm_x86_mce mce;
1908
1909                 r = -EFAULT;
1910                 if (copy_from_user(&mce, argp, sizeof mce))
1911                         goto out;
1912                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1913                 break;
1914         }
1915         default:
1916                 r = -EINVAL;
1917         }
1918 out:
1919         kfree(lapic);
1920         return r;
1921 }
1922
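/*
 * KVM_SET_TSS_ADDR: the TSS region handed to the hardware spans three
 * pages, so reject any base address that would push it past the 4GB
 * boundary.
 */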
1923 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1924 {
1925         int ret;
1926
1927         if (addr > (unsigned int)(-3 * PAGE_SIZE))
1928                 return -1;
1929         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1930         return ret;
1931 }
1932
1933 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
1934                                               u64 ident_addr)
1935 {
1936         kvm->arch.ept_identity_map_addr = ident_addr;
1937         return 0;
1938 }
1939
1940 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1941                                           u32 kvm_nr_mmu_pages)
1942 {
1943         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1944                 return -EINVAL;
1945
1946         down_write(&kvm->slots_lock);
1947         spin_lock(&kvm->mmu_lock);
1948
1949         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
1950         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
1951
1952         spin_unlock(&kvm->mmu_lock);
1953         up_write(&kvm->slots_lock);
1954         return 0;
1955 }
1956
1957 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1958 {
1959         return kvm->arch.n_alloc_mmu_pages;
1960 }
1961
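/*
 * Translate a guest frame number through the memory alias table: if
 * gfn falls inside an alias window, return the corresponding frame in
 * the alias target; otherwise return it unchanged.
 */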
1962 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1963 {
1964         int i;
1965         struct kvm_mem_alias *alias;
1966
1967         for (i = 0; i < kvm->arch.naliases; ++i) {
1968                 alias = &kvm->arch.aliases[i];
1969                 if (gfn >= alias->base_gfn
1970                     && gfn < alias->base_gfn + alias->npages)
1971                         return alias->target_gfn + gfn - alias->base_gfn;
1972         }
1973         return gfn;
1974 }
1975
1976 /*
1977  * Set a new alias region.  Aliases map a portion of physical memory into
1978  * another portion.  This is useful for memory windows, for example the PC
1979  * VGA region.
1980  */
1981 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1982                                          struct kvm_memory_alias *alias)
1983 {
1984         int r, n;
1985         struct kvm_mem_alias *p;
1986
1987         r = -EINVAL;
1988         /* General sanity checks */
1989         if (alias->memory_size & (PAGE_SIZE - 1))
1990                 goto out;
1991         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1992                 goto out;
1993         if (alias->slot >= KVM_ALIAS_SLOTS)
1994                 goto out;
1995         if (alias->guest_phys_addr + alias->memory_size
1996             < alias->guest_phys_addr)
1997                 goto out;
1998         if (alias->target_phys_addr + alias->memory_size
1999             < alias->target_phys_addr)
2000                 goto out;
2001
2002         down_write(&kvm->slots_lock);
2003         spin_lock(&kvm->mmu_lock);
2004
2005         p = &kvm->arch.aliases[alias->slot];
2006         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2007         p->npages = alias->memory_size >> PAGE_SHIFT;
2008         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
2009
2010         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
2011                 if (kvm->arch.aliases[n - 1].npages)
2012                         break;
2013         kvm->arch.naliases = n;
2014
2015         spin_unlock(&kvm->mmu_lock);
2016         kvm_mmu_zap_all(kvm);
2017
2018         up_write(&kvm->slots_lock);
2019
2020         return 0;
2021
2022 out:
2023         return r;
2024 }
2025
2026 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2027 {
2028         int r;
2029
2030         r = 0;
2031         switch (chip->chip_id) {
2032         case KVM_IRQCHIP_PIC_MASTER:
2033                 memcpy(&chip->chip.pic,
2034                         &pic_irqchip(kvm)->pics[0],
2035                         sizeof(struct kvm_pic_state));
2036                 break;
2037         case KVM_IRQCHIP_PIC_SLAVE:
2038                 memcpy(&chip->chip.pic,
2039                         &pic_irqchip(kvm)->pics[1],
2040                         sizeof(struct kvm_pic_state));
2041                 break;
2042         case KVM_IRQCHIP_IOAPIC:
2043                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2044                 break;
2045         default:
2046                 r = -EINVAL;
2047                 break;
2048         }
2049         return r;
2050 }
2051
2052 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2053 {
2054         int r;
2055
2056         r = 0;
2057         switch (chip->chip_id) {
2058         case KVM_IRQCHIP_PIC_MASTER:
2059                 spin_lock(&pic_irqchip(kvm)->lock);
2060                 memcpy(&pic_irqchip(kvm)->pics[0],
2061                         &chip->chip.pic,
2062                         sizeof(struct kvm_pic_state));
2063                 spin_unlock(&pic_irqchip(kvm)->lock);
2064                 break;
2065         case KVM_IRQCHIP_PIC_SLAVE:
2066                 spin_lock(&pic_irqchip(kvm)->lock);
2067                 memcpy(&pic_irqchip(kvm)->pics[1],
2068                         &chip->chip.pic,
2069                         sizeof(struct kvm_pic_state));
2070                 spin_unlock(&pic_irqchip(kvm)->lock);
2071                 break;
2072         case KVM_IRQCHIP_IOAPIC:
2073                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2074                 break;
2075         default:
2076                 r = -EINVAL;
2077                 break;
2078         }
2079         kvm_pic_update_irq(pic_irqchip(kvm));
2080         return r;
2081 }
2082
2083 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2084 {
2085         int r = 0;
2086
2087         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2088         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2089         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2090         return r;
2091 }
2092
2093 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2094 {
2095         int r = 0;
2096
2097         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2098         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2099         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2100         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2101         return r;
2102 }
2103
2104 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2105 {
2106         int r = 0;
2107
2108         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2109         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2110                 sizeof(ps->channels));
2111         ps->flags = kvm->arch.vpit->pit_state.flags;
2112         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2113         return r;
2114 }
2115
2116 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2117 {
2118         int r = 0, start = 0;
2119         u32 prev_legacy, cur_legacy;
2120         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2121         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2122         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2123         if (!prev_legacy && cur_legacy)
2124                 start = 1;
2125         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2126                sizeof(kvm->arch.vpit->pit_state.channels));
2127         kvm->arch.vpit->pit_state.flags = ps->flags;
2128         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2129         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2130         return r;
2131 }
2132
2133 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2134                                  struct kvm_reinject_control *control)
2135 {
2136         if (!kvm->arch.vpit)
2137                 return -ENXIO;
2138         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2139         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2140         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2141         return 0;
2142 }
2143
2144 /*
2145  * Get (and clear) the dirty memory log for a memory slot.
2146  */
2147 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2148                                       struct kvm_dirty_log *log)
2149 {
2150         int r;
2151         int n;
2152         struct kvm_memory_slot *memslot;
2153         int is_dirty = 0;
2154
2155         down_write(&kvm->slots_lock);
2156
2157         r = kvm_get_dirty_log(kvm, log, &is_dirty);
2158         if (r)
2159                 goto out;
2160
2161         /* If nothing is dirty, don't bother messing with page tables. */
2162         if (is_dirty) {
2163                 spin_lock(&kvm->mmu_lock);
2164                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
2165                 spin_unlock(&kvm->mmu_lock);
2166                 memslot = &kvm->memslots[log->slot];
2167                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2168                 memset(memslot->dirty_bitmap, 0, n);
2169         }
2170         r = 0;
2171 out:
2172         up_write(&kvm->slots_lock);
2173         return r;
2174 }
2175
2176 long kvm_arch_vm_ioctl(struct file *filp,
2177                        unsigned int ioctl, unsigned long arg)
2178 {
2179         struct kvm *kvm = filp->private_data;
2180         void __user *argp = (void __user *)arg;
2181         int r = -ENOTTY;
2182         /*
2183          * This union makes it completely explicit to gcc-3.x
2184          * that these two variables' stack usage should be
2185          * combined, not added together.
2186          */
2187         union {
2188                 struct kvm_pit_state ps;
2189                 struct kvm_pit_state2 ps2;
2190                 struct kvm_memory_alias alias;
2191                 struct kvm_pit_config pit_config;
2192         } u;
2193
2194         switch (ioctl) {
2195         case KVM_SET_TSS_ADDR:
2196                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2197                 if (r < 0)
2198                         goto out;
2199                 break;
2200         case KVM_SET_IDENTITY_MAP_ADDR: {
2201                 u64 ident_addr;
2202
2203                 r = -EFAULT;
2204                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2205                         goto out;
2206                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2207                 if (r < 0)
2208                         goto out;
2209                 break;
2210         }
2211         case KVM_SET_MEMORY_REGION: {
2212                 struct kvm_memory_region kvm_mem;
2213                 struct kvm_userspace_memory_region kvm_userspace_mem;
2214
2215                 r = -EFAULT;
2216                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2217                         goto out;
2218                 kvm_userspace_mem.slot = kvm_mem.slot;
2219                 kvm_userspace_mem.flags = kvm_mem.flags;
2220                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2221                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2222                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2223                 if (r)
2224                         goto out;
2225                 break;
2226         }
2227         case KVM_SET_NR_MMU_PAGES:
2228                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2229                 if (r)
2230                         goto out;
2231                 break;
2232         case KVM_GET_NR_MMU_PAGES:
2233                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2234                 break;
2235         case KVM_SET_MEMORY_ALIAS:
2236                 r = -EFAULT;
2237                 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
2238                         goto out;
2239                 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
2240                 if (r)
2241                         goto out;
2242                 break;
2243         case KVM_CREATE_IRQCHIP:
2244                 r = -ENOMEM;
2245                 kvm->arch.vpic = kvm_create_pic(kvm);
2246                 if (kvm->arch.vpic) {
2247                         r = kvm_ioapic_init(kvm);
2248                         if (r) {
2249                                 kfree(kvm->arch.vpic);
2250                                 kvm->arch.vpic = NULL;
2251                                 goto out;
2252                         }
2253                 } else
2254                         goto out;
2255                 r = kvm_setup_default_irq_routing(kvm);
2256                 if (r) {
2257                         kfree(kvm->arch.vpic);
2258                         kfree(kvm->arch.vioapic);
2259                         goto out;
2260                 }
2261                 break;
2262         case KVM_CREATE_PIT:
2263                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2264                 goto create_pit;
2265         case KVM_CREATE_PIT2:
2266                 r = -EFAULT;
2267                 if (copy_from_user(&u.pit_config, argp,
2268                                    sizeof(struct kvm_pit_config)))
2269                         goto out;
2270         create_pit:
2271                 down_write(&kvm->slots_lock);
2272                 r = -EEXIST;
2273                 if (kvm->arch.vpit)
2274                         goto create_pit_unlock;
2275                 r = -ENOMEM;
2276                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
2277                 if (kvm->arch.vpit)
2278                         r = 0;
2279         create_pit_unlock:
2280                 up_write(&kvm->slots_lock);
2281                 break;
2282         case KVM_IRQ_LINE_STATUS:
2283         case KVM_IRQ_LINE: {
2284                 struct kvm_irq_level irq_event;
2285
2286                 r = -EFAULT;
2287                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2288                         goto out;
2289                 if (irqchip_in_kernel(kvm)) {
2290                         __s32 status;
2291                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2292                                         irq_event.irq, irq_event.level);
2293                         if (ioctl == KVM_IRQ_LINE_STATUS) {
2294                                 irq_event.status = status;
2295                                 if (copy_to_user(argp, &irq_event,
2296                                                         sizeof irq_event))
2297                                         goto out;
2298                         }
2299                         r = 0;
2300                 }
2301                 break;
2302         }
2303         case KVM_GET_IRQCHIP: {
2304                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2305                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
2306
2307                 r = -ENOMEM;
2308                 if (!chip)
2309                         goto out;
2310                 r = -EFAULT;
2311                 if (copy_from_user(chip, argp, sizeof *chip))
2312                         goto get_irqchip_out;
2313                 r = -ENXIO;
2314                 if (!irqchip_in_kernel(kvm))
2315                         goto get_irqchip_out;
2316                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
2317                 if (r)
2318                         goto get_irqchip_out;
2319                 r = -EFAULT;
2320                 if (copy_to_user(argp, chip, sizeof *chip))
2321                         goto get_irqchip_out;
2322                 r = 0;
2323         get_irqchip_out:
2324                 kfree(chip);
2325                 if (r)
2326                         goto out;
2327                 break;
2328         }
2329         case KVM_SET_IRQCHIP: {
2330                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2331                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
2332
2333                 r = -ENOMEM;
2334                 if (!chip)
2335                         goto out;
2336                 r = -EFAULT;
2337                 if (copy_from_user(chip, argp, sizeof *chip))
2338                         goto set_irqchip_out;
2339                 r = -ENXIO;
2340                 if (!irqchip_in_kernel(kvm))
2341                         goto set_irqchip_out;
2342                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
2343                 if (r)
2344                         goto set_irqchip_out;
2345                 r = 0;
2346         set_irqchip_out:
2347                 kfree(chip);
2348                 if (r)
2349                         goto out;
2350                 break;
2351         }
2352         case KVM_GET_PIT: {
2353                 r = -EFAULT;
2354                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
2355                         goto out;
2356                 r = -ENXIO;
2357                 if (!kvm->arch.vpit)
2358                         goto out;
2359                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
2360                 if (r)
2361                         goto out;
2362                 r = -EFAULT;
2363                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
2364                         goto out;
2365                 r = 0;
2366                 break;
2367         }
2368         case KVM_SET_PIT: {
2369                 r = -EFAULT;
2370                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
2371                         goto out;
2372                 r = -ENXIO;
2373                 if (!kvm->arch.vpit)
2374                         goto out;
2375                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
2376                 if (r)
2377                         goto out;
2378                 r = 0;
2379                 break;
2380         }
2381         case KVM_GET_PIT2: {
2382                 r = -ENXIO;
2383                 if (!kvm->arch.vpit)
2384                         goto out;
2385                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2386                 if (r)
2387                         goto out;
2388                 r = -EFAULT;
2389                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2390                         goto out;
2391                 r = 0;
2392                 break;
2393         }
2394         case KVM_SET_PIT2: {
2395                 r = -EFAULT;
2396                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2397                         goto out;
2398                 r = -ENXIO;
2399                 if (!kvm->arch.vpit)
2400                         goto out;
2401                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2402                 if (r)
2403                         goto out;
2404                 r = 0;
2405                 break;
2406         }
2407         case KVM_REINJECT_CONTROL: {
2408                 struct kvm_reinject_control control;
2409                 r =  -EFAULT;
2410                 if (copy_from_user(&control, argp, sizeof(control)))
2411                         goto out;
2412                 r = kvm_vm_ioctl_reinject(kvm, &control);
2413                 if (r)
2414                         goto out;
2415                 r = 0;
2416                 break;
2417         }
2418         default:
2419                 ;
2420         }
2421 out:
2422         return r;
2423 }
2424
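/*
 * Probe each MSR in msrs_to_save with rdmsr_safe() and compact the
 * list in place so that only MSRs the host actually implements are
 * saved/restored.
 */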
2425 static void kvm_init_msr_list(void)
2426 {
2427         u32 dummy[2];
2428         unsigned i, j;
2429
2430         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2431                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2432                         continue;
2433                 if (j < i)
2434                         msrs_to_save[j] = msrs_to_save[i];
2435                 j++;
2436         }
2437         num_msrs_to_save = j;
2438 }
2439
2440 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
2441                            const void *v)
2442 {
2443         if (vcpu->arch.apic &&
2444             !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
2445                 return 0;
2446
2447         return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
2448 }
2449
2450 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2451 {
2452         if (vcpu->arch.apic &&
2453             !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
2454                 return 0;
2455
2456         return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
2457 }
2458
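/*
 * Copy from a guest virtual address range, one page at a time: each
 * iteration translates the gva through the guest MMU and reads at
 * most up to the next page boundary.
 */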
2459 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2460                                struct kvm_vcpu *vcpu)
2461 {
2462         void *data = val;
2463         int r = X86EMUL_CONTINUE;
2464
2465         while (bytes) {
2466                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2467                 unsigned offset = addr & (PAGE_SIZE-1);
2468                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2469                 int ret;
2470
2471                 if (gpa == UNMAPPED_GVA) {
2472                         r = X86EMUL_PROPAGATE_FAULT;
2473                         goto out;
2474                 }
2475                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2476                 if (ret < 0) {
2477                         r = X86EMUL_UNHANDLEABLE;
2478                         goto out;
2479                 }
2480
2481                 bytes -= toread;
2482                 data += toread;
2483                 addr += toread;
2484         }
2485 out:
2486         return r;
2487 }
2488
2489 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2490                                 struct kvm_vcpu *vcpu)
2491 {
2492         void *data = val;
2493         int r = X86EMUL_CONTINUE;
2494
2495         while (bytes) {
2496                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2497                 unsigned offset = addr & (PAGE_SIZE-1);
2498                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2499                 int ret;
2500
2501                 if (gpa == UNMAPPED_GVA) {
2502                         r = X86EMUL_PROPAGATE_FAULT;
2503                         goto out;
2504                 }
2505                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2506                 if (ret < 0) {
2507                         r = X86EMUL_UNHANDLEABLE;
2508                         goto out;
2509                 }
2510
2511                 bytes -= towrite;
2512                 data += towrite;
2513                 addr += towrite;
2514         }
2515 out:
2516         return r;
2517 }
2518
2519
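/*
 * Emulated memory read: satisfy it from a completed MMIO transaction
 * if one is pending, from guest memory when the gva translates to
 * RAM, or from an in-kernel MMIO device; otherwise flag the access
 * so userspace can complete it (X86EMUL_UNHANDLEABLE).
 */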
2520 static int emulator_read_emulated(unsigned long addr,
2521                                   void *val,
2522                                   unsigned int bytes,
2523                                   struct kvm_vcpu *vcpu)
2524 {
2525         gpa_t                 gpa;
2526
2527         if (vcpu->mmio_read_completed) {
2528                 memcpy(val, vcpu->mmio_data, bytes);
2529                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
2530                                vcpu->mmio_phys_addr, *(u64 *)val);
2531                 vcpu->mmio_read_completed = 0;
2532                 return X86EMUL_CONTINUE;
2533         }
2534
2535         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2536
2537         /* For APIC access vmexit */
2538         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2539                 goto mmio;
2540
2541         if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2542                                 == X86EMUL_CONTINUE)
2543                 return X86EMUL_CONTINUE;
2544         if (gpa == UNMAPPED_GVA)
2545                 return X86EMUL_PROPAGATE_FAULT;
2546
2547 mmio:
2548         /*
2549          * Is this MMIO handled locally?
2550          */
2551         if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
2552                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
2553                 return X86EMUL_CONTINUE;
2554         }
2555
2556         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
2557
2558         vcpu->mmio_needed = 1;
2559         vcpu->mmio_phys_addr = gpa;
2560         vcpu->mmio_size = bytes;
2561         vcpu->mmio_is_write = 0;
2562
2563         return X86EMUL_UNHANDLEABLE;
2564 }
2565
2566 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2567                           const void *val, int bytes)
2568 {
2569         int ret;
2570
2571         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2572         if (ret < 0)
2573                 return 0;
2574         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2575         return 1;
2576 }
2577
2578 static int emulator_write_emulated_onepage(unsigned long addr,
2579                                            const void *val,
2580                                            unsigned int bytes,
2581                                            struct kvm_vcpu *vcpu)
2582 {
2583         gpa_t                 gpa;
2584
2585         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2586
2587         if (gpa == UNMAPPED_GVA) {
2588                 kvm_inject_page_fault(vcpu, addr, 2);
2589                 return X86EMUL_PROPAGATE_FAULT;
2590         }
2591
2592         /* For APIC access vmexit */
2593         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2594                 goto mmio;
2595
2596         if (emulator_write_phys(vcpu, gpa, val, bytes))
2597                 return X86EMUL_CONTINUE;
2598
2599 mmio:
2600         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
2601         /*
2602          * Is this MMIO handled locally?
2603          */
2604         if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
2605                 return X86EMUL_CONTINUE;
2606
2607         vcpu->mmio_needed = 1;
2608         vcpu->mmio_phys_addr = gpa;
2609         vcpu->mmio_size = bytes;
2610         vcpu->mmio_is_write = 1;
2611         memcpy(vcpu->mmio_data, val, bytes);
2612
2613         return X86EMUL_CONTINUE;
2614 }
2615
2616 int emulator_write_emulated(unsigned long addr,
2617                                    const void *val,
2618                                    unsigned int bytes,
2619                                    struct kvm_vcpu *vcpu)
2620 {
2621         /* Crossing a page boundary? */
2622         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2623                 int rc, now;
2624
2625                 now = -addr & ~PAGE_MASK;
2626                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2627                 if (rc != X86EMUL_CONTINUE)
2628                         return rc;
2629                 addr += now;
2630                 val += now;
2631                 bytes -= now;
2632         }
2633         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2634 }
2635 EXPORT_SYMBOL_GPL(emulator_write_emulated);
2636
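/*
 * Emulate cmpxchg as an unconditional write (hence the printk_once
 * warning below), except that on 32-bit hosts an 8-byte cmpxchg to
 * normal guest RAM is first performed atomically with set_64bit()
 * before going through the normal write path, so the guest never
 * observes a torn 64-bit value.
 */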
2637 static int emulator_cmpxchg_emulated(unsigned long addr,
2638                                      const void *old,
2639                                      const void *new,
2640                                      unsigned int bytes,
2641                                      struct kvm_vcpu *vcpu)
2642 {
2643         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
2644 #ifndef CONFIG_X86_64
2645         /* guest cmpxchg8b has to be emulated atomically */
2646         if (bytes == 8) {
2647                 gpa_t gpa;
2648                 struct page *page;
2649                 char *kaddr;
2650                 u64 val;
2651
2652                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2653
2654                 if (gpa == UNMAPPED_GVA ||
2655                    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2656                         goto emul_write;
2657
2658                 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2659                         goto emul_write;
2660
2661                 val = *(u64 *)new;
2662
2663                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2664
2665                 kaddr = kmap_atomic(page, KM_USER0);
2666                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2667                 kunmap_atomic(kaddr, KM_USER0);
2668                 kvm_release_page_dirty(page);
2669         }
2670 emul_write:
2671 #endif
2672
2673         return emulator_write_emulated(addr, new, bytes, vcpu);
2674 }
2675
2676 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2677 {
2678         return kvm_x86_ops->get_segment_base(vcpu, seg);
2679 }
2680
2681 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2682 {
2683         kvm_mmu_invlpg(vcpu, address);
2684         return X86EMUL_CONTINUE;
2685 }
2686
2687 int emulate_clts(struct kvm_vcpu *vcpu)
2688 {
2689         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2690         return X86EMUL_CONTINUE;
2691 }
2692
2693 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2694 {
2695         struct kvm_vcpu *vcpu = ctxt->vcpu;
2696
2697         switch (dr) {
2698         case 0 ... 3:
2699                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2700                 return X86EMUL_CONTINUE;
2701         default:
2702                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2703                 return X86EMUL_UNHANDLEABLE;
2704         }
2705 }
2706
2707 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2708 {
2709         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2710         int exception;
2711
2712         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2713         if (exception) {
2714                 /* FIXME: better handling */
2715                 return X86EMUL_UNHANDLEABLE;
2716         }
2717         return X86EMUL_CONTINUE;
2718 }
2719
2720 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2721 {
2722         u8 opcodes[4];
2723         unsigned long rip = kvm_rip_read(vcpu);
2724         unsigned long rip_linear;
2725
2726         if (!printk_ratelimit())
2727                 return;
2728
2729         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2730
2731         kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2732
2733         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2734                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2735 }
2736 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2737
2738 static struct x86_emulate_ops emulate_ops = {
2739         .read_std            = kvm_read_guest_virt,
2740         .read_emulated       = emulator_read_emulated,
2741         .write_emulated      = emulator_write_emulated,
2742         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
2743 };
2744
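/*
 * Force the register cache to be populated and mark every register
 * dirty, so the emulator can use ->regs directly (see the TODO in
 * emulate_instruction() below).
 */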
2745 static void cache_all_regs(struct kvm_vcpu *vcpu)
2746 {
2747         kvm_register_read(vcpu, VCPU_REGS_RAX);
2748         kvm_register_read(vcpu, VCPU_REGS_RSP);
2749         kvm_register_read(vcpu, VCPU_REGS_RIP);
2750         vcpu->arch.regs_dirty = ~0;
2751 }
2752
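/*
 * Top-level instruction emulation: decode (unless the caller already
 * did), optionally restrict #UD emulation to the VMMCALL/sysenter/
 * sysexit/syscall opcodes, execute, and fall back to userspace via
 * EMULATE_DO_MMIO for accesses the kernel cannot complete.
 */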
2753 int emulate_instruction(struct kvm_vcpu *vcpu,
2754                         unsigned long cr2,
2755                         u16 error_code,
2756                         int emulation_type)
2757 {
2758         int r, shadow_mask;
2759         struct decode_cache *c;
2760         struct kvm_run *run = vcpu->run;
2761
2762         kvm_clear_exception_queue(vcpu);
2763         vcpu->arch.mmio_fault_cr2 = cr2;
2764         /*
2765          * TODO: fix emulate.c to use guest_read/write_register
2766          * instead of direct ->regs accesses; that can save hundreds of
2767          * cycles on Intel for instructions that don't read/change RSP,
2768          * for example.
2769          */
2770         cache_all_regs(vcpu);
2771
2772         vcpu->mmio_is_write = 0;
2773         vcpu->arch.pio.string = 0;
2774
2775         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2776                 int cs_db, cs_l;
2777                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2778
2779                 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2780                 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2781                 vcpu->arch.emulate_ctxt.mode =
2782                         (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2783                         ? X86EMUL_MODE_REAL : cs_l
2784                         ? X86EMUL_MODE_PROT64 : cs_db
2785                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2786
2787                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2788
2789                 /* Only allow emulation of specific instructions on #UD
2790                  * (namely VMMCALL, sysenter, sysexit, syscall) */
2791                 c = &vcpu->arch.emulate_ctxt.decode;
2792                 if (emulation_type & EMULTYPE_TRAP_UD) {
2793                         if (!c->twobyte)
2794                                 return EMULATE_FAIL;
2795                         switch (c->b) {
2796                         case 0x01: /* VMMCALL */
2797                                 if (c->modrm_mod != 3 || c->modrm_rm != 1)
2798                                         return EMULATE_FAIL;
2799                                 break;
2800                         case 0x34: /* sysenter */
2801                         case 0x35: /* sysexit */
2802                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
2803                                         return EMULATE_FAIL;
2804                                 break;
2805                         case 0x05: /* syscall */
2806                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
2807                                         return EMULATE_FAIL;
2808                                 break;
2809                         default:
2810                                 return EMULATE_FAIL;
2811                         }
2812
2813                         if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
2814                                 return EMULATE_FAIL;
2815                 }
2816
2817                 ++vcpu->stat.insn_emulation;
2818                 if (r)  {
2819                         ++vcpu->stat.insn_emulation_fail;
2820                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2821                                 return EMULATE_DONE;
2822                         return EMULATE_FAIL;
2823                 }
2824         }
2825
2826         if (emulation_type & EMULTYPE_SKIP) {
2827                 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2828                 return EMULATE_DONE;
2829         }
2830
2831         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2832         shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
2833
2834         if (r == 0)
2835                 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
2836
2837         if (vcpu->arch.pio.string)
2838                 return EMULATE_DO_MMIO;
2839
2840         if ((r || vcpu->mmio_is_write) && run) {
2841                 run->exit_reason = KVM_EXIT_MMIO;
2842                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2843                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2844                 run->mmio.len = vcpu->mmio_size;
2845                 run->mmio.is_write = vcpu->mmio_is_write;
2846         }
2847
2848         if (r) {
2849                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2850                         return EMULATE_DONE;
2851                 if (!vcpu->mmio_needed) {
2852                         kvm_report_emulation_failure(vcpu, "mmio");
2853                         return EMULATE_FAIL;
2854                 }
2855                 return EMULATE_DO_MMIO;
2856         }
2857
2858         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2859
2860         if (vcpu->mmio_is_write) {
2861                 vcpu->mmio_needed = 0;
2862                 return EMULATE_DO_MMIO;
2863         }
2864
2865         return EMULATE_DONE;
2866 }
2867 EXPORT_SYMBOL_GPL(emulate_instruction);
2868
2869 static int pio_copy_data(struct kvm_vcpu *vcpu)
2870 {
2871         void *p = vcpu->arch.pio_data;
2872         gva_t q = vcpu->arch.pio.guest_gva;
2873         unsigned bytes;
2874         int ret;
2875
2876         bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2877         if (vcpu->arch.pio.in)
2878                 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2879         else
2880                 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2881         return ret;
2882 }
2883
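/*
 * Finish a port I/O operation after the data is available: latch IN
 * data into RAX for single transfers, and for string I/O copy the
 * data and advance RCX/RDI/RSI by the number of completed elements.
 */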
2884 int complete_pio(struct kvm_vcpu *vcpu)
2885 {
2886         struct kvm_pio_request *io = &vcpu->arch.pio;
2887         long delta;
2888         int r;
2889         unsigned long val;
2890
2891         if (!io->string) {
2892                 if (io->in) {
2893                         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2894                         memcpy(&val, vcpu->arch.pio_data, io->size);
2895                         kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2896                 }
2897         } else {
2898                 if (io->in) {
2899                         r = pio_copy_data(vcpu);
2900                         if (r)
2901                                 return r;
2902                 }
2903
2904                 delta = 1;
2905                 if (io->rep) {
2906                         delta *= io->cur_count;
2907                         /*
2908                          * The size of the register should really depend on
2909                          * the current address size.
2910                          */
2911                         val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2912                         val -= delta;
2913                         kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2914                 }
2915                 if (io->down)
2916                         delta = -delta;
2917                 delta *= io->size;
2918                 if (io->in) {
2919                         val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2920                         val += delta;
2921                         kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2922                 } else {
2923                         val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2924                         val += delta;
2925                         kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2926                 }
2927         }
2928
2929         io->count -= io->cur_count;
2930         io->cur_count = 0;
2931
2932         return 0;
2933 }
2934
2935 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
2936 {
2937         /* TODO: string I/O for in-kernel devices */
2938         int r;
2939
2940         if (vcpu->arch.pio.in)
2941                 r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
2942                                     vcpu->arch.pio.size, pd);
2943         else
2944                 r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
2945                                      vcpu->arch.pio.size, pd);
2946         return r;
2947 }
2948
2949 static int pio_string_write(struct kvm_vcpu *vcpu)
2950 {
2951         struct kvm_pio_request *io = &vcpu->arch.pio;
2952         void *pd = vcpu->arch.pio_data;
2953         int i, r = 0;
2954
2955         for (i = 0; i < io->cur_count; i++) {
2956                 if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
2957                                      io->port, io->size, pd)) {
2958                         r = -EOPNOTSUPP;
2959                         break;
2960                 }
2961                 pd += io->size;
2962         }
2963         return r;
2964 }
2965
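/*
 * Emulate a single (non-string) IN/OUT.  The kvm_run fields are
 * always filled in so userspace can handle the port if no in-kernel
 * device claims it; kernel_pio() returning zero means the access was
 * completed internally.
 */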
2966 int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
2967 {
2968         unsigned long val;
2969
2970         vcpu->run->exit_reason = KVM_EXIT_IO;
2971         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2972         vcpu->run->io.size = vcpu->arch.pio.size = size;
2973         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2974         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2975         vcpu->run->io.port = vcpu->arch.pio.port = port;
2976         vcpu->arch.pio.in = in;
2977         vcpu->arch.pio.string = 0;
2978         vcpu->arch.pio.down = 0;
2979         vcpu->arch.pio.rep = 0;
2980
2981         trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
2982                       size, 1);
2983
2984         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2985         memcpy(vcpu->arch.pio_data, &val, 4);
2986
2987         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
2988                 complete_pio(vcpu);
2989                 return 1;
2990         }
2991         return 0;
2992 }
2993 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2994
2995 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
2996                   int size, unsigned long count, int down,
2997                   gva_t address, int rep, unsigned port)
2998 {
2999         unsigned now, in_page;
3000         int ret = 0;
3001
3002         vcpu->run->exit_reason = KVM_EXIT_IO;
3003         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3004         vcpu->run->io.size = vcpu->arch.pio.size = size;
3005         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3006         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
3007         vcpu->run->io.port = vcpu->arch.pio.port = port;
3008         vcpu->arch.pio.in = in;
3009         vcpu->arch.pio.string = 1;
3010         vcpu->arch.pio.down = down;
3011         vcpu->arch.pio.rep = rep;
3012
3013         trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3014                       size, count);
3015
3016         if (!count) {
3017                 kvm_x86_ops->skip_emulated_instruction(vcpu);
3018                 return 1;
3019         }
3020
3021         if (!down)
3022                 in_page = PAGE_SIZE - offset_in_page(address);
3023         else
3024                 in_page = offset_in_page(address) + size;
3025         now = min(count, (unsigned long)in_page / size);
3026         if (!now)
3027                 now = 1;
3028         if (down) {
3029                 /*
3030                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
3031                  */
3032                 pr_unimpl(vcpu, "guest string pio down\n");
3033                 kvm_inject_gp(vcpu, 0);
3034                 return 1;
3035         }
3036         vcpu->run->io.count = now;
3037         vcpu->arch.pio.cur_count = now;
3038
3039         if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
3040                 kvm_x86_ops->skip_emulated_instruction(vcpu);
3041
3042         vcpu->arch.pio.guest_gva = address;
3043
3044         if (!vcpu->arch.pio.in) {
3045                 /* string PIO write */
3046                 ret = pio_copy_data(vcpu);
3047                 if (ret == X86EMUL_PROPAGATE_FAULT) {
3048                         kvm_inject_gp(vcpu, 0);
3049                         return 1;
3050                 }
3051                 if (ret == 0 && !pio_string_write(vcpu)) {
3052                         complete_pio(vcpu);
3053                         if (vcpu->arch.pio.count == 0)
3054                                 ret = 1;
3055                 }
3056         }
3057         /* no string PIO read support yet */
3058
3059         return ret;
3060 }
3061 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
3062
3063 static void bounce_off(void *info)
3064 {
3065         /* nothing */
3066 }
3067
3068 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
3069                                      void *data)
3070 {
3071         struct cpufreq_freqs *freq = data;
3072         struct kvm *kvm;
3073         struct kvm_vcpu *vcpu;
3074         int i, send_ipi = 0;
3075
3076         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
3077                 return 0;
3078         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
3079                 return 0;
3080         per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
3081
3082         spin_lock(&kvm_lock);
3083         list_for_each_entry(kvm, &vm_list, vm_list) {
3084                 kvm_for_each_vcpu(i, vcpu, kvm) {
3085                         if (vcpu->cpu != freq->cpu)
3086                                 continue;
3087                         if (!kvm_request_guest_time_update(vcpu))
3088                                 continue;
3089                         if (vcpu->cpu != smp_processor_id())
3090                                 send_ipi++;
3091                 }
3092         }
3093         spin_unlock(&kvm_lock);
3094
3095         if (freq->old < freq->new && send_ipi) {
3096                 /*
3097                  * We upscale the frequency.  Must make sure the guest
3098                  * doesn't see old kvmclock values while running with
3099                  * the new frequency, otherwise we risk the guest seeing
3100                  * time go backwards.
3101                  *
3102                  * In case we update the frequency for another cpu
3103                  * (which might be in guest context) send an interrupt
3104                  * to kick the cpu out of guest context.  Next time
3105                  * guest context is entered kvmclock will be updated,
3106                  * so the guest will not see stale values.
3107                  */
3108                 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
3109         }
3110         return 0;
3111 }
3112
3113 static struct notifier_block kvmclock_cpufreq_notifier_block = {
3114         .notifier_call  = kvmclock_cpufreq_notifier
3115 };
3116
3117 static void kvm_timer_init(void)
3118 {
3119         int cpu;
3120
3121         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
3122                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3123                                           CPUFREQ_TRANSITION_NOTIFIER);
3124                 for_each_online_cpu(cpu)
3125                         per_cpu(cpu_tsc_khz, cpu) = cpufreq_get(cpu);
3126         } else {
3127                 for_each_possible_cpu(cpu)
3128                         per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
3129         }
3130 }
3131
3132 int kvm_arch_init(void *opaque)
3133 {
3134         int r;
3135         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
3136
3137         if (kvm_x86_ops) {
3138                 printk(KERN_ERR "kvm: already loaded the other module\n");
3139                 r = -EEXIST;
3140                 goto out;
3141         }
3142
3143         if (!ops->cpu_has_kvm_support()) {
3144                 printk(KERN_ERR "kvm: no hardware support\n");
3145                 r = -EOPNOTSUPP;
3146                 goto out;
3147         }
3148         if (ops->disabled_by_bios()) {
3149                 printk(KERN_ERR "kvm: disabled by bios\n");
3150                 r = -EOPNOTSUPP;
3151                 goto out;
3152         }
3153
3154         r = kvm_mmu_module_init();
3155         if (r)
3156                 goto out;
3157
3158         kvm_init_msr_list();
3159
3160         kvm_x86_ops = ops;
3161         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
3162         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3163         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
3164                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
3165
3166         kvm_timer_init();
3167
3168         return 0;
3169
3170 out:
3171         return r;
3172 }
3173
3174 void kvm_arch_exit(void)
3175 {
3176         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3177                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3178                                             CPUFREQ_TRANSITION_NOTIFIER);
3179         kvm_x86_ops = NULL;
3180         kvm_mmu_module_exit();
3181 }
3182
3183 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3184 {
3185         ++vcpu->stat.halt_exits;
3186         if (irqchip_in_kernel(vcpu->kvm)) {
3187                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3188                 return 1;
3189         } else {
3190                 vcpu->run->exit_reason = KVM_EXIT_HLT;
3191                 return 0;
3192         }
3193 }
3194 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3195
3196 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3197                            unsigned long a1)
3198 {
3199         if (is_long_mode(vcpu))
3200                 return a0;
3201         else
3202                 return a0 | ((gpa_t)a1 << 32);
3203 }
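/*
 * Worked example: outside long mode a 64-bit gpa arrives split across two
 * 32-bit registers, so a0 = 0x1000 and a1 = 0x2 combine to 0x200001000;
 * in long mode a0 already holds the whole address and a1 is ignored.
 */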
3204
3205 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3206 {
3207         unsigned long nr, a0, a1, a2, a3, ret;
3208         int r = 1;
3209
3210         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3211         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3212         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3213         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3214         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
3215
3216         trace_kvm_hypercall(nr, a0, a1, a2, a3);
3217
3218         if (!is_long_mode(vcpu)) {
3219                 nr &= 0xFFFFFFFF;
3220                 a0 &= 0xFFFFFFFF;
3221                 a1 &= 0xFFFFFFFF;
3222                 a2 &= 0xFFFFFFFF;
3223                 a3 &= 0xFFFFFFFF;
3224         }
3225
3226         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3227                 ret = -KVM_EPERM;
3228                 goto out;
3229         }
3230
3231         switch (nr) {
3232         case KVM_HC_VAPIC_POLL_IRQ:
3233                 ret = 0;
3234                 break;
3235         case KVM_HC_MMU_OP:
3236                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3237                 break;
3238         default:
3239                 ret = -KVM_ENOSYS;
3240                 break;
3241         }
3242 out:
3243         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
3244         ++vcpu->stat.hypercalls;
3245         return r;
3246 }
3247 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
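/*
 * For reference, the guest-side counterpart loads the same registers that
 * are decoded above.  A sketch (guests normally use the kvm_hypercall*()
 * helpers from asm/kvm_para.h, which look roughly like this):
 *
 *	static inline long kvm_hypercall2(unsigned int nr,
 *					  unsigned long p1, unsigned long p2)
 *	{
 *		long ret;
 *		asm volatile(KVM_HYPERCALL
 *			     : "=a"(ret)
 *			     : "a"(nr), "b"(p1), "c"(p2)
 *			     : "memory");
 *		return ret;
 *	}
 */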
3248
3249 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3250 {
3251         char instruction[3];
3252         int ret = 0;
3253         unsigned long rip = kvm_rip_read(vcpu);
3254
3256         /*
3257          * Blow out the MMU to ensure that no other VCPU has an active mapping,
3258          * so that the updated hypercall appears atomically across all
3259          * VCPUs.
3260          */
3261         kvm_mmu_zap_all(vcpu->kvm);
3262
3263         kvm_x86_ops->patch_hypercall(vcpu, instruction);
3264         if (emulator_write_emulated(rip, instruction, 3, vcpu)
3265             != X86EMUL_CONTINUE)
3266                 ret = -EFAULT;
3267
3268         return ret;
3269 }
3270
3271 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3272 {
3273         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3274 }
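/*
 * Example: mk_cr_64(0xffffffff00000000ULL, 0x80000011) returns
 * 0xffffffff80000011 - the low 32 bits are replaced by the new value
 * while the high 32 bits of the current register are preserved.
 */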
3275
3276 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3277 {
3278         struct descriptor_table dt = { limit, base };
3279
3280         kvm_x86_ops->set_gdt(vcpu, &dt);
3281 }
3282
3283 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3284 {
3285         struct descriptor_table dt = { limit, base };
3286
3287         kvm_x86_ops->set_idt(vcpu, &dt);
3288 }
3289
3290 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3291                    unsigned long *rflags)
3292 {
3293         kvm_lmsw(vcpu, msw);
3294         *rflags = kvm_x86_ops->get_rflags(vcpu);
3295 }
3296
3297 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3298 {
3299         unsigned long value;
3300
3301         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3302         switch (cr) {
3303         case 0:
3304                 value = vcpu->arch.cr0;
3305                 break;
3306         case 2:
3307                 value = vcpu->arch.cr2;
3308                 break;
3309         case 3:
3310                 value = vcpu->arch.cr3;
3311                 break;
3312         case 4:
3313                 value = vcpu->arch.cr4;
3314                 break;
3315         case 8:
3316                 value = kvm_get_cr8(vcpu);
3317                 break;
3318         default:
3319                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3320                 return 0;
3321         }
3322
3323         return value;
3324 }
3325
3326 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3327                      unsigned long *rflags)
3328 {
3329         switch (cr) {
3330         case 0:
3331                 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
3332                 *rflags = kvm_x86_ops->get_rflags(vcpu);
3333                 break;
3334         case 2:
3335                 vcpu->arch.cr2 = val;
3336                 break;
3337         case 3:
3338                 kvm_set_cr3(vcpu, val);
3339                 break;
3340         case 4:
3341                 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
3342                 break;
3343         case 8:
3344                 kvm_set_cr8(vcpu, val & 0xfUL);
3345                 break;
3346         default:
3347                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3348         }
3349 }
3350
3351 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
3352 {
3353         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
3354         int j, nent = vcpu->arch.cpuid_nent;
3355
3356         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
3357         /* when no next entry is found, the current entry[i] is reselected */
3358         for (j = i + 1; ; j = (j + 1) % nent) {
3359                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
3360                 if (ej->function == e->function) {
3361                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3362                         return j;
3363                 }
3364         }
3365         return 0; /* silence gcc, even though control never reaches here */
3366 }
3367
3368 /* find an entry with matching function, matching index (if needed), and that
3369  * should be read next (if it's stateful) */
3370 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3371         u32 function, u32 index)
3372 {
3373         if (e->function != function)
3374                 return 0;
3375         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3376                 return 0;
3377         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
3378             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
3379                 return 0;
3380         return 1;
3381 }
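/*
 * Background for the STATEFUL/READ_NEXT flags: some CPUID functions,
 * classically function 2 (cache descriptors), expect software to execute
 * CPUID repeatedly and return a different part of the answer on each
 * iteration.  Such a function is stored as several entries sharing one
 * ->function value; KVM_CPUID_FLAG_STATE_READ_NEXT marks the entry the
 * next guest CPUID should see, and move_to_next_stateful_cpuid_entry()
 * advances that mark round-robin.
 */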
3382
3383 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3384                                               u32 function, u32 index)
3385 {
3386         int i;
3387         struct kvm_cpuid_entry2 *best = NULL;
3388
3389         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
3390                 struct kvm_cpuid_entry2 *e;
3391
3392                 e = &vcpu->arch.cpuid_entries[i];
3393                 if (is_matching_cpuid_entry(e, function, index)) {
3394                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3395                                 move_to_next_stateful_cpuid_entry(vcpu, i);
3396                         best = e;
3397                         break;
3398                 }
3399                 /*
3400                  * Both basic or both extended?
3401                  */
3402                 if (((e->function ^ function) & 0x80000000) == 0)
3403                         if (!best || e->function > best->function)
3404                                 best = e;
3405         }
3406         return best;
3407 }
3408
3409 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3410 {
3411         struct kvm_cpuid_entry2 *best;
3412
3413         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3414         if (best)
3415                 return best->eax & 0xff;
3416         return 36;
3417 }
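/*
 * CPUID 0x80000008 reports the supported physical address width in
 * EAX[7:0]; when the leaf is absent, 36 bits is a conservative legacy
 * default (the classic PAE limit).
 */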
3418
3419 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3420 {
3421         u32 function, index;
3422         struct kvm_cpuid_entry2 *best;
3423
3424         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3425         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3426         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3427         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3428         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3429         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3430         best = kvm_find_cpuid_entry(vcpu, function, index);
3431         if (best) {
3432                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3433                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3434                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3435                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
3436         }
3437         kvm_x86_ops->skip_emulated_instruction(vcpu);
3438         trace_kvm_cpuid(function,
3439                         kvm_register_read(vcpu, VCPU_REGS_RAX),
3440                         kvm_register_read(vcpu, VCPU_REGS_RBX),
3441                         kvm_register_read(vcpu, VCPU_REGS_RCX),
3442                         kvm_register_read(vcpu, VCPU_REGS_RDX));
3443 }
3444 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
3445
3446 /*
3447  * Check if userspace requested an interrupt window, and that the
3448  * interrupt window is open.
3449  *
3450  * No need to exit to userspace if we already have an interrupt queued.
3451  */
3452 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
3453 {
3454         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
3455                 vcpu->run->request_interrupt_window &&
3456                 kvm_arch_interrupt_allowed(vcpu));
3457 }
3458
3459 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
3460 {
3461         struct kvm_run *kvm_run = vcpu->run;
3462
3463         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
3464         kvm_run->cr8 = kvm_get_cr8(vcpu);
3465         kvm_run->apic_base = kvm_get_apic_base(vcpu);
3466         if (irqchip_in_kernel(vcpu->kvm))
3467                 kvm_run->ready_for_interrupt_injection = 1;
3468         else
3469                 kvm_run->ready_for_interrupt_injection =
3470                         kvm_arch_interrupt_allowed(vcpu) &&
3471                         !kvm_cpu_has_interrupt(vcpu) &&
3472                         !kvm_event_needs_reinjection(vcpu);
3473 }
3474
3475 static void vapic_enter(struct kvm_vcpu *vcpu)
3476 {
3477         struct kvm_lapic *apic = vcpu->arch.apic;
3478         struct page *page;
3479
3480         if (!apic || !apic->vapic_addr)
3481                 return;
3482
3483         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3484
3485         vcpu->arch.apic->vapic_page = page;
3486 }
3487
3488 static void vapic_exit(struct kvm_vcpu *vcpu)
3489 {
3490         struct kvm_lapic *apic = vcpu->arch.apic;
3491
3492         if (!apic || !apic->vapic_addr)
3493                 return;
3494
3495         down_read(&vcpu->kvm->slots_lock);
3496         kvm_release_page_dirty(apic->vapic_page);
3497         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3498         up_read(&vcpu->kvm->slots_lock);
3499 }
3500
3501 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3502 {
3503         int max_irr, tpr;
3504
3505         if (!kvm_x86_ops->update_cr8_intercept)
3506                 return;
3507
3508         if (!vcpu->arch.apic)
3509                 return;
3510
3511         if (!vcpu->arch.apic->vapic_addr)
3512                 max_irr = kvm_lapic_find_highest_irr(vcpu);
3513         else
3514                 max_irr = -1;
3515
3516         if (max_irr != -1)
3517                 max_irr >>= 4;
3518
3519         tpr = kvm_lapic_get_cr8(vcpu);
3520
3521         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3522 }
3523
3524 static void inject_pending_event(struct kvm_vcpu *vcpu)
3525 {
3526         /* try to reinject previous events if any */
3527         if (vcpu->arch.exception.pending) {
3528                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
3529                                           vcpu->arch.exception.has_error_code,
3530                                           vcpu->arch.exception.error_code);
3531                 return;
3532         }
3533
3534         if (vcpu->arch.nmi_injected) {
3535                 kvm_x86_ops->set_nmi(vcpu);
3536                 return;
3537         }
3538
3539         if (vcpu->arch.interrupt.pending) {
3540                 kvm_x86_ops->set_irq(vcpu);
3541                 return;
3542         }
3543
3544         /* try to inject new event if pending */
3545         if (vcpu->arch.nmi_pending) {
3546                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3547                         vcpu->arch.nmi_pending = false;
3548                         vcpu->arch.nmi_injected = true;
3549                         kvm_x86_ops->set_nmi(vcpu);
3550                 }
3551         } else if (kvm_cpu_has_interrupt(vcpu)) {
3552                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
3553                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
3554                                             false);
3555                         kvm_x86_ops->set_irq(vcpu);
3556                 }
3557         }
3558 }
3559
3560 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3561 {
3562         int r;
3563         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3564                 vcpu->run->request_interrupt_window;
3565
3566         if (vcpu->requests)
3567                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3568                         kvm_mmu_unload(vcpu);
3569
3570         r = kvm_mmu_reload(vcpu);
3571         if (unlikely(r))
3572                 goto out;
3573
3574         if (vcpu->requests) {
3575                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
3576                         __kvm_migrate_timers(vcpu);
3577                 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3578                         kvm_write_guest_time(vcpu);
3579                 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3580                         kvm_mmu_sync_roots(vcpu);
3581                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3582                         kvm_x86_ops->tlb_flush(vcpu);
3583                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3584                                        &vcpu->requests)) {
3585                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
3586                         r = 0;
3587                         goto out;
3588                 }
3589                 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3590                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
3591                         r = 0;
3592                         goto out;
3593                 }
3594         }
3595
3596         preempt_disable();
3597
3598         kvm_x86_ops->prepare_guest_switch(vcpu);
3599         kvm_load_guest_fpu(vcpu);
3600
3601         local_irq_disable();
3602
3603         clear_bit(KVM_REQ_KICK, &vcpu->requests);
3604         smp_mb__after_clear_bit();
3605
3606         if (vcpu->requests || need_resched() || signal_pending(current)) {
3607                 set_bit(KVM_REQ_KICK, &vcpu->requests);
3608                 local_irq_enable();
3609                 preempt_enable();
3610                 r = 1;
3611                 goto out;
3612         }
3613
3614         inject_pending_event(vcpu);
3615
3616         /* enable NMI/IRQ window open exits if needed */
3617         if (vcpu->arch.nmi_pending)
3618                 kvm_x86_ops->enable_nmi_window(vcpu);
3619         else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3620                 kvm_x86_ops->enable_irq_window(vcpu);
3621
3622         if (kvm_lapic_enabled(vcpu)) {
3623                 update_cr8_intercept(vcpu);
3624                 kvm_lapic_sync_to_vapic(vcpu);
3625         }
3626
3627         up_read(&vcpu->kvm->slots_lock);
3628
3629         kvm_guest_enter();
3630
3631         if (unlikely(vcpu->arch.switch_db_regs)) {
3632                 set_debugreg(0, 7);
3633                 set_debugreg(vcpu->arch.eff_db[0], 0);
3634                 set_debugreg(vcpu->arch.eff_db[1], 1);
3635                 set_debugreg(vcpu->arch.eff_db[2], 2);
3636                 set_debugreg(vcpu->arch.eff_db[3], 3);
3637         }
3638
3639         trace_kvm_entry(vcpu->vcpu_id);
3640         kvm_x86_ops->run(vcpu);
3641
3642         if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
3643                 set_debugreg(current->thread.debugreg0, 0);
3644                 set_debugreg(current->thread.debugreg1, 1);
3645                 set_debugreg(current->thread.debugreg2, 2);
3646                 set_debugreg(current->thread.debugreg3, 3);
3647                 set_debugreg(current->thread.debugreg6, 6);
3648                 set_debugreg(current->thread.debugreg7, 7);
3649         }
3650
3651         set_bit(KVM_REQ_KICK, &vcpu->requests);
3652         local_irq_enable();
3653
3654         ++vcpu->stat.exits;
3655
3656         /*
3657          * We must have an instruction between local_irq_enable() and
3658          * kvm_guest_exit(), so the timer interrupt isn't delayed by
3659          * the interrupt shadow.  The stat.exits increment will do nicely.
3660          * But we need to prevent reordering, hence this barrier():
3661          */
3662         barrier();
3663
3664         kvm_guest_exit();
3665
3666         preempt_enable();
3667
3668         down_read(&vcpu->kvm->slots_lock);
3669
3670         /*
3671          * Profile KVM exit RIPs:
3672          */
3673         if (unlikely(prof_on == KVM_PROFILING)) {
3674                 unsigned long rip = kvm_rip_read(vcpu);
3675                 profile_hit(KVM_PROFILING, (void *)rip);
3676         }
3677
3679         kvm_lapic_sync_from_vapic(vcpu);
3680
3681         r = kvm_x86_ops->handle_exit(vcpu);
3682 out:
3683         return r;
3684 }
3685
3686
3687 static int __vcpu_run(struct kvm_vcpu *vcpu)
3688 {
3689         int r;
3690
3691         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3692                 pr_debug("vcpu %d received sipi with vector # %x\n",
3693                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
3694                 kvm_lapic_reset(vcpu);
3695                 r = kvm_arch_vcpu_reset(vcpu);
3696                 if (r)
3697                         return r;
3698                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3699         }
3700
3701         down_read(&vcpu->kvm->slots_lock);
3702         vapic_enter(vcpu);
3703
3704         r = 1;
3705         while (r > 0) {
3706                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3707                         r = vcpu_enter_guest(vcpu);
3708                 else {
3709                         up_read(&vcpu->kvm->slots_lock);
3710                         kvm_vcpu_block(vcpu);
3711                         down_read(&vcpu->kvm->slots_lock);
3712                         if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
3714                                 switch (vcpu->arch.mp_state) {
3715                                 case KVM_MP_STATE_HALTED:
3716                                         vcpu->arch.mp_state =
3717                                                 KVM_MP_STATE_RUNNABLE;
                                        /* fall through */
3718                                 case KVM_MP_STATE_RUNNABLE:
3719                                         break;
3720                                 case KVM_MP_STATE_SIPI_RECEIVED:
3721                                 default:
3722                                         r = -EINTR;
3723                                         break;
3724                                 }
3725                         }
3726                 }
3727
3728                 if (r <= 0)
3729                         break;
3730
3731                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3732                 if (kvm_cpu_has_pending_timer(vcpu))
3733                         kvm_inject_pending_timer_irqs(vcpu);
3734
3735                 if (dm_request_for_irq_injection(vcpu)) {
3736                         r = -EINTR;
3737                         vcpu->run->exit_reason = KVM_EXIT_INTR;
3738                         ++vcpu->stat.request_irq_exits;
3739                 }
3740                 if (signal_pending(current)) {
3741                         r = -EINTR;
3742                         vcpu->run->exit_reason = KVM_EXIT_INTR;
3743                         ++vcpu->stat.signal_exits;
3744                 }
3745                 if (need_resched()) {
3746                         up_read(&vcpu->kvm->slots_lock);
3747                         kvm_resched(vcpu);
3748                         down_read(&vcpu->kvm->slots_lock);
3749                 }
3750         }
3751
3752         up_read(&vcpu->kvm->slots_lock);
3753         post_kvm_run_save(vcpu);
3754
3755         vapic_exit(vcpu);
3756
3757         return r;
3758 }
3759
3760 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3761 {
3762         int r;
3763         sigset_t sigsaved;
3764
3765         vcpu_load(vcpu);
3766
3767         if (vcpu->sigset_active)
3768                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3769
3770         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
3771                 kvm_vcpu_block(vcpu);
3772                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3773                 r = -EAGAIN;
3774                 goto out;
3775         }
3776
3777         /* re-sync apic's tpr */
3778         if (!irqchip_in_kernel(vcpu->kvm))
3779                 kvm_set_cr8(vcpu, kvm_run->cr8);
3780
3781         if (vcpu->arch.pio.cur_count) {
3782                 r = complete_pio(vcpu);
3783                 if (r)
3784                         goto out;
3785         }
3786 #ifdef CONFIG_HAS_IOMEM
3787         if (vcpu->mmio_needed) {
3788                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3789                 vcpu->mmio_read_completed = 1;
3790                 vcpu->mmio_needed = 0;
3791
3792                 down_read(&vcpu->kvm->slots_lock);
3793                 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
3794                                         EMULTYPE_NO_DECODE);
3795                 up_read(&vcpu->kvm->slots_lock);
3796                 if (r == EMULATE_DO_MMIO) {
3797                         /*
3798                          * Read-modify-write.  Back to userspace.
3799                          */
3800                         r = 0;
3801                         goto out;
3802                 }
3803         }
3804 #endif
3805         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3806                 kvm_register_write(vcpu, VCPU_REGS_RAX,
3807                                      kvm_run->hypercall.ret);
3808
3809         r = __vcpu_run(vcpu);
3810
3811 out:
3812         if (vcpu->sigset_active)
3813                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3814
3815         vcpu_put(vcpu);
3816         return r;
3817 }
3818
3819 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3820 {
3821         vcpu_load(vcpu);
3822
3823         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3824         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3825         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3826         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3827         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3828         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3829         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3830         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3831 #ifdef CONFIG_X86_64
3832         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3833         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3834         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3835         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3836         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3837         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3838         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3839         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3840 #endif
3841
3842         regs->rip = kvm_rip_read(vcpu);
3843         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3844
3845         /*
3846          * Don't leak debug flags in case they were set for guest debugging
3847          */
3848         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3849                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3850
3851         vcpu_put(vcpu);
3852
3853         return 0;
3854 }
3855
3856 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3857 {
3858         vcpu_load(vcpu);
3859
3860         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3861         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3862         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3863         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3864         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3865         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3866         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3867         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
3868 #ifdef CONFIG_X86_64
3869         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3870         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3871         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3872         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3873         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3874         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3875         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3876         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3878 #endif
3879
3880         kvm_rip_write(vcpu, regs->rip);
3881         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3882
3884         vcpu->arch.exception.pending = false;
3885
3886         vcpu_put(vcpu);
3887
3888         return 0;
3889 }
3890
3891 void kvm_get_segment(struct kvm_vcpu *vcpu,
3892                      struct kvm_segment *var, int seg)
3893 {
3894         kvm_x86_ops->get_segment(vcpu, var, seg);
3895 }
3896
3897 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3898 {
3899         struct kvm_segment cs;
3900
3901         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3902         *db = cs.db;
3903         *l = cs.l;
3904 }
3905 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3906
3907 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3908                                   struct kvm_sregs *sregs)
3909 {
3910         struct descriptor_table dt;
3911
3912         vcpu_load(vcpu);
3913
3914         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3915         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3916         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3917         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3918         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3919         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3920
3921         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3922         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3923
3924         kvm_x86_ops->get_idt(vcpu, &dt);
3925         sregs->idt.limit = dt.limit;
3926         sregs->idt.base = dt.base;
3927         kvm_x86_ops->get_gdt(vcpu, &dt);
3928         sregs->gdt.limit = dt.limit;
3929         sregs->gdt.base = dt.base;
3930
3931         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3932         sregs->cr0 = vcpu->arch.cr0;
3933         sregs->cr2 = vcpu->arch.cr2;
3934         sregs->cr3 = vcpu->arch.cr3;
3935         sregs->cr4 = vcpu->arch.cr4;
3936         sregs->cr8 = kvm_get_cr8(vcpu);
3937         sregs->efer = vcpu->arch.shadow_efer;
3938         sregs->apic_base = kvm_get_apic_base(vcpu);
3939
3940         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
3941
3942         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
3943                 set_bit(vcpu->arch.interrupt.nr,
3944                         (unsigned long *)sregs->interrupt_bitmap);
3945
3946         vcpu_put(vcpu);
3947
3948         return 0;
3949 }
3950
3951 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3952                                     struct kvm_mp_state *mp_state)
3953 {
3954         vcpu_load(vcpu);
3955         mp_state->mp_state = vcpu->arch.mp_state;
3956         vcpu_put(vcpu);
3957         return 0;
3958 }
3959
3960 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3961                                     struct kvm_mp_state *mp_state)
3962 {
3963         vcpu_load(vcpu);
3964         vcpu->arch.mp_state = mp_state->mp_state;
3965         vcpu_put(vcpu);
3966         return 0;
3967 }
3968
3969 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3970                         struct kvm_segment *var, int seg)
3971 {
3972         kvm_x86_ops->set_segment(vcpu, var, seg);
3973 }
3974
3975 static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3976                                    struct kvm_segment *kvm_desct)
3977 {
3978         kvm_desct->base = get_desc_base(seg_desc);
3979         kvm_desct->limit = get_desc_limit(seg_desc);
3980         if (seg_desc->g) {
3981                 kvm_desct->limit <<= 12;
3982                 kvm_desct->limit |= 0xfff;
3983         }
3984         kvm_desct->selector = selector;
3985         kvm_desct->type = seg_desc->type;
3986         kvm_desct->present = seg_desc->p;
3987         kvm_desct->dpl = seg_desc->dpl;
3988         kvm_desct->db = seg_desc->d;
3989         kvm_desct->s = seg_desc->s;
3990         kvm_desct->l = seg_desc->l;
3991         kvm_desct->g = seg_desc->g;
3992         kvm_desct->avl = seg_desc->avl;
3993         if (!selector)
3994                 kvm_desct->unusable = 1;
3995         else
3996                 kvm_desct->unusable = 0;
3997         kvm_desct->padding = 0;
3998 }
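/*
 * Example of the granularity handling above: a descriptor with
 * limit = 0xfffff and g = 1 expands to (0xfffff << 12) | 0xfff =
 * 0xffffffff, i.e. the 20-bit page-granular limit covers the full 4G.
 */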
3999
4000 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4001                                           u16 selector,
4002                                           struct descriptor_table *dtable)
4003 {
4004         if (selector & (1 << 2)) {
4005                 struct kvm_segment kvm_seg;
4006
4007                 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
4008
4009                 if (kvm_seg.unusable)
4010                         dtable->limit = 0;
4011                 else
4012                         dtable->limit = kvm_seg.limit;
4013                 dtable->base = kvm_seg.base;
4014         } else
4016                 kvm_x86_ops->get_gdt(vcpu, dtable);
4017 }
4018
4019 /* allowed just for 8-byte segment descriptors */
4020 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4021                                          struct desc_struct *seg_desc)
4022 {
4023         struct descriptor_table dtable;
4024         u16 index = selector >> 3;
4025
4026         get_segment_descriptor_dtable(vcpu, selector, &dtable);
4027
4028         if (dtable.limit < index * 8 + 7) {
4029                 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
4030                 return 1;
4031         }
4032         return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
4033 }
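/*
 * Example: selector 0x10 decodes as index 2 (0x10 >> 3) with the table
 * indicator bit (bit 2) clear, so the descriptor is fetched from the GDT
 * at bytes 16..23; the limit check above (index * 8 + 7 = 23) insists
 * the table covers that whole 8-byte slot before the descriptor is read.
 */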
4034
4035 /* allowed just for 8-byte segment descriptors */
4036 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4037                                          struct desc_struct *seg_desc)
4038 {
4039         struct descriptor_table dtable;
4040         u16 index = selector >> 3;
4041
4042         get_segment_descriptor_dtable(vcpu, selector, &dtable);
4043
4044         if (dtable.limit < index * 8 + 7)
4045                 return 1;
4046         return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
4047 }
4048
4049 static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
4050                              struct desc_struct *seg_desc)
4051 {
4052         u32 base_addr = get_desc_base(seg_desc);
4053
4054         return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
4055 }
4056
4057 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4058 {
4059         struct kvm_segment kvm_seg;
4060
4061         kvm_get_segment(vcpu, &kvm_seg, seg);
4062         return kvm_seg.selector;
4063 }
4064
4065 static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
4066                                                 u16 selector,
4067                                                 struct kvm_segment *kvm_seg)
4068 {
4069         struct desc_struct seg_desc;
4070
4071         if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
4072                 return 1;
4073         seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
4074         return 0;
4075 }
4076
4077 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
4078 {
4079         struct kvm_segment segvar = {
4080                 .base = selector << 4,
4081                 .limit = 0xffff,
4082                 .selector = selector,
4083                 .type = 3,
4084                 .present = 1,
4085                 .dpl = 3,
4086                 .db = 0,
4087                 .s = 1,
4088                 .l = 0,
4089                 .g = 0,
4090                 .avl = 0,
4091                 .unusable = 0,
4092         };
4093         kvm_x86_ops->set_segment(vcpu, &segvar, seg);
4094         return 0;
4095 }
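/*
 * Real-mode segmentation example: loading CS with selector 0xf000 through
 * this helper gives base 0xf0000 (selector << 4) and limit 0xffff, so
 * f000:fff0 resolves to linear address 0xffff0 - the classic reset vector.
 */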
4096
4097 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4098 {
4099         return (seg != VCPU_SREG_LDTR) &&
4100                 (seg != VCPU_SREG_TR) &&
4101                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
4102 }
4103
4104 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4105                                 int type_bits, int seg)
4106 {
4107         struct kvm_segment kvm_seg;
4108
4109         if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
4110                 return kvm_load_realmode_segment(vcpu, selector, seg);
4111         if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
4112                 return 1;
4113         kvm_seg.type |= type_bits;
4114
4115         if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
4116             seg != VCPU_SREG_LDTR)
4117                 if (!kvm_seg.s)
4118                         kvm_seg.unusable = 1;
4119
4120         kvm_set_segment(vcpu, &kvm_seg, seg);
4121         return 0;
4122 }
4123
4124 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4125                                 struct tss_segment_32 *tss)
4126 {
4127         tss->cr3 = vcpu->arch.cr3;
4128         tss->eip = kvm_rip_read(vcpu);
4129         tss->eflags = kvm_x86_ops->get_rflags(vcpu);
4130         tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4131         tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4132         tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4133         tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4134         tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4135         tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4136         tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4137         tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4138         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4139         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4140         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4141         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4142         tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4143         tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4144         tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4145 }
4146
4147 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4148                                   struct tss_segment_32 *tss)
4149 {
4150         kvm_set_cr3(vcpu, tss->cr3);
4151
4152         kvm_rip_write(vcpu, tss->eip);
4153         kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
4154
4155         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4156         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4157         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4158         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4159         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4160         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4161         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4162         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
4163
4164         if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
4165                 return 1;
4166
4167         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
4168                 return 1;
4169
4170         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
4171                 return 1;
4172
4173         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
4174                 return 1;
4175
4176         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
4177                 return 1;
4178
4179         if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
4180                 return 1;
4181
4182         if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
4183                 return 1;
4184         return 0;
4185 }
4186
4187 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
4188                                 struct tss_segment_16 *tss)
4189 {
4190         tss->ip = kvm_rip_read(vcpu);
4191         tss->flag = kvm_x86_ops->get_rflags(vcpu);
4192         tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4193         tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4194         tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4195         tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4196         tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4197         tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4198         tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
4199         tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
4200
4201         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4202         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4203         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4204         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4205         tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4206 }
4207
4208 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4209                                  struct tss_segment_16 *tss)
4210 {
4211         kvm_rip_write(vcpu, tss->ip);
4212         kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
4213         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
4214         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
4215         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
4216         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
4217         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
4218         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
4219         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4220         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
4221
4222         if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
4223                 return 1;
4224
4225         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
4226                 return 1;
4227
4228         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
4229                 return 1;
4230
4231         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
4232                 return 1;
4233
4234         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
4235                 return 1;
4236         return 0;
4237 }
4238
4239 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
4240                               u16 old_tss_sel, u32 old_tss_base,
4241                               struct desc_struct *nseg_desc)
4242 {
4243         struct tss_segment_16 tss_segment_16;
4244         int ret = 0;
4245
4246         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4247                            sizeof tss_segment_16))
4248                 goto out;
4249
4250         save_state_to_tss16(vcpu, &tss_segment_16);
4251
4252         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4253                             sizeof tss_segment_16))
4254                 goto out;
4255
4256         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4257                            &tss_segment_16, sizeof tss_segment_16))
4258                 goto out;
4259
4260         if (old_tss_sel != 0xffff) {
4261                 tss_segment_16.prev_task_link = old_tss_sel;
4262
4263                 if (kvm_write_guest(vcpu->kvm,
4264                                     get_tss_base_addr(vcpu, nseg_desc),
4265                                     &tss_segment_16.prev_task_link,
4266                                     sizeof tss_segment_16.prev_task_link))
4267                         goto out;
4268         }
4269
4270         if (load_state_from_tss16(vcpu, &tss_segment_16))
4271                 goto out;
4272
4273         ret = 1;
4274 out:
4275         return ret;
4276 }
4277
4278 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
4279                        u16 old_tss_sel, u32 old_tss_base,
4280                        struct desc_struct *nseg_desc)
4281 {
4282         struct tss_segment_32 tss_segment_32;
4283         int ret = 0;
4284
4285         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4286                            sizeof tss_segment_32))
4287                 goto out;
4288
4289         save_state_to_tss32(vcpu, &tss_segment_32);
4290
4291         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4292                             sizeof tss_segment_32))
4293                 goto out;
4294
4295         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4296                            &tss_segment_32, sizeof tss_segment_32))
4297                 goto out;
4298
4299         if (old_tss_sel != 0xffff) {
4300                 tss_segment_32.prev_task_link = old_tss_sel;
4301
4302                 if (kvm_write_guest(vcpu->kvm,
4303                                     get_tss_base_addr(vcpu, nseg_desc),
4304                                     &tss_segment_32.prev_task_link,
4305                                     sizeof tss_segment_32.prev_task_link))
4306                         goto out;
4307         }
4308
4309         if (load_state_from_tss32(vcpu, &tss_segment_32))
4310                 goto out;
4311
4312         ret = 1;
4313 out:
4314         return ret;
4315 }
4316
4317 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4318 {
4319         struct kvm_segment tr_seg;
4320         struct desc_struct cseg_desc;
4321         struct desc_struct nseg_desc;
4322         int ret = 0;
4323         u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
4324         u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
4325
4326         old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
4327
4328         /* FIXME: Handle errors. Failure to read either TSS or its
4329          * descriptor should generate a page fault.
4330          */
4331         if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
4332                 goto out;
4333
4334         if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
4335                 goto out;
4336
4337         if (reason != TASK_SWITCH_IRET) {
4338                 int cpl;
4339
4340                 cpl = kvm_x86_ops->get_cpl(vcpu);
4341                 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
4342                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4343                         return 1;
4344                 }
4345         }
4346
4347         if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
4348                 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
4349                 return 1;
4350         }
4351
4352         if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
4353                 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
4354                 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
4355         }
4356
4357         if (reason == TASK_SWITCH_IRET) {
4358                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
4359                 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
4360         }
4361
4362         /* set back link to prev task only if NT bit is set in eflags
4363            note that old_tss_sel is not used after this point */
4364         if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
4365                 old_tss_sel = 0xffff;
4371
4372         if (nseg_desc.type & 8)
4373                 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
4374                                          old_tss_base, &nseg_desc);
4375         else
4376                 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
4377                                          old_tss_base, &nseg_desc);
4378
4379         if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
4380                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
4381                 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
4382         }
4383
4384         if (reason != TASK_SWITCH_IRET) {
4385                 nseg_desc.type |= (1 << 1);
4386                 save_guest_segment_descriptor(vcpu, tss_selector,
4387                                               &nseg_desc);
4388         }
4389
4390         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
4391         seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4392         tr_seg.type = 11;
4393         kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
4394 out:
4395         return ret;
4396 }
4397 EXPORT_SYMBOL_GPL(kvm_task_switch);
4398
4399 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4400                                   struct kvm_sregs *sregs)
4401 {
4402         int mmu_reset_needed = 0;
4403         int pending_vec, max_bits;
4404         struct descriptor_table dt;
4405
4406         vcpu_load(vcpu);
4407
4408         dt.limit = sregs->idt.limit;
4409         dt.base = sregs->idt.base;
4410         kvm_x86_ops->set_idt(vcpu, &dt);
4411         dt.limit = sregs->gdt.limit;
4412         dt.base = sregs->gdt.base;
4413         kvm_x86_ops->set_gdt(vcpu, &dt);
4414
4415         vcpu->arch.cr2 = sregs->cr2;
4416         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
4417         vcpu->arch.cr3 = sregs->cr3;
4418
4419         kvm_set_cr8(vcpu, sregs->cr8);
4420
4421         mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
4422         kvm_x86_ops->set_efer(vcpu, sregs->efer);
4423         kvm_set_apic_base(vcpu, sregs->apic_base);
4424
4425         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
4426
4427         mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
4428         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
4429         vcpu->arch.cr0 = sregs->cr0;
4430
4431         mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
4432         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
4433         if (!is_long_mode(vcpu) && is_pae(vcpu))
4434                 load_pdptrs(vcpu, vcpu->arch.cr3);
4435
4436         if (mmu_reset_needed)
4437                 kvm_mmu_reset_context(vcpu);
4438
4439         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
4440         pending_vec = find_first_bit(
4441                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
4442         if (pending_vec < max_bits) {
4443                 kvm_queue_interrupt(vcpu, pending_vec, false);
4444                 pr_debug("Set back pending irq %d\n", pending_vec);
4445                 if (irqchip_in_kernel(vcpu->kvm))
4446                         kvm_pic_clear_isr_ack(vcpu->kvm);
4447         }
4448
4449         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4450         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4451         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4452         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4453         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4454         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
4455
4456         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4457         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
4458
4459         update_cr8_intercept(vcpu);
4460
4461         /* Older userspace won't unhalt the vcpu on reset. */
4462         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
4463             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4464             !(vcpu->arch.cr0 & X86_CR0_PE))
4465                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4466
4467         vcpu_put(vcpu);
4468
4469         return 0;
4470 }
4471
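/*
 * KVM_SET_GUEST_DEBUG: while userspace hardware breakpoints are
 * enabled, the effective debug registers come from dbg->arch;
 * otherwise the guest's own values stay in effect.
 */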
4472 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4473                                         struct kvm_guest_debug *dbg)
4474 {
4475         int i, r;
4476
4477         vcpu_load(vcpu);
4478
4479         if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
4480             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
4481                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
4482                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
4483                 vcpu->arch.switch_db_regs =
4484                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
4485         } else {
4486                 for (i = 0; i < KVM_NR_DB_REGS; i++)
4487                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
4488                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
4489         }
4490
4491         r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
4492
4493         if (dbg->control & KVM_GUESTDBG_INJECT_DB)
4494                 kvm_queue_exception(vcpu, DB_VECTOR);
4495         else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
4496                 kvm_queue_exception(vcpu, BP_VECTOR);
4497
4498         vcpu_put(vcpu);
4499
4500         return r;
4501 }
4502
4503 /*
4504  * fxsave fpu state.  Taken from x86_64/processor.h.  To be removed
4505  * once asm/x86/processor.h provides it.
4506  */
4507 struct fxsave {
4508         u16     cwd;
4509         u16     swd;
4510         u16     twd;
4511         u16     fop;
4512         u64     rip;
4513         u64     rdp;
4514         u32     mxcsr;
4515         u32     mxcsr_mask;
4516         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
4517 #ifdef CONFIG_X86_64
4518         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
4519 #else
4520         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
4521 #endif
4522 };
4523
4524 /*
4525  * Translate a guest virtual address to a guest physical address.
4526  */
4527 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4528                                     struct kvm_translation *tr)
4529 {
4530         unsigned long vaddr = tr->linear_address;
4531         gpa_t gpa;
4532
4533         vcpu_load(vcpu);
4534         down_read(&vcpu->kvm->slots_lock);
4535         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
4536         up_read(&vcpu->kvm->slots_lock);
4537         tr->physical_address = gpa;
4538         tr->valid = gpa != UNMAPPED_GVA;
4539         tr->writeable = 1;
4540         tr->usermode = 0;
4541         vcpu_put(vcpu);
4542
4543         return 0;
4544 }
4545
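/* KVM_GET_FPU: copy the guest fxsave image out to userspace. */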
4546 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4547 {
4548         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
4549
4550         vcpu_load(vcpu);
4551
4552         memcpy(fpu->fpr, fxsave->st_space, 128);
4553         fpu->fcw = fxsave->cwd;
4554         fpu->fsw = fxsave->swd;
4555         fpu->ftwx = fxsave->twd;
4556         fpu->last_opcode = fxsave->fop;
4557         fpu->last_ip = fxsave->rip;
4558         fpu->last_dp = fxsave->rdp;
4559         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
4560
4561         vcpu_put(vcpu);
4562
4563         return 0;
4564 }
4565
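/* KVM_SET_FPU: load a userspace-supplied fxsave image into the guest. */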
4566 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4567 {
4568         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
4569
4570         vcpu_load(vcpu);
4571
4572         memcpy(fxsave->st_space, fpu->fpr, 128);
4573         fxsave->cwd = fpu->fcw;
4574         fxsave->swd = fpu->fsw;
4575         fxsave->twd = fpu->ftwx;
4576         fxsave->fop = fpu->last_opcode;
4577         fxsave->rip = fpu->last_ip;
4578         fxsave->rdp = fpu->last_dp;
4579         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
4580
4581         vcpu_put(vcpu);
4582
4583         return 0;
4584 }
4585
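/* Give a newly created vcpu a clean fpu image. */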
4586 void fx_init(struct kvm_vcpu *vcpu)
4587 {
4588         unsigned after_mxcsr_mask;
4589
4590         /*
4591          * Touch the fpu for the first time in non-atomic context: if
4592          * this is the first fpu instruction, the exception handler
4593          * fires before the instruction returns and has to allocate
4594          * ram with GFP_KERNEL.
4595          */
4596         if (!used_math())
4597                 kvm_fx_save(&vcpu->arch.host_fx_image);
4598
4599         /* Initialize guest FPU by resetting ours and saving into guest's */
4600         preempt_disable();
4601         kvm_fx_save(&vcpu->arch.host_fx_image);
4602         kvm_fx_finit();
4603         kvm_fx_save(&vcpu->arch.guest_fx_image);
4604         kvm_fx_restore(&vcpu->arch.host_fx_image);
4605         preempt_enable();
4606
4607         vcpu->arch.cr0 |= X86_CR0_ET;
4608         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
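        /* 0x1f80 is the architectural reset value of MXCSR */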
4609         vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
4610         memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
4611                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
4612 }
4613 EXPORT_SYMBOL_GPL(fx_init);
4614
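/*
 * Swap the guest fxsave image onto the cpu before entering guest mode;
 * the host image saved here is restored by kvm_put_guest_fpu().
 */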
4615 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4616 {
4617         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
4618                 return;
4619
4620         vcpu->guest_fpu_loaded = 1;
4621         kvm_fx_save(&vcpu->arch.host_fx_image);
4622         kvm_fx_restore(&vcpu->arch.guest_fx_image);
4623 }
4624 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4625
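/* Restore the host fpu image saved by kvm_load_guest_fpu(). */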
4626 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4627 {
4628         if (!vcpu->guest_fpu_loaded)
4629                 return;
4630
4631         vcpu->guest_fpu_loaded = 0;
4632         kvm_fx_save(&vcpu->arch.guest_fx_image);
4633         kvm_fx_restore(&vcpu->arch.host_fx_image);
4634         ++vcpu->stat.fpu_reload;
4635 }
4636 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
4637
4638 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4639 {
4640         if (vcpu->arch.time_page) {
4641                 kvm_release_page_dirty(vcpu->arch.time_page);
4642                 vcpu->arch.time_page = NULL;
4643         }
4644
4645         kvm_x86_ops->vcpu_free(vcpu);
4646 }
4647
4648 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
4649                                                 unsigned int id)
4650 {
4651         return kvm_x86_ops->vcpu_create(kvm, id);
4652 }
4653
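/* Final per-vcpu setup after creation: reset the vcpu and set up its mmu. */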
4654 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
4655 {
4656         int r;
4657
4658         /* We do fxsave: the image must be 16-byte aligned. */
4659         BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
4660
4661         vcpu->arch.mtrr_state.have_fixed = 1;
4662         vcpu_load(vcpu);
4663         r = kvm_arch_vcpu_reset(vcpu);
4664         if (r == 0)
4665                 r = kvm_mmu_setup(vcpu);
4666         vcpu_put(vcpu);
4667         if (r < 0)
4668                 goto free_vcpu;
4669
4670         return 0;
4671 free_vcpu:
4672         kvm_x86_ops->vcpu_free(vcpu);
4673         return r;
4674 }
4675
4676 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
4677 {
4678         vcpu_load(vcpu);
4679         kvm_mmu_unload(vcpu);
4680         vcpu_put(vcpu);
4681
4682         kvm_x86_ops->vcpu_free(vcpu);
4683 }
4684
4685 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
4686 {
4687         vcpu->arch.nmi_pending = false;
4688         vcpu->arch.nmi_injected = false;
4689
4690         vcpu->arch.switch_db_regs = 0;
4691         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
4692         vcpu->arch.dr6 = DR6_FIXED_1;
4693         vcpu->arch.dr7 = DR7_FIXED_1;
4694
4695         return kvm_x86_ops->vcpu_reset(vcpu);
4696 }
4697
4698 int kvm_arch_hardware_enable(void *garbage)
4699 {
4700         /*
4701          * Since this may be called from a hotplug notification,
4702          * we can't get the CPU frequency directly.
4703          */
4704         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4705                 int cpu = raw_smp_processor_id();
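                /* zero forces a recompute before the next guest entry */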
4706                 per_cpu(cpu_tsc_khz, cpu) = 0;
4707         }
4708         return kvm_x86_ops->hardware_enable(garbage);
4709 }
4710
4711 void kvm_arch_hardware_disable(void *garbage)
4712 {
4713         kvm_x86_ops->hardware_disable(garbage);
4714 }
4715
4716 int kvm_arch_hardware_setup(void)
4717 {
4718         return kvm_x86_ops->hardware_setup();
4719 }
4720
4721 void kvm_arch_hardware_unsetup(void)
4722 {
4723         kvm_x86_ops->hardware_unsetup();
4724 }
4725
4726 void kvm_arch_check_processor_compat(void *rtn)
4727 {
4728         kvm_x86_ops->check_processor_compatibility(rtn);
4729 }
4730
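/*
 * Arch-specific vcpu construction: allocate the pio scratch page, the
 * mmu state, a local APIC when the irqchip is in-kernel, and the MCE
 * bank array.
 */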
4731 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
4732 {
4733         struct page *page;
4734         struct kvm *kvm;
4735         int r;
4736
4737         BUG_ON(vcpu->kvm == NULL);
4738         kvm = vcpu->kvm;
4739
4740         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
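        /*
         * The BSP (or, with a userspace irqchip, every vcpu) starts
         * runnable; the other vcpus wait for an INIT/SIPI sequence.
         */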
4741         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
4742                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4743         else
4744                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
4745
4746         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
4747         if (!page) {
4748                 r = -ENOMEM;
4749                 goto fail;
4750         }
4751         vcpu->arch.pio_data = page_address(page);
4752
4753         r = kvm_mmu_create(vcpu);
4754         if (r < 0)
4755                 goto fail_free_pio_data;
4756
4757         if (irqchip_in_kernel(kvm)) {
4758                 r = kvm_create_lapic(vcpu);
4759                 if (r < 0)
4760                         goto fail_mmu_destroy;
4761         }
4762
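        /* four MSRs per bank: MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC */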
4763         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
4764                                        GFP_KERNEL);
4765         if (!vcpu->arch.mce_banks) {
4766                 r = -ENOMEM;
4767                 goto fail_mmu_destroy;
4768         }
4769         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
4770
4771         return 0;
4772
4773 fail_mmu_destroy:
4774         kvm_mmu_destroy(vcpu);
4775 fail_free_pio_data:
4776         free_page((unsigned long)vcpu->arch.pio_data);
4777 fail:
4778         return r;
4779 }
4780
4781 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
4782 {
4783         kvm_free_lapic(vcpu);
4784         down_read(&vcpu->kvm->slots_lock);
4785         kvm_mmu_destroy(vcpu);
4786         up_read(&vcpu->kvm->slots_lock);
4787         free_page((unsigned long)vcpu->arch.pio_data);
4788 }
4789
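/*
 * Allocate a VM and initialize its x86 state: the active mmu page and
 * assigned device lists, the reserved userspace irq source, and the
 * TSC value that marks VM creation.
 */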
4790 struct kvm *kvm_arch_create_vm(void)
4791 {
4792         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
4793
4794         if (!kvm)
4795                 return ERR_PTR(-ENOMEM);
4796
4797         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
4798         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
4799
4800         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
4801         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
4802
4803         rdtscll(kvm->arch.vm_init_tsc);
4804
4805         return kvm;
4806 }
4807
4808 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4809 {
4810         vcpu_load(vcpu);
4811         kvm_mmu_unload(vcpu);
4812         vcpu_put(vcpu);
4813 }
4814
4815 static void kvm_free_vcpus(struct kvm *kvm)
4816 {
4817         unsigned int i;
4818         struct kvm_vcpu *vcpu;
4819
4820         /*
4821          * Unpin any mmu pages first.
4822          */
4823         kvm_for_each_vcpu(i, vcpu, kvm)
4824                 kvm_unload_vcpu_mmu(vcpu);
4825         kvm_for_each_vcpu(i, vcpu, kvm)
4826                 kvm_arch_vcpu_free(vcpu);
4827
4828         mutex_lock(&kvm->lock);
4829         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
4830                 kvm->vcpus[i] = NULL;
4831
4832         atomic_set(&kvm->online_vcpus, 0);
4833         mutex_unlock(&kvm->lock);
4834 }
4835
4836 void kvm_arch_sync_events(struct kvm *kvm)
4837 {
4838         kvm_free_all_assigned_devices(kvm);
4839 }
4840
4841 void kvm_arch_destroy_vm(struct kvm *kvm)
4842 {
4843         kvm_iommu_unmap_guest(kvm);
4844         kvm_free_pit(kvm);
4845         kfree(kvm->arch.vpic);
4846         kfree(kvm->arch.vioapic);
4847         kvm_free_vcpus(kvm);
4848         kvm_free_physmem(kvm);
4849         if (kvm->arch.apic_access_page)
4850                 put_page(kvm->arch.apic_access_page);
4851         if (kvm->arch.ept_identity_pagetable)
4852                 put_page(kvm->arch.ept_identity_pagetable);
4853         kfree(kvm);
4854 }
4855
4856 int kvm_arch_set_memory_region(struct kvm *kvm,
4857                                 struct kvm_userspace_memory_region *mem,
4858                                 struct kvm_memory_slot old,
4859                                 int user_alloc)
4860 {
4861         int npages = mem->memory_size >> PAGE_SHIFT;
4862         struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
4863
4864         /* To keep backward compatibility with older userspace,
4865          * x86 needs to handle the !user_alloc case.
4866          */
4867         if (!user_alloc) {
4868                 if (npages && !old.rmap) {
4869                         unsigned long userspace_addr;
4870
4871                         down_write(&current->mm->mmap_sem);
4872                         userspace_addr = do_mmap(NULL, 0,
4873                                                  npages * PAGE_SIZE,
4874                                                  PROT_READ | PROT_WRITE,
4875                                                  MAP_PRIVATE | MAP_ANONYMOUS,
4876                                                  0);
4877                         up_write(&current->mm->mmap_sem);
4878
4879                         if (IS_ERR((void *)userspace_addr))
4880                                 return PTR_ERR((void *)userspace_addr);
4881
4882                         /* set userspace_addr atomically for kvm_hva_to_rmapp */
4883                         spin_lock(&kvm->mmu_lock);
4884                         memslot->userspace_addr = userspace_addr;
4885                         spin_unlock(&kvm->mmu_lock);
4886                 } else {
4887                         if (!old.user_alloc && old.rmap) {
4888                                 int ret;
4889
4890                                 down_write(&current->mm->mmap_sem);
4891                                 ret = do_munmap(current->mm, old.userspace_addr,
4892                                                 old.npages * PAGE_SIZE);
4893                                 up_write(&current->mm->mmap_sem);
4894                                 if (ret < 0)
4895                                         printk(KERN_WARNING
4896                                        "kvm_vm_ioctl_set_memory_region: "
4897                                        "failed to munmap memory\n");
4898                         }
4899                 }
4900         }
4901
4902         spin_lock(&kvm->mmu_lock);
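        /* rescale the shadow mmu page limit unless userspace set one */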
4903         if (!kvm->arch.n_requested_mmu_pages) {
4904                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
4905                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
4906         }
4907
4908         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
4909         spin_unlock(&kvm->mmu_lock);
4910
4911         return 0;
4912 }
4913
4914 void kvm_arch_flush_shadow(struct kvm *kvm)
4915 {
4916         kvm_mmu_zap_all(kvm);
4917         kvm_reload_remote_mmus(kvm);
4918 }
4919
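/*
 * A vcpu is runnable if its mp_state allows it, a SIPI was received,
 * an NMI is pending, or an interrupt is pending and can be taken.
 */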
4920 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4921 {
4922         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
4923                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
4924                 || vcpu->arch.nmi_pending ||
4925                 (kvm_arch_interrupt_allowed(vcpu) &&
4926                  kvm_cpu_has_interrupt(vcpu));
4927 }
4928
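/*
 * Make a vcpu notice pending work: wake it if it is sleeping in
 * kvm_vcpu_block(), and send a reschedule IPI if it is running in
 * guest mode on another cpu; KVM_REQ_KICK keeps redundant IPIs from
 * being sent.
 */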
4929 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4930 {
4931         int me;
4932         int cpu = vcpu->cpu;
4933
4934         if (waitqueue_active(&vcpu->wq)) {
4935                 wake_up_interruptible(&vcpu->wq);
4936                 ++vcpu->stat.halt_wakeup;
4937         }
4938
4939         me = get_cpu();
4940         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
4941                 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
4942                         smp_send_reschedule(cpu);
4943         put_cpu();
4944 }
4945
4946 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
4947 {
4948         return kvm_x86_ops->interrupt_allowed(vcpu);
4949 }
4950
4951 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
4952 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
4953 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
4954 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
4955 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);