KVM: modify memslots layout in struct kvm
[safe/jmp/linux-2.6] / arch / x86 / kvm / x86.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Amit Shah    <amit.shah@qumranet.com>
14  *   Ben-Ami Yassour <benami@il.ibm.com>
15  *
16  * This work is licensed under the terms of the GNU GPL, version 2.  See
17  * the COPYING file in the top-level directory.
18  *
19  */
20
21 #include <linux/kvm_host.h>
22 #include "irq.h"
23 #include "mmu.h"
24 #include "i8254.h"
25 #include "tss.h"
26 #include "kvm_cache_regs.h"
27 #include "x86.h"
28
29 #include <linux/clocksource.h>
30 #include <linux/interrupt.h>
31 #include <linux/kvm.h>
32 #include <linux/fs.h>
33 #include <linux/vmalloc.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/highmem.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/cpufreq.h>
40 #include <linux/user-return-notifier.h>
41 #include <trace/events/kvm.h>
42 #undef TRACE_INCLUDE_FILE
43 #define CREATE_TRACE_POINTS
44 #include "trace.h"
45
46 #include <asm/debugreg.h>
47 #include <asm/uaccess.h>
48 #include <asm/msr.h>
49 #include <asm/desc.h>
50 #include <asm/mtrr.h>
51 #include <asm/mce.h>
52
53 #define MAX_IO_MSRS 256
54 #define CR0_RESERVED_BITS                                               \
55         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
56                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
57                           | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
58 #define CR4_RESERVED_BITS                                               \
59         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
60                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
61                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
62                           | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
63
64 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
65
66 #define KVM_MAX_MCE_BANKS 32
67 #define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
68
69 /* EFER defaults:
70  * - enable syscall per default because its emulated by KVM
71  * - enable LME and LMA per default on 64 bit KVM
72  */
73 #ifdef CONFIG_X86_64
74 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
75 #else
76 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
77 #endif
78
79 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
80 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
81
82 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
83 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
84                                     struct kvm_cpuid_entry2 __user *entries);
85
86 struct kvm_x86_ops *kvm_x86_ops;
87 EXPORT_SYMBOL_GPL(kvm_x86_ops);
88
89 int ignore_msrs = 0;
90 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
91
92 #define KVM_NR_SHARED_MSRS 16
93
94 struct kvm_shared_msrs_global {
95         int nr;
96         u32 msrs[KVM_NR_SHARED_MSRS];
97 };
98
99 struct kvm_shared_msrs {
100         struct user_return_notifier urn;
101         bool registered;
102         struct kvm_shared_msr_values {
103                 u64 host;
104                 u64 curr;
105         } values[KVM_NR_SHARED_MSRS];
106 };
107
108 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
109 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
110
111 struct kvm_stats_debugfs_item debugfs_entries[] = {
112         { "pf_fixed", VCPU_STAT(pf_fixed) },
113         { "pf_guest", VCPU_STAT(pf_guest) },
114         { "tlb_flush", VCPU_STAT(tlb_flush) },
115         { "invlpg", VCPU_STAT(invlpg) },
116         { "exits", VCPU_STAT(exits) },
117         { "io_exits", VCPU_STAT(io_exits) },
118         { "mmio_exits", VCPU_STAT(mmio_exits) },
119         { "signal_exits", VCPU_STAT(signal_exits) },
120         { "irq_window", VCPU_STAT(irq_window_exits) },
121         { "nmi_window", VCPU_STAT(nmi_window_exits) },
122         { "halt_exits", VCPU_STAT(halt_exits) },
123         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
124         { "hypercalls", VCPU_STAT(hypercalls) },
125         { "request_irq", VCPU_STAT(request_irq_exits) },
126         { "irq_exits", VCPU_STAT(irq_exits) },
127         { "host_state_reload", VCPU_STAT(host_state_reload) },
128         { "efer_reload", VCPU_STAT(efer_reload) },
129         { "fpu_reload", VCPU_STAT(fpu_reload) },
130         { "insn_emulation", VCPU_STAT(insn_emulation) },
131         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
132         { "irq_injections", VCPU_STAT(irq_injections) },
133         { "nmi_injections", VCPU_STAT(nmi_injections) },
134         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
135         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
136         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
137         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
138         { "mmu_flooded", VM_STAT(mmu_flooded) },
139         { "mmu_recycled", VM_STAT(mmu_recycled) },
140         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
141         { "mmu_unsync", VM_STAT(mmu_unsync) },
142         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
143         { "largepages", VM_STAT(lpages) },
144         { NULL }
145 };
146
147 static void kvm_on_user_return(struct user_return_notifier *urn)
148 {
149         unsigned slot;
150         struct kvm_shared_msrs *locals
151                 = container_of(urn, struct kvm_shared_msrs, urn);
152         struct kvm_shared_msr_values *values;
153
154         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
155                 values = &locals->values[slot];
156                 if (values->host != values->curr) {
157                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
158                         values->curr = values->host;
159                 }
160         }
161         locals->registered = false;
162         user_return_notifier_unregister(urn);
163 }
164
165 static void shared_msr_update(unsigned slot, u32 msr)
166 {
167         struct kvm_shared_msrs *smsr;
168         u64 value;
169
170         smsr = &__get_cpu_var(shared_msrs);
171         /* only read, and nobody should modify it at this time,
172          * so don't need lock */
173         if (slot >= shared_msrs_global.nr) {
174                 printk(KERN_ERR "kvm: invalid MSR slot!");
175                 return;
176         }
177         rdmsrl_safe(msr, &value);
178         smsr->values[slot].host = value;
179         smsr->values[slot].curr = value;
180 }
181
182 void kvm_define_shared_msr(unsigned slot, u32 msr)
183 {
184         if (slot >= shared_msrs_global.nr)
185                 shared_msrs_global.nr = slot + 1;
186         shared_msrs_global.msrs[slot] = msr;
187         /* we need ensured the shared_msr_global have been updated */
188         smp_wmb();
189 }
190 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
191
192 static void kvm_shared_msr_cpu_online(void)
193 {
194         unsigned i;
195
196         for (i = 0; i < shared_msrs_global.nr; ++i)
197                 shared_msr_update(i, shared_msrs_global.msrs[i]);
198 }
199
200 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
201 {
202         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
203
204         if (((value ^ smsr->values[slot].curr) & mask) == 0)
205                 return;
206         smsr->values[slot].curr = value;
207         wrmsrl(shared_msrs_global.msrs[slot], value);
208         if (!smsr->registered) {
209                 smsr->urn.on_user_return = kvm_on_user_return;
210                 user_return_notifier_register(&smsr->urn);
211                 smsr->registered = true;
212         }
213 }
214 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
215
216 static void drop_user_return_notifiers(void *ignore)
217 {
218         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
219
220         if (smsr->registered)
221                 kvm_on_user_return(&smsr->urn);
222 }
223
224 unsigned long segment_base(u16 selector)
225 {
226         struct descriptor_table gdt;
227         struct desc_struct *d;
228         unsigned long table_base;
229         unsigned long v;
230
231         if (selector == 0)
232                 return 0;
233
234         kvm_get_gdt(&gdt);
235         table_base = gdt.base;
236
237         if (selector & 4) {           /* from ldt */
238                 u16 ldt_selector = kvm_read_ldt();
239
240                 table_base = segment_base(ldt_selector);
241         }
242         d = (struct desc_struct *)(table_base + (selector & ~7));
243         v = get_desc_base(d);
244 #ifdef CONFIG_X86_64
245         if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
246                 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
247 #endif
248         return v;
249 }
250 EXPORT_SYMBOL_GPL(segment_base);
251
252 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
253 {
254         if (irqchip_in_kernel(vcpu->kvm))
255                 return vcpu->arch.apic_base;
256         else
257                 return vcpu->arch.apic_base;
258 }
259 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
260
261 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
262 {
263         /* TODO: reserve bits check */
264         if (irqchip_in_kernel(vcpu->kvm))
265                 kvm_lapic_set_base(vcpu, data);
266         else
267                 vcpu->arch.apic_base = data;
268 }
269 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
270
271 #define EXCPT_BENIGN            0
272 #define EXCPT_CONTRIBUTORY      1
273 #define EXCPT_PF                2
274
275 static int exception_class(int vector)
276 {
277         switch (vector) {
278         case PF_VECTOR:
279                 return EXCPT_PF;
280         case DE_VECTOR:
281         case TS_VECTOR:
282         case NP_VECTOR:
283         case SS_VECTOR:
284         case GP_VECTOR:
285                 return EXCPT_CONTRIBUTORY;
286         default:
287                 break;
288         }
289         return EXCPT_BENIGN;
290 }
291
292 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
293                 unsigned nr, bool has_error, u32 error_code)
294 {
295         u32 prev_nr;
296         int class1, class2;
297
298         if (!vcpu->arch.exception.pending) {
299         queue:
300                 vcpu->arch.exception.pending = true;
301                 vcpu->arch.exception.has_error_code = has_error;
302                 vcpu->arch.exception.nr = nr;
303                 vcpu->arch.exception.error_code = error_code;
304                 return;
305         }
306
307         /* to check exception */
308         prev_nr = vcpu->arch.exception.nr;
309         if (prev_nr == DF_VECTOR) {
310                 /* triple fault -> shutdown */
311                 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
312                 return;
313         }
314         class1 = exception_class(prev_nr);
315         class2 = exception_class(nr);
316         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
317                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
318                 /* generate double fault per SDM Table 5-5 */
319                 vcpu->arch.exception.pending = true;
320                 vcpu->arch.exception.has_error_code = true;
321                 vcpu->arch.exception.nr = DF_VECTOR;
322                 vcpu->arch.exception.error_code = 0;
323         } else
324                 /* replace previous exception with a new one in a hope
325                    that instruction re-execution will regenerate lost
326                    exception */
327                 goto queue;
328 }
329
330 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
331 {
332         kvm_multiple_exception(vcpu, nr, false, 0);
333 }
334 EXPORT_SYMBOL_GPL(kvm_queue_exception);
335
336 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
337                            u32 error_code)
338 {
339         ++vcpu->stat.pf_guest;
340         vcpu->arch.cr2 = addr;
341         kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
342 }
343
344 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
345 {
346         vcpu->arch.nmi_pending = 1;
347 }
348 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
349
350 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
351 {
352         kvm_multiple_exception(vcpu, nr, true, error_code);
353 }
354 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
355
356 /*
357  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
358  * a #GP and return false.
359  */
360 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
361 {
362         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
363                 return true;
364         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
365         return false;
366 }
367 EXPORT_SYMBOL_GPL(kvm_require_cpl);
368
369 /*
370  * Load the pae pdptrs.  Return true is they are all valid.
371  */
372 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
373 {
374         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
375         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
376         int i;
377         int ret;
378         u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
379
380         ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
381                                   offset * sizeof(u64), sizeof(pdpte));
382         if (ret < 0) {
383                 ret = 0;
384                 goto out;
385         }
386         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
387                 if (is_present_gpte(pdpte[i]) &&
388                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
389                         ret = 0;
390                         goto out;
391                 }
392         }
393         ret = 1;
394
395         memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
396         __set_bit(VCPU_EXREG_PDPTR,
397                   (unsigned long *)&vcpu->arch.regs_avail);
398         __set_bit(VCPU_EXREG_PDPTR,
399                   (unsigned long *)&vcpu->arch.regs_dirty);
400 out:
401
402         return ret;
403 }
404 EXPORT_SYMBOL_GPL(load_pdptrs);
405
406 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
407 {
408         u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
409         bool changed = true;
410         int r;
411
412         if (is_long_mode(vcpu) || !is_pae(vcpu))
413                 return false;
414
415         if (!test_bit(VCPU_EXREG_PDPTR,
416                       (unsigned long *)&vcpu->arch.regs_avail))
417                 return true;
418
419         r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
420         if (r < 0)
421                 goto out;
422         changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
423 out:
424
425         return changed;
426 }
427
428 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
429 {
430         if (cr0 & CR0_RESERVED_BITS) {
431                 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
432                        cr0, vcpu->arch.cr0);
433                 kvm_inject_gp(vcpu, 0);
434                 return;
435         }
436
437         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
438                 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
439                 kvm_inject_gp(vcpu, 0);
440                 return;
441         }
442
443         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
444                 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
445                        "and a clear PE flag\n");
446                 kvm_inject_gp(vcpu, 0);
447                 return;
448         }
449
450         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
451 #ifdef CONFIG_X86_64
452                 if ((vcpu->arch.shadow_efer & EFER_LME)) {
453                         int cs_db, cs_l;
454
455                         if (!is_pae(vcpu)) {
456                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
457                                        "in long mode while PAE is disabled\n");
458                                 kvm_inject_gp(vcpu, 0);
459                                 return;
460                         }
461                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
462                         if (cs_l) {
463                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
464                                        "in long mode while CS.L == 1\n");
465                                 kvm_inject_gp(vcpu, 0);
466                                 return;
467
468                         }
469                 } else
470 #endif
471                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
472                         printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
473                                "reserved bits\n");
474                         kvm_inject_gp(vcpu, 0);
475                         return;
476                 }
477
478         }
479
480         kvm_x86_ops->set_cr0(vcpu, cr0);
481         vcpu->arch.cr0 = cr0;
482
483         kvm_mmu_reset_context(vcpu);
484         return;
485 }
486 EXPORT_SYMBOL_GPL(kvm_set_cr0);
487
488 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
489 {
490         kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
491 }
492 EXPORT_SYMBOL_GPL(kvm_lmsw);
493
494 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
495 {
496         unsigned long old_cr4 = kvm_read_cr4(vcpu);
497         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
498
499         if (cr4 & CR4_RESERVED_BITS) {
500                 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
501                 kvm_inject_gp(vcpu, 0);
502                 return;
503         }
504
505         if (is_long_mode(vcpu)) {
506                 if (!(cr4 & X86_CR4_PAE)) {
507                         printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
508                                "in long mode\n");
509                         kvm_inject_gp(vcpu, 0);
510                         return;
511                 }
512         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
513                    && ((cr4 ^ old_cr4) & pdptr_bits)
514                    && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
515                 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
516                 kvm_inject_gp(vcpu, 0);
517                 return;
518         }
519
520         if (cr4 & X86_CR4_VMXE) {
521                 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
522                 kvm_inject_gp(vcpu, 0);
523                 return;
524         }
525         kvm_x86_ops->set_cr4(vcpu, cr4);
526         vcpu->arch.cr4 = cr4;
527         vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
528         kvm_mmu_reset_context(vcpu);
529 }
530 EXPORT_SYMBOL_GPL(kvm_set_cr4);
531
532 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
533 {
534         if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
535                 kvm_mmu_sync_roots(vcpu);
536                 kvm_mmu_flush_tlb(vcpu);
537                 return;
538         }
539
540         if (is_long_mode(vcpu)) {
541                 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
542                         printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
543                         kvm_inject_gp(vcpu, 0);
544                         return;
545                 }
546         } else {
547                 if (is_pae(vcpu)) {
548                         if (cr3 & CR3_PAE_RESERVED_BITS) {
549                                 printk(KERN_DEBUG
550                                        "set_cr3: #GP, reserved bits\n");
551                                 kvm_inject_gp(vcpu, 0);
552                                 return;
553                         }
554                         if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
555                                 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
556                                        "reserved bits\n");
557                                 kvm_inject_gp(vcpu, 0);
558                                 return;
559                         }
560                 }
561                 /*
562                  * We don't check reserved bits in nonpae mode, because
563                  * this isn't enforced, and VMware depends on this.
564                  */
565         }
566
567         /*
568          * Does the new cr3 value map to physical memory? (Note, we
569          * catch an invalid cr3 even in real-mode, because it would
570          * cause trouble later on when we turn on paging anyway.)
571          *
572          * A real CPU would silently accept an invalid cr3 and would
573          * attempt to use it - with largely undefined (and often hard
574          * to debug) behavior on the guest side.
575          */
576         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
577                 kvm_inject_gp(vcpu, 0);
578         else {
579                 vcpu->arch.cr3 = cr3;
580                 vcpu->arch.mmu.new_cr3(vcpu);
581         }
582 }
583 EXPORT_SYMBOL_GPL(kvm_set_cr3);
584
585 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
586 {
587         if (cr8 & CR8_RESERVED_BITS) {
588                 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
589                 kvm_inject_gp(vcpu, 0);
590                 return;
591         }
592         if (irqchip_in_kernel(vcpu->kvm))
593                 kvm_lapic_set_tpr(vcpu, cr8);
594         else
595                 vcpu->arch.cr8 = cr8;
596 }
597 EXPORT_SYMBOL_GPL(kvm_set_cr8);
598
599 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
600 {
601         if (irqchip_in_kernel(vcpu->kvm))
602                 return kvm_lapic_get_cr8(vcpu);
603         else
604                 return vcpu->arch.cr8;
605 }
606 EXPORT_SYMBOL_GPL(kvm_get_cr8);
607
608 static inline u32 bit(int bitno)
609 {
610         return 1 << (bitno & 31);
611 }
612
613 /*
614  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
615  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
616  *
617  * This list is modified at module load time to reflect the
618  * capabilities of the host cpu. This capabilities test skips MSRs that are
619  * kvm-specific. Those are put in the beginning of the list.
620  */
621
622 #define KVM_SAVE_MSRS_BEGIN     2
623 static u32 msrs_to_save[] = {
624         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
625         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
626         MSR_K6_STAR,
627 #ifdef CONFIG_X86_64
628         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
629 #endif
630         MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
631 };
632
633 static unsigned num_msrs_to_save;
634
635 static u32 emulated_msrs[] = {
636         MSR_IA32_MISC_ENABLE,
637 };
638
639 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
640 {
641         if (efer & efer_reserved_bits) {
642                 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
643                        efer);
644                 kvm_inject_gp(vcpu, 0);
645                 return;
646         }
647
648         if (is_paging(vcpu)
649             && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
650                 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
651                 kvm_inject_gp(vcpu, 0);
652                 return;
653         }
654
655         if (efer & EFER_FFXSR) {
656                 struct kvm_cpuid_entry2 *feat;
657
658                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
659                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
660                         printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
661                         kvm_inject_gp(vcpu, 0);
662                         return;
663                 }
664         }
665
666         if (efer & EFER_SVME) {
667                 struct kvm_cpuid_entry2 *feat;
668
669                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
670                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
671                         printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
672                         kvm_inject_gp(vcpu, 0);
673                         return;
674                 }
675         }
676
677         kvm_x86_ops->set_efer(vcpu, efer);
678
679         efer &= ~EFER_LMA;
680         efer |= vcpu->arch.shadow_efer & EFER_LMA;
681
682         vcpu->arch.shadow_efer = efer;
683
684         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
685         kvm_mmu_reset_context(vcpu);
686 }
687
688 void kvm_enable_efer_bits(u64 mask)
689 {
690        efer_reserved_bits &= ~mask;
691 }
692 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
693
694
695 /*
696  * Writes msr value into into the appropriate "register".
697  * Returns 0 on success, non-0 otherwise.
698  * Assumes vcpu_load() was already called.
699  */
700 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
701 {
702         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
703 }
704
705 /*
706  * Adapt set_msr() to msr_io()'s calling convention
707  */
708 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
709 {
710         return kvm_set_msr(vcpu, index, *data);
711 }
712
713 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
714 {
715         static int version;
716         struct pvclock_wall_clock wc;
717         struct timespec boot;
718
719         if (!wall_clock)
720                 return;
721
722         version++;
723
724         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
725
726         /*
727          * The guest calculates current wall clock time by adding
728          * system time (updated by kvm_write_guest_time below) to the
729          * wall clock specified here.  guest system time equals host
730          * system time for us, thus we must fill in host boot time here.
731          */
732         getboottime(&boot);
733
734         wc.sec = boot.tv_sec;
735         wc.nsec = boot.tv_nsec;
736         wc.version = version;
737
738         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
739
740         version++;
741         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
742 }
743
744 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
745 {
746         uint32_t quotient, remainder;
747
748         /* Don't try to replace with do_div(), this one calculates
749          * "(dividend << 32) / divisor" */
750         __asm__ ( "divl %4"
751                   : "=a" (quotient), "=d" (remainder)
752                   : "0" (0), "1" (dividend), "r" (divisor) );
753         return quotient;
754 }
755
756 static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
757 {
758         uint64_t nsecs = 1000000000LL;
759         int32_t  shift = 0;
760         uint64_t tps64;
761         uint32_t tps32;
762
763         tps64 = tsc_khz * 1000LL;
764         while (tps64 > nsecs*2) {
765                 tps64 >>= 1;
766                 shift--;
767         }
768
769         tps32 = (uint32_t)tps64;
770         while (tps32 <= (uint32_t)nsecs) {
771                 tps32 <<= 1;
772                 shift++;
773         }
774
775         hv_clock->tsc_shift = shift;
776         hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
777
778         pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
779                  __func__, tsc_khz, hv_clock->tsc_shift,
780                  hv_clock->tsc_to_system_mul);
781 }
782
783 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
784
785 static void kvm_write_guest_time(struct kvm_vcpu *v)
786 {
787         struct timespec ts;
788         unsigned long flags;
789         struct kvm_vcpu_arch *vcpu = &v->arch;
790         void *shared_kaddr;
791         unsigned long this_tsc_khz;
792
793         if ((!vcpu->time_page))
794                 return;
795
796         this_tsc_khz = get_cpu_var(cpu_tsc_khz);
797         if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
798                 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
799                 vcpu->hv_clock_tsc_khz = this_tsc_khz;
800         }
801         put_cpu_var(cpu_tsc_khz);
802
803         /* Keep irq disabled to prevent changes to the clock */
804         local_irq_save(flags);
805         kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
806         ktime_get_ts(&ts);
807         monotonic_to_bootbased(&ts);
808         local_irq_restore(flags);
809
810         /* With all the info we got, fill in the values */
811
812         vcpu->hv_clock.system_time = ts.tv_nsec +
813                                      (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
814
815         /*
816          * The interface expects us to write an even number signaling that the
817          * update is finished. Since the guest won't see the intermediate
818          * state, we just increase by 2 at the end.
819          */
820         vcpu->hv_clock.version += 2;
821
822         shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
823
824         memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
825                sizeof(vcpu->hv_clock));
826
827         kunmap_atomic(shared_kaddr, KM_USER0);
828
829         mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
830 }
831
832 static int kvm_request_guest_time_update(struct kvm_vcpu *v)
833 {
834         struct kvm_vcpu_arch *vcpu = &v->arch;
835
836         if (!vcpu->time_page)
837                 return 0;
838         set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
839         return 1;
840 }
841
842 static bool msr_mtrr_valid(unsigned msr)
843 {
844         switch (msr) {
845         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
846         case MSR_MTRRfix64K_00000:
847         case MSR_MTRRfix16K_80000:
848         case MSR_MTRRfix16K_A0000:
849         case MSR_MTRRfix4K_C0000:
850         case MSR_MTRRfix4K_C8000:
851         case MSR_MTRRfix4K_D0000:
852         case MSR_MTRRfix4K_D8000:
853         case MSR_MTRRfix4K_E0000:
854         case MSR_MTRRfix4K_E8000:
855         case MSR_MTRRfix4K_F0000:
856         case MSR_MTRRfix4K_F8000:
857         case MSR_MTRRdefType:
858         case MSR_IA32_CR_PAT:
859                 return true;
860         case 0x2f8:
861                 return true;
862         }
863         return false;
864 }
865
866 static bool valid_pat_type(unsigned t)
867 {
868         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
869 }
870
871 static bool valid_mtrr_type(unsigned t)
872 {
873         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
874 }
875
876 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
877 {
878         int i;
879
880         if (!msr_mtrr_valid(msr))
881                 return false;
882
883         if (msr == MSR_IA32_CR_PAT) {
884                 for (i = 0; i < 8; i++)
885                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
886                                 return false;
887                 return true;
888         } else if (msr == MSR_MTRRdefType) {
889                 if (data & ~0xcff)
890                         return false;
891                 return valid_mtrr_type(data & 0xff);
892         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
893                 for (i = 0; i < 8 ; i++)
894                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
895                                 return false;
896                 return true;
897         }
898
899         /* variable MTRRs */
900         return valid_mtrr_type(data & 0xff);
901 }
902
903 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
904 {
905         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
906
907         if (!mtrr_valid(vcpu, msr, data))
908                 return 1;
909
910         if (msr == MSR_MTRRdefType) {
911                 vcpu->arch.mtrr_state.def_type = data;
912                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
913         } else if (msr == MSR_MTRRfix64K_00000)
914                 p[0] = data;
915         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
916                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
917         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
918                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
919         else if (msr == MSR_IA32_CR_PAT)
920                 vcpu->arch.pat = data;
921         else {  /* Variable MTRRs */
922                 int idx, is_mtrr_mask;
923                 u64 *pt;
924
925                 idx = (msr - 0x200) / 2;
926                 is_mtrr_mask = msr - 0x200 - 2 * idx;
927                 if (!is_mtrr_mask)
928                         pt =
929                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
930                 else
931                         pt =
932                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
933                 *pt = data;
934         }
935
936         kvm_mmu_reset_context(vcpu);
937         return 0;
938 }
939
940 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
941 {
942         u64 mcg_cap = vcpu->arch.mcg_cap;
943         unsigned bank_num = mcg_cap & 0xff;
944
945         switch (msr) {
946         case MSR_IA32_MCG_STATUS:
947                 vcpu->arch.mcg_status = data;
948                 break;
949         case MSR_IA32_MCG_CTL:
950                 if (!(mcg_cap & MCG_CTL_P))
951                         return 1;
952                 if (data != 0 && data != ~(u64)0)
953                         return -1;
954                 vcpu->arch.mcg_ctl = data;
955                 break;
956         default:
957                 if (msr >= MSR_IA32_MC0_CTL &&
958                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
959                         u32 offset = msr - MSR_IA32_MC0_CTL;
960                         /* only 0 or all 1s can be written to IA32_MCi_CTL */
961                         if ((offset & 0x3) == 0 &&
962                             data != 0 && data != ~(u64)0)
963                                 return -1;
964                         vcpu->arch.mce_banks[offset] = data;
965                         break;
966                 }
967                 return 1;
968         }
969         return 0;
970 }
971
972 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
973 {
974         struct kvm *kvm = vcpu->kvm;
975         int lm = is_long_mode(vcpu);
976         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
977                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
978         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
979                 : kvm->arch.xen_hvm_config.blob_size_32;
980         u32 page_num = data & ~PAGE_MASK;
981         u64 page_addr = data & PAGE_MASK;
982         u8 *page;
983         int r;
984
985         r = -E2BIG;
986         if (page_num >= blob_size)
987                 goto out;
988         r = -ENOMEM;
989         page = kzalloc(PAGE_SIZE, GFP_KERNEL);
990         if (!page)
991                 goto out;
992         r = -EFAULT;
993         if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
994                 goto out_free;
995         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
996                 goto out_free;
997         r = 0;
998 out_free:
999         kfree(page);
1000 out:
1001         return r;
1002 }
1003
1004 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1005 {
1006         switch (msr) {
1007         case MSR_EFER:
1008                 set_efer(vcpu, data);
1009                 break;
1010         case MSR_K7_HWCR:
1011                 data &= ~(u64)0x40;     /* ignore flush filter disable */
1012                 if (data != 0) {
1013                         pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1014                                 data);
1015                         return 1;
1016                 }
1017                 break;
1018         case MSR_FAM10H_MMIO_CONF_BASE:
1019                 if (data != 0) {
1020                         pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1021                                 "0x%llx\n", data);
1022                         return 1;
1023                 }
1024                 break;
1025         case MSR_AMD64_NB_CFG:
1026                 break;
1027         case MSR_IA32_DEBUGCTLMSR:
1028                 if (!data) {
1029                         /* We support the non-activated case already */
1030                         break;
1031                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1032                         /* Values other than LBR and BTF are vendor-specific,
1033                            thus reserved and should throw a #GP */
1034                         return 1;
1035                 }
1036                 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1037                         __func__, data);
1038                 break;
1039         case MSR_IA32_UCODE_REV:
1040         case MSR_IA32_UCODE_WRITE:
1041         case MSR_VM_HSAVE_PA:
1042         case MSR_AMD64_PATCH_LOADER:
1043                 break;
1044         case 0x200 ... 0x2ff:
1045                 return set_msr_mtrr(vcpu, msr, data);
1046         case MSR_IA32_APICBASE:
1047                 kvm_set_apic_base(vcpu, data);
1048                 break;
1049         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1050                 return kvm_x2apic_msr_write(vcpu, msr, data);
1051         case MSR_IA32_MISC_ENABLE:
1052                 vcpu->arch.ia32_misc_enable_msr = data;
1053                 break;
1054         case MSR_KVM_WALL_CLOCK:
1055                 vcpu->kvm->arch.wall_clock = data;
1056                 kvm_write_wall_clock(vcpu->kvm, data);
1057                 break;
1058         case MSR_KVM_SYSTEM_TIME: {
1059                 if (vcpu->arch.time_page) {
1060                         kvm_release_page_dirty(vcpu->arch.time_page);
1061                         vcpu->arch.time_page = NULL;
1062                 }
1063
1064                 vcpu->arch.time = data;
1065
1066                 /* we verify if the enable bit is set... */
1067                 if (!(data & 1))
1068                         break;
1069
1070                 /* ...but clean it before doing the actual write */
1071                 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1072
1073                 vcpu->arch.time_page =
1074                                 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
1075
1076                 if (is_error_page(vcpu->arch.time_page)) {
1077                         kvm_release_page_clean(vcpu->arch.time_page);
1078                         vcpu->arch.time_page = NULL;
1079                 }
1080
1081                 kvm_request_guest_time_update(vcpu);
1082                 break;
1083         }
1084         case MSR_IA32_MCG_CTL:
1085         case MSR_IA32_MCG_STATUS:
1086         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1087                 return set_msr_mce(vcpu, msr, data);
1088
1089         /* Performance counters are not protected by a CPUID bit,
1090          * so we should check all of them in the generic path for the sake of
1091          * cross vendor migration.
1092          * Writing a zero into the event select MSRs disables them,
1093          * which we perfectly emulate ;-). Any other value should be at least
1094          * reported, some guests depend on them.
1095          */
1096         case MSR_P6_EVNTSEL0:
1097         case MSR_P6_EVNTSEL1:
1098         case MSR_K7_EVNTSEL0:
1099         case MSR_K7_EVNTSEL1:
1100         case MSR_K7_EVNTSEL2:
1101         case MSR_K7_EVNTSEL3:
1102                 if (data != 0)
1103                         pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1104                                 "0x%x data 0x%llx\n", msr, data);
1105                 break;
1106         /* at least RHEL 4 unconditionally writes to the perfctr registers,
1107          * so we ignore writes to make it happy.
1108          */
1109         case MSR_P6_PERFCTR0:
1110         case MSR_P6_PERFCTR1:
1111         case MSR_K7_PERFCTR0:
1112         case MSR_K7_PERFCTR1:
1113         case MSR_K7_PERFCTR2:
1114         case MSR_K7_PERFCTR3:
1115                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1116                         "0x%x data 0x%llx\n", msr, data);
1117                 break;
1118         default:
1119                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1120                         return xen_hvm_config(vcpu, data);
1121                 if (!ignore_msrs) {
1122                         pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1123                                 msr, data);
1124                         return 1;
1125                 } else {
1126                         pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1127                                 msr, data);
1128                         break;
1129                 }
1130         }
1131         return 0;
1132 }
1133 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1134
1135
1136 /*
1137  * Reads an msr value (of 'msr_index') into 'pdata'.
1138  * Returns 0 on success, non-0 otherwise.
1139  * Assumes vcpu_load() was already called.
1140  */
1141 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1142 {
1143         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1144 }
1145
1146 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1147 {
1148         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1149
1150         if (!msr_mtrr_valid(msr))
1151                 return 1;
1152
1153         if (msr == MSR_MTRRdefType)
1154                 *pdata = vcpu->arch.mtrr_state.def_type +
1155                          (vcpu->arch.mtrr_state.enabled << 10);
1156         else if (msr == MSR_MTRRfix64K_00000)
1157                 *pdata = p[0];
1158         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1159                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1160         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1161                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1162         else if (msr == MSR_IA32_CR_PAT)
1163                 *pdata = vcpu->arch.pat;
1164         else {  /* Variable MTRRs */
1165                 int idx, is_mtrr_mask;
1166                 u64 *pt;
1167
1168                 idx = (msr - 0x200) / 2;
1169                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1170                 if (!is_mtrr_mask)
1171                         pt =
1172                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1173                 else
1174                         pt =
1175                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1176                 *pdata = *pt;
1177         }
1178
1179         return 0;
1180 }
1181
1182 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1183 {
1184         u64 data;
1185         u64 mcg_cap = vcpu->arch.mcg_cap;
1186         unsigned bank_num = mcg_cap & 0xff;
1187
1188         switch (msr) {
1189         case MSR_IA32_P5_MC_ADDR:
1190         case MSR_IA32_P5_MC_TYPE:
1191                 data = 0;
1192                 break;
1193         case MSR_IA32_MCG_CAP:
1194                 data = vcpu->arch.mcg_cap;
1195                 break;
1196         case MSR_IA32_MCG_CTL:
1197                 if (!(mcg_cap & MCG_CTL_P))
1198                         return 1;
1199                 data = vcpu->arch.mcg_ctl;
1200                 break;
1201         case MSR_IA32_MCG_STATUS:
1202                 data = vcpu->arch.mcg_status;
1203                 break;
1204         default:
1205                 if (msr >= MSR_IA32_MC0_CTL &&
1206                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1207                         u32 offset = msr - MSR_IA32_MC0_CTL;
1208                         data = vcpu->arch.mce_banks[offset];
1209                         break;
1210                 }
1211                 return 1;
1212         }
1213         *pdata = data;
1214         return 0;
1215 }
1216
1217 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1218 {
1219         u64 data;
1220
1221         switch (msr) {
1222         case MSR_IA32_PLATFORM_ID:
1223         case MSR_IA32_UCODE_REV:
1224         case MSR_IA32_EBL_CR_POWERON:
1225         case MSR_IA32_DEBUGCTLMSR:
1226         case MSR_IA32_LASTBRANCHFROMIP:
1227         case MSR_IA32_LASTBRANCHTOIP:
1228         case MSR_IA32_LASTINTFROMIP:
1229         case MSR_IA32_LASTINTTOIP:
1230         case MSR_K8_SYSCFG:
1231         case MSR_K7_HWCR:
1232         case MSR_VM_HSAVE_PA:
1233         case MSR_P6_PERFCTR0:
1234         case MSR_P6_PERFCTR1:
1235         case MSR_P6_EVNTSEL0:
1236         case MSR_P6_EVNTSEL1:
1237         case MSR_K7_EVNTSEL0:
1238         case MSR_K7_PERFCTR0:
1239         case MSR_K8_INT_PENDING_MSG:
1240         case MSR_AMD64_NB_CFG:
1241         case MSR_FAM10H_MMIO_CONF_BASE:
1242                 data = 0;
1243                 break;
1244         case MSR_MTRRcap:
1245                 data = 0x500 | KVM_NR_VAR_MTRR;
1246                 break;
1247         case 0x200 ... 0x2ff:
1248                 return get_msr_mtrr(vcpu, msr, pdata);
1249         case 0xcd: /* fsb frequency */
1250                 data = 3;
1251                 break;
1252         case MSR_IA32_APICBASE:
1253                 data = kvm_get_apic_base(vcpu);
1254                 break;
1255         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1256                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1257                 break;
1258         case MSR_IA32_MISC_ENABLE:
1259                 data = vcpu->arch.ia32_misc_enable_msr;
1260                 break;
1261         case MSR_IA32_PERF_STATUS:
1262                 /* TSC increment by tick */
1263                 data = 1000ULL;
1264                 /* CPU multiplier */
1265                 data |= (((uint64_t)4ULL) << 40);
1266                 break;
1267         case MSR_EFER:
1268                 data = vcpu->arch.shadow_efer;
1269                 break;
1270         case MSR_KVM_WALL_CLOCK:
1271                 data = vcpu->kvm->arch.wall_clock;
1272                 break;
1273         case MSR_KVM_SYSTEM_TIME:
1274                 data = vcpu->arch.time;
1275                 break;
1276         case MSR_IA32_P5_MC_ADDR:
1277         case MSR_IA32_P5_MC_TYPE:
1278         case MSR_IA32_MCG_CAP:
1279         case MSR_IA32_MCG_CTL:
1280         case MSR_IA32_MCG_STATUS:
1281         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1282                 return get_msr_mce(vcpu, msr, pdata);
1283         default:
1284                 if (!ignore_msrs) {
1285                         pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1286                         return 1;
1287                 } else {
1288                         pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1289                         data = 0;
1290                 }
1291                 break;
1292         }
1293         *pdata = data;
1294         return 0;
1295 }
1296 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1297
1298 /*
1299  * Read or write a bunch of msrs. All parameters are kernel addresses.
1300  *
1301  * @return number of msrs set successfully.
1302  */
1303 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1304                     struct kvm_msr_entry *entries,
1305                     int (*do_msr)(struct kvm_vcpu *vcpu,
1306                                   unsigned index, u64 *data))
1307 {
1308         int i;
1309
1310         vcpu_load(vcpu);
1311
1312         down_read(&vcpu->kvm->slots_lock);
1313         for (i = 0; i < msrs->nmsrs; ++i)
1314                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1315                         break;
1316         up_read(&vcpu->kvm->slots_lock);
1317
1318         vcpu_put(vcpu);
1319
1320         return i;
1321 }
1322
1323 /*
1324  * Read or write a bunch of msrs. Parameters are user addresses.
1325  *
1326  * @return number of msrs set successfully.
1327  */
1328 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1329                   int (*do_msr)(struct kvm_vcpu *vcpu,
1330                                 unsigned index, u64 *data),
1331                   int writeback)
1332 {
1333         struct kvm_msrs msrs;
1334         struct kvm_msr_entry *entries;
1335         int r, n;
1336         unsigned size;
1337
1338         r = -EFAULT;
1339         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1340                 goto out;
1341
1342         r = -E2BIG;
1343         if (msrs.nmsrs >= MAX_IO_MSRS)
1344                 goto out;
1345
1346         r = -ENOMEM;
1347         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1348         entries = vmalloc(size);
1349         if (!entries)
1350                 goto out;
1351
1352         r = -EFAULT;
1353         if (copy_from_user(entries, user_msrs->entries, size))
1354                 goto out_free;
1355
1356         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1357         if (r < 0)
1358                 goto out_free;
1359
1360         r = -EFAULT;
1361         if (writeback && copy_to_user(user_msrs->entries, entries, size))
1362                 goto out_free;
1363
1364         r = n;
1365
1366 out_free:
1367         vfree(entries);
1368 out:
1369         return r;
1370 }
1371
1372 int kvm_dev_ioctl_check_extension(long ext)
1373 {
1374         int r;
1375
1376         switch (ext) {
1377         case KVM_CAP_IRQCHIP:
1378         case KVM_CAP_HLT:
1379         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
1380         case KVM_CAP_SET_TSS_ADDR:
1381         case KVM_CAP_EXT_CPUID:
1382         case KVM_CAP_CLOCKSOURCE:
1383         case KVM_CAP_PIT:
1384         case KVM_CAP_NOP_IO_DELAY:
1385         case KVM_CAP_MP_STATE:
1386         case KVM_CAP_SYNC_MMU:
1387         case KVM_CAP_REINJECT_CONTROL:
1388         case KVM_CAP_IRQ_INJECT_STATUS:
1389         case KVM_CAP_ASSIGN_DEV_IRQ:
1390         case KVM_CAP_IRQFD:
1391         case KVM_CAP_IOEVENTFD:
1392         case KVM_CAP_PIT2:
1393         case KVM_CAP_PIT_STATE2:
1394         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1395         case KVM_CAP_XEN_HVM:
1396         case KVM_CAP_ADJUST_CLOCK:
1397         case KVM_CAP_VCPU_EVENTS:
1398                 r = 1;
1399                 break;
1400         case KVM_CAP_COALESCED_MMIO:
1401                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1402                 break;
1403         case KVM_CAP_VAPIC:
1404                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1405                 break;
1406         case KVM_CAP_NR_VCPUS:
1407                 r = KVM_MAX_VCPUS;
1408                 break;
1409         case KVM_CAP_NR_MEMSLOTS:
1410                 r = KVM_MEMORY_SLOTS;
1411                 break;
1412         case KVM_CAP_PV_MMU:    /* obsolete */
1413                 r = 0;
1414                 break;
1415         case KVM_CAP_IOMMU:
1416                 r = iommu_found();
1417                 break;
1418         case KVM_CAP_MCE:
1419                 r = KVM_MAX_MCE_BANKS;
1420                 break;
1421         default:
1422                 r = 0;
1423                 break;
1424         }
1425         return r;
1426
1427 }
1428
1429 long kvm_arch_dev_ioctl(struct file *filp,
1430                         unsigned int ioctl, unsigned long arg)
1431 {
1432         void __user *argp = (void __user *)arg;
1433         long r;
1434
1435         switch (ioctl) {
1436         case KVM_GET_MSR_INDEX_LIST: {
1437                 struct kvm_msr_list __user *user_msr_list = argp;
1438                 struct kvm_msr_list msr_list;
1439                 unsigned n;
1440
1441                 r = -EFAULT;
1442                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1443                         goto out;
1444                 n = msr_list.nmsrs;
1445                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1446                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1447                         goto out;
1448                 r = -E2BIG;
1449                 if (n < msr_list.nmsrs)
1450                         goto out;
1451                 r = -EFAULT;
1452                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1453                                  num_msrs_to_save * sizeof(u32)))
1454                         goto out;
1455                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
1456                                  &emulated_msrs,
1457                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1458                         goto out;
1459                 r = 0;
1460                 break;
1461         }
1462         case KVM_GET_SUPPORTED_CPUID: {
1463                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1464                 struct kvm_cpuid2 cpuid;
1465
1466                 r = -EFAULT;
1467                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1468                         goto out;
1469                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
1470                                                       cpuid_arg->entries);
1471                 if (r)
1472                         goto out;
1473
1474                 r = -EFAULT;
1475                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1476                         goto out;
1477                 r = 0;
1478                 break;
1479         }
1480         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1481                 u64 mce_cap;
1482
1483                 mce_cap = KVM_MCE_CAP_SUPPORTED;
1484                 r = -EFAULT;
1485                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1486                         goto out;
1487                 r = 0;
1488                 break;
1489         }
1490         default:
1491                 r = -EINVAL;
1492         }
1493 out:
1494         return r;
1495 }
1496
1497 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1498 {
1499         kvm_x86_ops->vcpu_load(vcpu, cpu);
1500         if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1501                 unsigned long khz = cpufreq_quick_get(cpu);
1502                 if (!khz)
1503                         khz = tsc_khz;
1504                 per_cpu(cpu_tsc_khz, cpu) = khz;
1505         }
1506         kvm_request_guest_time_update(vcpu);
1507 }
1508
1509 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1510 {
1511         kvm_x86_ops->vcpu_put(vcpu);
1512         kvm_put_guest_fpu(vcpu);
1513 }
1514
1515 static int is_efer_nx(void)
1516 {
1517         unsigned long long efer = 0;
1518
1519         rdmsrl_safe(MSR_EFER, &efer);
1520         return efer & EFER_NX;
1521 }
1522
1523 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1524 {
1525         int i;
1526         struct kvm_cpuid_entry2 *e, *entry;
1527
1528         entry = NULL;
1529         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1530                 e = &vcpu->arch.cpuid_entries[i];
1531                 if (e->function == 0x80000001) {
1532                         entry = e;
1533                         break;
1534                 }
1535         }
1536         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
1537                 entry->edx &= ~(1 << 20);
1538                 printk(KERN_INFO "kvm: guest NX capability removed\n");
1539         }
1540 }
1541
1542 /* when an old userspace process fills a new kernel module */
1543 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1544                                     struct kvm_cpuid *cpuid,
1545                                     struct kvm_cpuid_entry __user *entries)
1546 {
1547         int r, i;
1548         struct kvm_cpuid_entry *cpuid_entries;
1549
1550         r = -E2BIG;
1551         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1552                 goto out;
1553         r = -ENOMEM;
1554         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1555         if (!cpuid_entries)
1556                 goto out;
1557         r = -EFAULT;
1558         if (copy_from_user(cpuid_entries, entries,
1559                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1560                 goto out_free;
1561         for (i = 0; i < cpuid->nent; i++) {
1562                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1563                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1564                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1565                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1566                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1567                 vcpu->arch.cpuid_entries[i].index = 0;
1568                 vcpu->arch.cpuid_entries[i].flags = 0;
1569                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1570                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1571                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
1572         }
1573         vcpu->arch.cpuid_nent = cpuid->nent;
1574         cpuid_fix_nx_cap(vcpu);
1575         r = 0;
1576         kvm_apic_set_version(vcpu);
1577         kvm_x86_ops->cpuid_update(vcpu);
1578
1579 out_free:
1580         vfree(cpuid_entries);
1581 out:
1582         return r;
1583 }
1584
1585 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1586                                      struct kvm_cpuid2 *cpuid,
1587                                      struct kvm_cpuid_entry2 __user *entries)
1588 {
1589         int r;
1590
1591         r = -E2BIG;
1592         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1593                 goto out;
1594         r = -EFAULT;
1595         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
1596                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
1597                 goto out;
1598         vcpu->arch.cpuid_nent = cpuid->nent;
1599         kvm_apic_set_version(vcpu);
1600         kvm_x86_ops->cpuid_update(vcpu);
1601         return 0;
1602
1603 out:
1604         return r;
1605 }
1606
1607 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1608                                      struct kvm_cpuid2 *cpuid,
1609                                      struct kvm_cpuid_entry2 __user *entries)
1610 {
1611         int r;
1612
1613         r = -E2BIG;
1614         if (cpuid->nent < vcpu->arch.cpuid_nent)
1615                 goto out;
1616         r = -EFAULT;
1617         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
1618                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
1619                 goto out;
1620         return 0;
1621
1622 out:
1623         cpuid->nent = vcpu->arch.cpuid_nent;
1624         return r;
1625 }
1626
1627 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1628                            u32 index)
1629 {
1630         entry->function = function;
1631         entry->index = index;
1632         cpuid_count(entry->function, entry->index,
1633                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1634         entry->flags = 0;
1635 }
1636
1637 #define F(x) bit(X86_FEATURE_##x)
1638
1639 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1640                          u32 index, int *nent, int maxnent)
1641 {
1642         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1643         unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
1644 #ifdef CONFIG_X86_64
1645         unsigned f_lm = F(LM);
1646 #else
1647         unsigned f_lm = 0;
1648 #endif
1649         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
1650
1651         /* cpuid 1.edx */
1652         const u32 kvm_supported_word0_x86_features =
1653                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1654                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1655                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1656                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1657                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1658                 0 /* Reserved, DS, ACPI */ | F(MMX) |
1659                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1660                 0 /* HTT, TM, Reserved, PBE */;
1661         /* cpuid 0x80000001.edx */
1662         const u32 kvm_supported_word1_x86_features =
1663                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1664                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1665                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1666                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1667                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1668                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1669                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
1670                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1671         /* cpuid 1.ecx */
1672         const u32 kvm_supported_word4_x86_features =
1673                 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1674                 0 /* DS-CPL, VMX, SMX, EST */ |
1675                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1676                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1677                 0 /* Reserved, DCA */ | F(XMM4_1) |
1678                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
1679                 0 /* Reserved, XSAVE, OSXSAVE */;
1680         /* cpuid 0x80000001.ecx */
1681         const u32 kvm_supported_word6_x86_features =
1682                 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1683                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1684                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1685                 0 /* SKINIT */ | 0 /* WDT */;
1686
1687         /* all calls to cpuid_count() should be made on the same cpu */
1688         get_cpu();
1689         do_cpuid_1_ent(entry, function, index);
1690         ++*nent;
1691
1692         switch (function) {
1693         case 0:
1694                 entry->eax = min(entry->eax, (u32)0xb);
1695                 break;
1696         case 1:
1697                 entry->edx &= kvm_supported_word0_x86_features;
1698                 entry->ecx &= kvm_supported_word4_x86_features;
1699                 /* we support x2apic emulation even if host does not support
1700                  * it since we emulate x2apic in software */
1701                 entry->ecx |= F(X2APIC);
1702                 break;
1703         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1704          * may return different values. This forces us to get_cpu() before
1705          * issuing the first command, and also to emulate this annoying behavior
1706          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1707         case 2: {
1708                 int t, times = entry->eax & 0xff;
1709
1710                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1711                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
1712                 for (t = 1; t < times && *nent < maxnent; ++t) {
1713                         do_cpuid_1_ent(&entry[t], function, 0);
1714                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1715                         ++*nent;
1716                 }
1717                 break;
1718         }
1719         /* function 4 and 0xb have additional index. */
1720         case 4: {
1721                 int i, cache_type;
1722
1723                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1724                 /* read more entries until cache_type is zero */
1725                 for (i = 1; *nent < maxnent; ++i) {
1726                         cache_type = entry[i - 1].eax & 0x1f;
1727                         if (!cache_type)
1728                                 break;
1729                         do_cpuid_1_ent(&entry[i], function, i);
1730                         entry[i].flags |=
1731                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1732                         ++*nent;
1733                 }
1734                 break;
1735         }
1736         case 0xb: {
1737                 int i, level_type;
1738
1739                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1740                 /* read more entries until level_type is zero */
1741                 for (i = 1; *nent < maxnent; ++i) {
1742                         level_type = entry[i - 1].ecx & 0xff00;
1743                         if (!level_type)
1744                                 break;
1745                         do_cpuid_1_ent(&entry[i], function, i);
1746                         entry[i].flags |=
1747                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1748                         ++*nent;
1749                 }
1750                 break;
1751         }
1752         case 0x80000000:
1753                 entry->eax = min(entry->eax, 0x8000001a);
1754                 break;
1755         case 0x80000001:
1756                 entry->edx &= kvm_supported_word1_x86_features;
1757                 entry->ecx &= kvm_supported_word6_x86_features;
1758                 break;
1759         }
1760         put_cpu();
1761 }
1762
1763 #undef F
1764
1765 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1766                                      struct kvm_cpuid_entry2 __user *entries)
1767 {
1768         struct kvm_cpuid_entry2 *cpuid_entries;
1769         int limit, nent = 0, r = -E2BIG;
1770         u32 func;
1771
1772         if (cpuid->nent < 1)
1773                 goto out;
1774         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1775                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1776         r = -ENOMEM;
1777         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1778         if (!cpuid_entries)
1779                 goto out;
1780
1781         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1782         limit = cpuid_entries[0].eax;
1783         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1784                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1785                              &nent, cpuid->nent);
1786         r = -E2BIG;
1787         if (nent >= cpuid->nent)
1788                 goto out_free;
1789
1790         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1791         limit = cpuid_entries[nent - 1].eax;
1792         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1793                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1794                              &nent, cpuid->nent);
1795         r = -E2BIG;
1796         if (nent >= cpuid->nent)
1797                 goto out_free;
1798
1799         r = -EFAULT;
1800         if (copy_to_user(entries, cpuid_entries,
1801                          nent * sizeof(struct kvm_cpuid_entry2)))
1802                 goto out_free;
1803         cpuid->nent = nent;
1804         r = 0;
1805
1806 out_free:
1807         vfree(cpuid_entries);
1808 out:
1809         return r;
1810 }
1811
1812 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1813                                     struct kvm_lapic_state *s)
1814 {
1815         vcpu_load(vcpu);
1816         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
1817         vcpu_put(vcpu);
1818
1819         return 0;
1820 }
1821
1822 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1823                                     struct kvm_lapic_state *s)
1824 {
1825         vcpu_load(vcpu);
1826         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1827         kvm_apic_post_state_restore(vcpu);
1828         update_cr8_intercept(vcpu);
1829         vcpu_put(vcpu);
1830
1831         return 0;
1832 }
1833
1834 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1835                                     struct kvm_interrupt *irq)
1836 {
1837         if (irq->irq < 0 || irq->irq >= 256)
1838                 return -EINVAL;
1839         if (irqchip_in_kernel(vcpu->kvm))
1840                 return -ENXIO;
1841         vcpu_load(vcpu);
1842
1843         kvm_queue_interrupt(vcpu, irq->irq, false);
1844
1845         vcpu_put(vcpu);
1846
1847         return 0;
1848 }
1849
1850 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
1851 {
1852         vcpu_load(vcpu);
1853         kvm_inject_nmi(vcpu);
1854         vcpu_put(vcpu);
1855
1856         return 0;
1857 }
1858
1859 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1860                                            struct kvm_tpr_access_ctl *tac)
1861 {
1862         if (tac->flags)
1863                 return -EINVAL;
1864         vcpu->arch.tpr_access_reporting = !!tac->enabled;
1865         return 0;
1866 }
1867
1868 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1869                                         u64 mcg_cap)
1870 {
1871         int r;
1872         unsigned bank_num = mcg_cap & 0xff, bank;
1873
1874         r = -EINVAL;
1875         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
1876                 goto out;
1877         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
1878                 goto out;
1879         r = 0;
1880         vcpu->arch.mcg_cap = mcg_cap;
1881         /* Init IA32_MCG_CTL to all 1s */
1882         if (mcg_cap & MCG_CTL_P)
1883                 vcpu->arch.mcg_ctl = ~(u64)0;
1884         /* Init IA32_MCi_CTL to all 1s */
1885         for (bank = 0; bank < bank_num; bank++)
1886                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1887 out:
1888         return r;
1889 }
1890
1891 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1892                                       struct kvm_x86_mce *mce)
1893 {
1894         u64 mcg_cap = vcpu->arch.mcg_cap;
1895         unsigned bank_num = mcg_cap & 0xff;
1896         u64 *banks = vcpu->arch.mce_banks;
1897
1898         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
1899                 return -EINVAL;
1900         /*
1901          * if IA32_MCG_CTL is not all 1s, the uncorrected error
1902          * reporting is disabled
1903          */
1904         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1905             vcpu->arch.mcg_ctl != ~(u64)0)
1906                 return 0;
1907         banks += 4 * mce->bank;
1908         /*
1909          * if IA32_MCi_CTL is not all 1s, the uncorrected error
1910          * reporting is disabled for the bank
1911          */
1912         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
1913                 return 0;
1914         if (mce->status & MCI_STATUS_UC) {
1915                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
1916                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
1917                         printk(KERN_DEBUG "kvm: set_mce: "
1918                                "injects mce exception while "
1919                                "previous one is in progress!\n");
1920                         set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1921                         return 0;
1922                 }
1923                 if (banks[1] & MCI_STATUS_VAL)
1924                         mce->status |= MCI_STATUS_OVER;
1925                 banks[2] = mce->addr;
1926                 banks[3] = mce->misc;
1927                 vcpu->arch.mcg_status = mce->mcg_status;
1928                 banks[1] = mce->status;
1929                 kvm_queue_exception(vcpu, MC_VECTOR);
1930         } else if (!(banks[1] & MCI_STATUS_VAL)
1931                    || !(banks[1] & MCI_STATUS_UC)) {
1932                 if (banks[1] & MCI_STATUS_VAL)
1933                         mce->status |= MCI_STATUS_OVER;
1934                 banks[2] = mce->addr;
1935                 banks[3] = mce->misc;
1936                 banks[1] = mce->status;
1937         } else
1938                 banks[1] |= MCI_STATUS_OVER;
1939         return 0;
1940 }
1941
1942 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
1943                                                struct kvm_vcpu_events *events)
1944 {
1945         vcpu_load(vcpu);
1946
1947         events->exception.injected = vcpu->arch.exception.pending;
1948         events->exception.nr = vcpu->arch.exception.nr;
1949         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
1950         events->exception.error_code = vcpu->arch.exception.error_code;
1951
1952         events->interrupt.injected = vcpu->arch.interrupt.pending;
1953         events->interrupt.nr = vcpu->arch.interrupt.nr;
1954         events->interrupt.soft = vcpu->arch.interrupt.soft;
1955
1956         events->nmi.injected = vcpu->arch.nmi_injected;
1957         events->nmi.pending = vcpu->arch.nmi_pending;
1958         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
1959
1960         events->sipi_vector = vcpu->arch.sipi_vector;
1961
1962         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
1963                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
1964
1965         vcpu_put(vcpu);
1966 }
1967
1968 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
1969                                               struct kvm_vcpu_events *events)
1970 {
1971         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
1972                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
1973                 return -EINVAL;
1974
1975         vcpu_load(vcpu);
1976
1977         vcpu->arch.exception.pending = events->exception.injected;
1978         vcpu->arch.exception.nr = events->exception.nr;
1979         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
1980         vcpu->arch.exception.error_code = events->exception.error_code;
1981
1982         vcpu->arch.interrupt.pending = events->interrupt.injected;
1983         vcpu->arch.interrupt.nr = events->interrupt.nr;
1984         vcpu->arch.interrupt.soft = events->interrupt.soft;
1985         if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
1986                 kvm_pic_clear_isr_ack(vcpu->kvm);
1987
1988         vcpu->arch.nmi_injected = events->nmi.injected;
1989         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
1990                 vcpu->arch.nmi_pending = events->nmi.pending;
1991         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
1992
1993         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
1994                 vcpu->arch.sipi_vector = events->sipi_vector;
1995
1996         vcpu_put(vcpu);
1997
1998         return 0;
1999 }
2000
2001 long kvm_arch_vcpu_ioctl(struct file *filp,
2002                          unsigned int ioctl, unsigned long arg)
2003 {
2004         struct kvm_vcpu *vcpu = filp->private_data;
2005         void __user *argp = (void __user *)arg;
2006         int r;
2007         struct kvm_lapic_state *lapic = NULL;
2008
2009         switch (ioctl) {
2010         case KVM_GET_LAPIC: {
2011                 r = -EINVAL;
2012                 if (!vcpu->arch.apic)
2013                         goto out;
2014                 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2015
2016                 r = -ENOMEM;
2017                 if (!lapic)
2018                         goto out;
2019                 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
2020                 if (r)
2021                         goto out;
2022                 r = -EFAULT;
2023                 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
2024                         goto out;
2025                 r = 0;
2026                 break;
2027         }
2028         case KVM_SET_LAPIC: {
2029                 r = -EINVAL;
2030                 if (!vcpu->arch.apic)
2031                         goto out;
2032                 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2033                 r = -ENOMEM;
2034                 if (!lapic)
2035                         goto out;
2036                 r = -EFAULT;
2037                 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
2038                         goto out;
2039                 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
2040                 if (r)
2041                         goto out;
2042                 r = 0;
2043                 break;
2044         }
2045         case KVM_INTERRUPT: {
2046                 struct kvm_interrupt irq;
2047
2048                 r = -EFAULT;
2049                 if (copy_from_user(&irq, argp, sizeof irq))
2050                         goto out;
2051                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2052                 if (r)
2053                         goto out;
2054                 r = 0;
2055                 break;
2056         }
2057         case KVM_NMI: {
2058                 r = kvm_vcpu_ioctl_nmi(vcpu);
2059                 if (r)
2060                         goto out;
2061                 r = 0;
2062                 break;
2063         }
2064         case KVM_SET_CPUID: {
2065                 struct kvm_cpuid __user *cpuid_arg = argp;
2066                 struct kvm_cpuid cpuid;
2067
2068                 r = -EFAULT;
2069                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2070                         goto out;
2071                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2072                 if (r)
2073                         goto out;
2074                 break;
2075         }
2076         case KVM_SET_CPUID2: {
2077                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2078                 struct kvm_cpuid2 cpuid;
2079
2080                 r = -EFAULT;
2081                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2082                         goto out;
2083                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2084                                               cpuid_arg->entries);
2085                 if (r)
2086                         goto out;
2087                 break;
2088         }
2089         case KVM_GET_CPUID2: {
2090                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2091                 struct kvm_cpuid2 cpuid;
2092
2093                 r = -EFAULT;
2094                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2095                         goto out;
2096                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2097                                               cpuid_arg->entries);
2098                 if (r)
2099                         goto out;
2100                 r = -EFAULT;
2101                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2102                         goto out;
2103                 r = 0;
2104                 break;
2105         }
2106         case KVM_GET_MSRS:
2107                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2108                 break;
2109         case KVM_SET_MSRS:
2110                 r = msr_io(vcpu, argp, do_set_msr, 0);
2111                 break;
2112         case KVM_TPR_ACCESS_REPORTING: {
2113                 struct kvm_tpr_access_ctl tac;
2114
2115                 r = -EFAULT;
2116                 if (copy_from_user(&tac, argp, sizeof tac))
2117                         goto out;
2118                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2119                 if (r)
2120                         goto out;
2121                 r = -EFAULT;
2122                 if (copy_to_user(argp, &tac, sizeof tac))
2123                         goto out;
2124                 r = 0;
2125                 break;
2126         };
2127         case KVM_SET_VAPIC_ADDR: {
2128                 struct kvm_vapic_addr va;
2129
2130                 r = -EINVAL;
2131                 if (!irqchip_in_kernel(vcpu->kvm))
2132                         goto out;
2133                 r = -EFAULT;
2134                 if (copy_from_user(&va, argp, sizeof va))
2135                         goto out;
2136                 r = 0;
2137                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2138                 break;
2139         }
2140         case KVM_X86_SETUP_MCE: {
2141                 u64 mcg_cap;
2142
2143                 r = -EFAULT;
2144                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2145                         goto out;
2146                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2147                 break;
2148         }
2149         case KVM_X86_SET_MCE: {
2150                 struct kvm_x86_mce mce;
2151
2152                 r = -EFAULT;
2153                 if (copy_from_user(&mce, argp, sizeof mce))
2154                         goto out;
2155                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2156                 break;
2157         }
2158         case KVM_GET_VCPU_EVENTS: {
2159                 struct kvm_vcpu_events events;
2160
2161                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2162
2163                 r = -EFAULT;
2164                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2165                         break;
2166                 r = 0;
2167                 break;
2168         }
2169         case KVM_SET_VCPU_EVENTS: {
2170                 struct kvm_vcpu_events events;
2171
2172                 r = -EFAULT;
2173                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2174                         break;
2175
2176                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2177                 break;
2178         }
2179         default:
2180                 r = -EINVAL;
2181         }
2182 out:
2183         kfree(lapic);
2184         return r;
2185 }
2186
2187 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2188 {
2189         int ret;
2190
2191         if (addr > (unsigned int)(-3 * PAGE_SIZE))
2192                 return -1;
2193         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2194         return ret;
2195 }
2196
2197 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2198                                               u64 ident_addr)
2199 {
2200         kvm->arch.ept_identity_map_addr = ident_addr;
2201         return 0;
2202 }
2203
2204 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2205                                           u32 kvm_nr_mmu_pages)
2206 {
2207         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2208                 return -EINVAL;
2209
2210         down_write(&kvm->slots_lock);
2211         spin_lock(&kvm->mmu_lock);
2212
2213         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2214         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2215
2216         spin_unlock(&kvm->mmu_lock);
2217         up_write(&kvm->slots_lock);
2218         return 0;
2219 }
2220
2221 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2222 {
2223         return kvm->arch.n_alloc_mmu_pages;
2224 }
2225
2226 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2227 {
2228         int i;
2229         struct kvm_mem_alias *alias;
2230
2231         for (i = 0; i < kvm->arch.naliases; ++i) {
2232                 alias = &kvm->arch.aliases[i];
2233                 if (gfn >= alias->base_gfn
2234                     && gfn < alias->base_gfn + alias->npages)
2235                         return alias->target_gfn + gfn - alias->base_gfn;
2236         }
2237         return gfn;
2238 }
2239
2240 /*
2241  * Set a new alias region.  Aliases map a portion of physical memory into
2242  * another portion.  This is useful for memory windows, for example the PC
2243  * VGA region.
2244  */
2245 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2246                                          struct kvm_memory_alias *alias)
2247 {
2248         int r, n;
2249         struct kvm_mem_alias *p;
2250
2251         r = -EINVAL;
2252         /* General sanity checks */
2253         if (alias->memory_size & (PAGE_SIZE - 1))
2254                 goto out;
2255         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2256                 goto out;
2257         if (alias->slot >= KVM_ALIAS_SLOTS)
2258                 goto out;
2259         if (alias->guest_phys_addr + alias->memory_size
2260             < alias->guest_phys_addr)
2261                 goto out;
2262         if (alias->target_phys_addr + alias->memory_size
2263             < alias->target_phys_addr)
2264                 goto out;
2265
2266         down_write(&kvm->slots_lock);
2267         spin_lock(&kvm->mmu_lock);
2268
2269         p = &kvm->arch.aliases[alias->slot];
2270         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2271         p->npages = alias->memory_size >> PAGE_SHIFT;
2272         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
2273
2274         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
2275                 if (kvm->arch.aliases[n - 1].npages)
2276                         break;
2277         kvm->arch.naliases = n;
2278
2279         spin_unlock(&kvm->mmu_lock);
2280         kvm_mmu_zap_all(kvm);
2281
2282         up_write(&kvm->slots_lock);
2283
2284         return 0;
2285
2286 out:
2287         return r;
2288 }
2289
2290 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2291 {
2292         int r;
2293
2294         r = 0;
2295         switch (chip->chip_id) {
2296         case KVM_IRQCHIP_PIC_MASTER:
2297                 memcpy(&chip->chip.pic,
2298                         &pic_irqchip(kvm)->pics[0],
2299                         sizeof(struct kvm_pic_state));
2300                 break;
2301         case KVM_IRQCHIP_PIC_SLAVE:
2302                 memcpy(&chip->chip.pic,
2303                         &pic_irqchip(kvm)->pics[1],
2304                         sizeof(struct kvm_pic_state));
2305                 break;
2306         case KVM_IRQCHIP_IOAPIC:
2307                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2308                 break;
2309         default:
2310                 r = -EINVAL;
2311                 break;
2312         }
2313         return r;
2314 }
2315
2316 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2317 {
2318         int r;
2319
2320         r = 0;
2321         switch (chip->chip_id) {
2322         case KVM_IRQCHIP_PIC_MASTER:
2323                 spin_lock(&pic_irqchip(kvm)->lock);
2324                 memcpy(&pic_irqchip(kvm)->pics[0],
2325                         &chip->chip.pic,
2326                         sizeof(struct kvm_pic_state));
2327                 spin_unlock(&pic_irqchip(kvm)->lock);
2328                 break;
2329         case KVM_IRQCHIP_PIC_SLAVE:
2330                 spin_lock(&pic_irqchip(kvm)->lock);
2331                 memcpy(&pic_irqchip(kvm)->pics[1],
2332                         &chip->chip.pic,
2333                         sizeof(struct kvm_pic_state));
2334                 spin_unlock(&pic_irqchip(kvm)->lock);
2335                 break;
2336         case KVM_IRQCHIP_IOAPIC:
2337                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2338                 break;
2339         default:
2340                 r = -EINVAL;
2341                 break;
2342         }
2343         kvm_pic_update_irq(pic_irqchip(kvm));
2344         return r;
2345 }
2346
2347 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2348 {
2349         int r = 0;
2350
2351         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2352         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2353         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2354         return r;
2355 }
2356
2357 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2358 {
2359         int r = 0;
2360
2361         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2362         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2363         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2364         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2365         return r;
2366 }
2367
2368 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2369 {
2370         int r = 0;
2371
2372         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2373         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2374                 sizeof(ps->channels));
2375         ps->flags = kvm->arch.vpit->pit_state.flags;
2376         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2377         return r;
2378 }
2379
2380 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2381 {
2382         int r = 0, start = 0;
2383         u32 prev_legacy, cur_legacy;
2384         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2385         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2386         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2387         if (!prev_legacy && cur_legacy)
2388                 start = 1;
2389         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2390                sizeof(kvm->arch.vpit->pit_state.channels));
2391         kvm->arch.vpit->pit_state.flags = ps->flags;
2392         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2393         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2394         return r;
2395 }
2396
2397 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2398                                  struct kvm_reinject_control *control)
2399 {
2400         if (!kvm->arch.vpit)
2401                 return -ENXIO;
2402         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2403         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2404         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2405         return 0;
2406 }
2407
2408 /*
2409  * Get (and clear) the dirty memory log for a memory slot.
2410  */
2411 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2412                                       struct kvm_dirty_log *log)
2413 {
2414         int r;
2415         int n;
2416         struct kvm_memory_slot *memslot;
2417         int is_dirty = 0;
2418
2419         down_write(&kvm->slots_lock);
2420
2421         r = kvm_get_dirty_log(kvm, log, &is_dirty);
2422         if (r)
2423                 goto out;
2424
2425         /* If nothing is dirty, don't bother messing with page tables. */
2426         if (is_dirty) {
2427                 spin_lock(&kvm->mmu_lock);
2428                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
2429                 spin_unlock(&kvm->mmu_lock);
2430                 memslot = &kvm->memslots->memslots[log->slot];
2431                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2432                 memset(memslot->dirty_bitmap, 0, n);
2433         }
2434         r = 0;
2435 out:
2436         up_write(&kvm->slots_lock);
2437         return r;
2438 }
2439
2440 long kvm_arch_vm_ioctl(struct file *filp,
2441                        unsigned int ioctl, unsigned long arg)
2442 {
2443         struct kvm *kvm = filp->private_data;
2444         void __user *argp = (void __user *)arg;
2445         int r = -ENOTTY;
2446         /*
2447          * This union makes it completely explicit to gcc-3.x
2448          * that these two variables' stack usage should be
2449          * combined, not added together.
2450          */
2451         union {
2452                 struct kvm_pit_state ps;
2453                 struct kvm_pit_state2 ps2;
2454                 struct kvm_memory_alias alias;
2455                 struct kvm_pit_config pit_config;
2456         } u;
2457
2458         switch (ioctl) {
2459         case KVM_SET_TSS_ADDR:
2460                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2461                 if (r < 0)
2462                         goto out;
2463                 break;
2464         case KVM_SET_IDENTITY_MAP_ADDR: {
2465                 u64 ident_addr;
2466
2467                 r = -EFAULT;
2468                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2469                         goto out;
2470                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2471                 if (r < 0)
2472                         goto out;
2473                 break;
2474         }
2475         case KVM_SET_MEMORY_REGION: {
2476                 struct kvm_memory_region kvm_mem;
2477                 struct kvm_userspace_memory_region kvm_userspace_mem;
2478
2479                 r = -EFAULT;
2480                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2481                         goto out;
2482                 kvm_userspace_mem.slot = kvm_mem.slot;
2483                 kvm_userspace_mem.flags = kvm_mem.flags;
2484                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2485                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2486                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2487                 if (r)
2488                         goto out;
2489                 break;
2490         }
2491         case KVM_SET_NR_MMU_PAGES:
2492                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2493                 if (r)
2494                         goto out;
2495                 break;
2496         case KVM_GET_NR_MMU_PAGES:
2497                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2498                 break;
2499         case KVM_SET_MEMORY_ALIAS:
2500                 r = -EFAULT;
2501                 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
2502                         goto out;
2503                 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
2504                 if (r)
2505                         goto out;
2506                 break;
2507         case KVM_CREATE_IRQCHIP: {
2508                 struct kvm_pic *vpic;
2509
2510                 mutex_lock(&kvm->lock);
2511                 r = -EEXIST;
2512                 if (kvm->arch.vpic)
2513                         goto create_irqchip_unlock;
2514                 r = -ENOMEM;
2515                 vpic = kvm_create_pic(kvm);
2516                 if (vpic) {
2517                         r = kvm_ioapic_init(kvm);
2518                         if (r) {
2519                                 kfree(vpic);
2520                                 goto create_irqchip_unlock;
2521                         }
2522                 } else
2523                         goto create_irqchip_unlock;
2524                 smp_wmb();
2525                 kvm->arch.vpic = vpic;
2526                 smp_wmb();
2527                 r = kvm_setup_default_irq_routing(kvm);
2528                 if (r) {
2529                         mutex_lock(&kvm->irq_lock);
2530                         kfree(kvm->arch.vpic);
2531                         kfree(kvm->arch.vioapic);
2532                         kvm->arch.vpic = NULL;
2533                         kvm->arch.vioapic = NULL;
2534                         mutex_unlock(&kvm->irq_lock);
2535                 }
2536         create_irqchip_unlock:
2537                 mutex_unlock(&kvm->lock);
2538                 break;
2539         }
2540         case KVM_CREATE_PIT:
2541                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2542                 goto create_pit;
2543         case KVM_CREATE_PIT2:
2544                 r = -EFAULT;
2545                 if (copy_from_user(&u.pit_config, argp,
2546                                    sizeof(struct kvm_pit_config)))
2547                         goto out;
2548         create_pit:
2549                 down_write(&kvm->slots_lock);
2550                 r = -EEXIST;
2551                 if (kvm->arch.vpit)
2552                         goto create_pit_unlock;
2553                 r = -ENOMEM;
2554                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
2555                 if (kvm->arch.vpit)
2556                         r = 0;
2557         create_pit_unlock:
2558                 up_write(&kvm->slots_lock);
2559                 break;
2560         case KVM_IRQ_LINE_STATUS:
2561         case KVM_IRQ_LINE: {
2562                 struct kvm_irq_level irq_event;
2563
2564                 r = -EFAULT;
2565                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2566                         goto out;
2567                 if (irqchip_in_kernel(kvm)) {
2568                         __s32 status;
2569                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2570                                         irq_event.irq, irq_event.level);
2571                         if (ioctl == KVM_IRQ_LINE_STATUS) {
2572                                 irq_event.status = status;
2573                                 if (copy_to_user(argp, &irq_event,
2574                                                         sizeof irq_event))
2575                                         goto out;
2576                         }
2577                         r = 0;
2578                 }
2579                 break;
2580         }
2581         case KVM_GET_IRQCHIP: {
2582                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2583                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
2584
2585                 r = -ENOMEM;
2586                 if (!chip)
2587                         goto out;
2588                 r = -EFAULT;
2589                 if (copy_from_user(chip, argp, sizeof *chip))
2590                         goto get_irqchip_out;
2591                 r = -ENXIO;
2592                 if (!irqchip_in_kernel(kvm))
2593                         goto get_irqchip_out;
2594                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
2595                 if (r)
2596                         goto get_irqchip_out;
2597                 r = -EFAULT;
2598                 if (copy_to_user(argp, chip, sizeof *chip))
2599                         goto get_irqchip_out;
2600                 r = 0;
2601         get_irqchip_out:
2602                 kfree(chip);
2603                 if (r)
2604                         goto out;
2605                 break;
2606         }
2607         case KVM_SET_IRQCHIP: {
2608                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2609                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
2610
2611                 r = -ENOMEM;
2612                 if (!chip)
2613                         goto out;
2614                 r = -EFAULT;
2615                 if (copy_from_user(chip, argp, sizeof *chip))
2616                         goto set_irqchip_out;
2617                 r = -ENXIO;
2618                 if (!irqchip_in_kernel(kvm))
2619                         goto set_irqchip_out;
2620                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
2621                 if (r)
2622                         goto set_irqchip_out;
2623                 r = 0;
2624         set_irqchip_out:
2625                 kfree(chip);
2626                 if (r)
2627                         goto out;
2628                 break;
2629         }
2630         case KVM_GET_PIT: {
2631                 r = -EFAULT;
2632                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
2633                         goto out;
2634                 r = -ENXIO;
2635                 if (!kvm->arch.vpit)
2636                         goto out;
2637                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
2638                 if (r)
2639                         goto out;
2640                 r = -EFAULT;
2641                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
2642                         goto out;
2643                 r = 0;
2644                 break;
2645         }
2646         case KVM_SET_PIT: {
2647                 r = -EFAULT;
2648                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
2649                         goto out;
2650                 r = -ENXIO;
2651                 if (!kvm->arch.vpit)
2652                         goto out;
2653                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
2654                 if (r)
2655                         goto out;
2656                 r = 0;
2657                 break;
2658         }
2659         case KVM_GET_PIT2: {
2660                 r = -ENXIO;
2661                 if (!kvm->arch.vpit)
2662                         goto out;
2663                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2664                 if (r)
2665                         goto out;
2666                 r = -EFAULT;
2667                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2668                         goto out;
2669                 r = 0;
2670                 break;
2671         }
2672         case KVM_SET_PIT2: {
2673                 r = -EFAULT;
2674                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2675                         goto out;
2676                 r = -ENXIO;
2677                 if (!kvm->arch.vpit)
2678                         goto out;
2679                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2680                 if (r)
2681                         goto out;
2682                 r = 0;
2683                 break;
2684         }
2685         case KVM_REINJECT_CONTROL: {
2686                 struct kvm_reinject_control control;
2687                 r =  -EFAULT;
2688                 if (copy_from_user(&control, argp, sizeof(control)))
2689                         goto out;
2690                 r = kvm_vm_ioctl_reinject(kvm, &control);
2691                 if (r)
2692                         goto out;
2693                 r = 0;
2694                 break;
2695         }
2696         case KVM_XEN_HVM_CONFIG: {
2697                 r = -EFAULT;
2698                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
2699                                    sizeof(struct kvm_xen_hvm_config)))
2700                         goto out;
2701                 r = -EINVAL;
2702                 if (kvm->arch.xen_hvm_config.flags)
2703                         goto out;
2704                 r = 0;
2705                 break;
2706         }
2707         case KVM_SET_CLOCK: {
2708                 struct timespec now;
2709                 struct kvm_clock_data user_ns;
2710                 u64 now_ns;
2711                 s64 delta;
2712
2713                 r = -EFAULT;
2714                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
2715                         goto out;
2716
2717                 r = -EINVAL;
2718                 if (user_ns.flags)
2719                         goto out;
2720
2721                 r = 0;
2722                 ktime_get_ts(&now);
2723                 now_ns = timespec_to_ns(&now);
2724                 delta = user_ns.clock - now_ns;
2725                 kvm->arch.kvmclock_offset = delta;
2726                 break;
2727         }
2728         case KVM_GET_CLOCK: {
2729                 struct timespec now;
2730                 struct kvm_clock_data user_ns;
2731                 u64 now_ns;
2732
2733                 ktime_get_ts(&now);
2734                 now_ns = timespec_to_ns(&now);
2735                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
2736                 user_ns.flags = 0;
2737
2738                 r = -EFAULT;
2739                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
2740                         goto out;
2741                 r = 0;
2742                 break;
2743         }
2744
2745         default:
2746                 ;
2747         }
2748 out:
2749         return r;
2750 }
2751
2752 static void kvm_init_msr_list(void)
2753 {
2754         u32 dummy[2];
2755         unsigned i, j;
2756
2757         /* skip the first msrs in the list. KVM-specific */
2758         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
2759                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2760                         continue;
2761                 if (j < i)
2762                         msrs_to_save[j] = msrs_to_save[i];
2763                 j++;
2764         }
2765         num_msrs_to_save = j;
2766 }
2767
2768 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
2769                            const void *v)
2770 {
2771         if (vcpu->arch.apic &&
2772             !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
2773                 return 0;
2774
2775         return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
2776 }
2777
2778 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2779 {
2780         if (vcpu->arch.apic &&
2781             !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
2782                 return 0;
2783
2784         return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
2785 }
2786
2787 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2788                                struct kvm_vcpu *vcpu)
2789 {
2790         void *data = val;
2791         int r = X86EMUL_CONTINUE;
2792
2793         while (bytes) {
2794                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2795                 unsigned offset = addr & (PAGE_SIZE-1);
2796                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2797                 int ret;
2798
2799                 if (gpa == UNMAPPED_GVA) {
2800                         r = X86EMUL_PROPAGATE_FAULT;
2801                         goto out;
2802                 }
2803                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2804                 if (ret < 0) {
2805                         r = X86EMUL_UNHANDLEABLE;
2806                         goto out;
2807                 }
2808
2809                 bytes -= toread;
2810                 data += toread;
2811                 addr += toread;
2812         }
2813 out:
2814         return r;
2815 }
2816
2817 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2818                                 struct kvm_vcpu *vcpu)
2819 {
2820         void *data = val;
2821         int r = X86EMUL_CONTINUE;
2822
2823         while (bytes) {
2824                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2825                 unsigned offset = addr & (PAGE_SIZE-1);
2826                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2827                 int ret;
2828
2829                 if (gpa == UNMAPPED_GVA) {
2830                         r = X86EMUL_PROPAGATE_FAULT;
2831                         goto out;
2832                 }
2833                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2834                 if (ret < 0) {
2835                         r = X86EMUL_UNHANDLEABLE;
2836                         goto out;
2837                 }
2838
2839                 bytes -= towrite;
2840                 data += towrite;
2841                 addr += towrite;
2842         }
2843 out:
2844         return r;
2845 }
2846
2847
2848 static int emulator_read_emulated(unsigned long addr,
2849                                   void *val,
2850                                   unsigned int bytes,
2851                                   struct kvm_vcpu *vcpu)
2852 {
2853         gpa_t                 gpa;
2854
2855         if (vcpu->mmio_read_completed) {
2856                 memcpy(val, vcpu->mmio_data, bytes);
2857                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
2858                                vcpu->mmio_phys_addr, *(u64 *)val);
2859                 vcpu->mmio_read_completed = 0;
2860                 return X86EMUL_CONTINUE;
2861         }
2862
2863         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2864
2865         /* For APIC access vmexit */
2866         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2867                 goto mmio;
2868
2869         if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2870                                 == X86EMUL_CONTINUE)
2871                 return X86EMUL_CONTINUE;
2872         if (gpa == UNMAPPED_GVA)
2873                 return X86EMUL_PROPAGATE_FAULT;
2874
2875 mmio:
2876         /*
2877          * Is this MMIO handled locally?
2878          */
2879         if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
2880                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
2881                 return X86EMUL_CONTINUE;
2882         }
2883
2884         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
2885
2886         vcpu->mmio_needed = 1;
2887         vcpu->mmio_phys_addr = gpa;
2888         vcpu->mmio_size = bytes;
2889         vcpu->mmio_is_write = 0;
2890
2891         return X86EMUL_UNHANDLEABLE;
2892 }
2893
2894 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2895                           const void *val, int bytes)
2896 {
2897         int ret;
2898
2899         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2900         if (ret < 0)
2901                 return 0;
2902         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2903         return 1;
2904 }
2905
2906 static int emulator_write_emulated_onepage(unsigned long addr,
2907                                            const void *val,
2908                                            unsigned int bytes,
2909                                            struct kvm_vcpu *vcpu)
2910 {
2911         gpa_t                 gpa;
2912
2913         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2914
2915         if (gpa == UNMAPPED_GVA) {
2916                 kvm_inject_page_fault(vcpu, addr, 2);
2917                 return X86EMUL_PROPAGATE_FAULT;
2918         }
2919
2920         /* For APIC access vmexit */
2921         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2922                 goto mmio;
2923
2924         if (emulator_write_phys(vcpu, gpa, val, bytes))
2925                 return X86EMUL_CONTINUE;
2926
2927 mmio:
2928         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
2929         /*
2930          * Is this MMIO handled locally?
2931          */
2932         if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
2933                 return X86EMUL_CONTINUE;
2934
2935         vcpu->mmio_needed = 1;
2936         vcpu->mmio_phys_addr = gpa;
2937         vcpu->mmio_size = bytes;
2938         vcpu->mmio_is_write = 1;
2939         memcpy(vcpu->mmio_data, val, bytes);
2940
2941         return X86EMUL_CONTINUE;
2942 }
2943
2944 int emulator_write_emulated(unsigned long addr,
2945                                    const void *val,
2946                                    unsigned int bytes,
2947                                    struct kvm_vcpu *vcpu)
2948 {
2949         /* Crossing a page boundary? */
2950         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2951                 int rc, now;
2952
2953                 now = -addr & ~PAGE_MASK;
2954                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2955                 if (rc != X86EMUL_CONTINUE)
2956                         return rc;
2957                 addr += now;
2958                 val += now;
2959                 bytes -= now;
2960         }
2961         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2962 }
2963 EXPORT_SYMBOL_GPL(emulator_write_emulated);
2964
2965 static int emulator_cmpxchg_emulated(unsigned long addr,
2966                                      const void *old,
2967                                      const void *new,
2968                                      unsigned int bytes,
2969                                      struct kvm_vcpu *vcpu)
2970 {
2971         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
2972 #ifndef CONFIG_X86_64
2973         /* guests cmpxchg8b have to be emulated atomically */
2974         if (bytes == 8) {
2975                 gpa_t gpa;
2976                 struct page *page;
2977                 char *kaddr;
2978                 u64 val;
2979
2980                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2981
2982                 if (gpa == UNMAPPED_GVA ||
2983                    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2984                         goto emul_write;
2985
2986                 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2987                         goto emul_write;
2988
2989                 val = *(u64 *)new;
2990
2991                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2992
2993                 kaddr = kmap_atomic(page, KM_USER0);
2994                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2995                 kunmap_atomic(kaddr, KM_USER0);
2996                 kvm_release_page_dirty(page);
2997         }
2998 emul_write:
2999 #endif
3000
3001         return emulator_write_emulated(addr, new, bytes, vcpu);
3002 }
3003
3004 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3005 {
3006         return kvm_x86_ops->get_segment_base(vcpu, seg);
3007 }
3008
3009 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3010 {
3011         kvm_mmu_invlpg(vcpu, address);
3012         return X86EMUL_CONTINUE;
3013 }
3014
3015 int emulate_clts(struct kvm_vcpu *vcpu)
3016 {
3017         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
3018         return X86EMUL_CONTINUE;
3019 }
3020
3021 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
3022 {
3023         struct kvm_vcpu *vcpu = ctxt->vcpu;
3024
3025         switch (dr) {
3026         case 0 ... 3:
3027                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
3028                 return X86EMUL_CONTINUE;
3029         default:
3030                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
3031                 return X86EMUL_UNHANDLEABLE;
3032         }
3033 }
3034
3035 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
3036 {
3037         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
3038         int exception;
3039
3040         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
3041         if (exception) {
3042                 /* FIXME: better handling */
3043                 return X86EMUL_UNHANDLEABLE;
3044         }
3045         return X86EMUL_CONTINUE;
3046 }
3047
3048 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
3049 {
3050         u8 opcodes[4];
3051         unsigned long rip = kvm_rip_read(vcpu);
3052         unsigned long rip_linear;
3053
3054         if (!printk_ratelimit())
3055                 return;
3056
3057         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
3058
3059         kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
3060
3061         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
3062                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
3063 }
3064 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
3065
3066 static struct x86_emulate_ops emulate_ops = {
3067         .read_std            = kvm_read_guest_virt,
3068         .read_emulated       = emulator_read_emulated,
3069         .write_emulated      = emulator_write_emulated,
3070         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
3071 };
3072
3073 static void cache_all_regs(struct kvm_vcpu *vcpu)
3074 {
3075         kvm_register_read(vcpu, VCPU_REGS_RAX);
3076         kvm_register_read(vcpu, VCPU_REGS_RSP);
3077         kvm_register_read(vcpu, VCPU_REGS_RIP);
3078         vcpu->arch.regs_dirty = ~0;
3079 }
3080
3081 int emulate_instruction(struct kvm_vcpu *vcpu,
3082                         unsigned long cr2,
3083                         u16 error_code,
3084                         int emulation_type)
3085 {
3086         int r, shadow_mask;
3087         struct decode_cache *c;
3088         struct kvm_run *run = vcpu->run;
3089
3090         kvm_clear_exception_queue(vcpu);
3091         vcpu->arch.mmio_fault_cr2 = cr2;
3092         /*
3093          * TODO: fix emulate.c to use guest_read/write_register
3094          * instead of direct ->regs accesses, can save hundred cycles
3095          * on Intel for instructions that don't read/change RSP, for
3096          * for example.
3097          */
3098         cache_all_regs(vcpu);
3099
3100         vcpu->mmio_is_write = 0;
3101         vcpu->arch.pio.string = 0;
3102
3103         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
3104                 int cs_db, cs_l;
3105                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3106
3107                 vcpu->arch.emulate_ctxt.vcpu = vcpu;
3108                 vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
3109                 vcpu->arch.emulate_ctxt.mode =
3110                         (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
3111                         ? X86EMUL_MODE_REAL : cs_l
3112                         ? X86EMUL_MODE_PROT64 : cs_db
3113                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3114
3115                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
3116
3117                 /* Only allow emulation of specific instructions on #UD
3118                  * (namely VMMCALL, sysenter, sysexit, syscall)*/
3119                 c = &vcpu->arch.emulate_ctxt.decode;
3120                 if (emulation_type & EMULTYPE_TRAP_UD) {
3121                         if (!c->twobyte)
3122                                 return EMULATE_FAIL;
3123                         switch (c->b) {
3124                         case 0x01: /* VMMCALL */
3125                                 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3126                                         return EMULATE_FAIL;
3127                                 break;
3128                         case 0x34: /* sysenter */
3129                         case 0x35: /* sysexit */
3130                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3131                                         return EMULATE_FAIL;
3132                                 break;
3133                         case 0x05: /* syscall */
3134                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3135                                         return EMULATE_FAIL;
3136                                 break;
3137                         default:
3138                                 return EMULATE_FAIL;
3139                         }
3140
3141                         if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
3142                                 return EMULATE_FAIL;
3143                 }
3144
3145                 ++vcpu->stat.insn_emulation;
3146                 if (r)  {
3147                         ++vcpu->stat.insn_emulation_fail;
3148                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3149                                 return EMULATE_DONE;
3150                         return EMULATE_FAIL;
3151                 }
3152         }
3153
3154         if (emulation_type & EMULTYPE_SKIP) {
3155                 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
3156                 return EMULATE_DONE;
3157         }
3158
3159         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
3160         shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
3161
3162         if (r == 0)
3163                 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
3164
3165         if (vcpu->arch.pio.string)
3166                 return EMULATE_DO_MMIO;
3167
3168         if ((r || vcpu->mmio_is_write) && run) {
3169                 run->exit_reason = KVM_EXIT_MMIO;
3170                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
3171                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
3172                 run->mmio.len = vcpu->mmio_size;
3173                 run->mmio.is_write = vcpu->mmio_is_write;
3174         }
3175
3176         if (r) {
3177                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3178                         return EMULATE_DONE;
3179                 if (!vcpu->mmio_needed) {
3180                         kvm_report_emulation_failure(vcpu, "mmio");
3181                         return EMULATE_FAIL;
3182                 }
3183                 return EMULATE_DO_MMIO;
3184         }
3185
3186         kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
3187
3188         if (vcpu->mmio_is_write) {
3189                 vcpu->mmio_needed = 0;
3190                 return EMULATE_DO_MMIO;
3191         }
3192
3193         return EMULATE_DONE;
3194 }
3195 EXPORT_SYMBOL_GPL(emulate_instruction);
3196
3197 static int pio_copy_data(struct kvm_vcpu *vcpu)
3198 {
3199         void *p = vcpu->arch.pio_data;
3200         gva_t q = vcpu->arch.pio.guest_gva;
3201         unsigned bytes;
3202         int ret;
3203
3204         bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
3205         if (vcpu->arch.pio.in)
3206                 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
3207         else
3208                 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
3209         return ret;
3210 }
3211
3212 int complete_pio(struct kvm_vcpu *vcpu)
3213 {
3214         struct kvm_pio_request *io = &vcpu->arch.pio;
3215         long delta;
3216         int r;
3217         unsigned long val;
3218
3219         if (!io->string) {
3220                 if (io->in) {
3221                         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3222                         memcpy(&val, vcpu->arch.pio_data, io->size);
3223                         kvm_register_write(vcpu, VCPU_REGS_RAX, val);
3224                 }
3225         } else {
3226                 if (io->in) {
3227                         r = pio_copy_data(vcpu);
3228                         if (r)
3229                                 return r;
3230                 }
3231
3232                 delta = 1;
3233                 if (io->rep) {
3234                         delta *= io->cur_count;
3235                         /*
3236                          * The size of the register should really depend on
3237                          * current address size.
3238                          */
3239                         val = kvm_register_read(vcpu, VCPU_REGS_RCX);
3240                         val -= delta;
3241                         kvm_register_write(vcpu, VCPU_REGS_RCX, val);
3242                 }
3243                 if (io->down)
3244                         delta = -delta;
3245                 delta *= io->size;
3246                 if (io->in) {
3247                         val = kvm_register_read(vcpu, VCPU_REGS_RDI);
3248                         val += delta;
3249                         kvm_register_write(vcpu, VCPU_REGS_RDI, val);
3250                 } else {
3251                         val = kvm_register_read(vcpu, VCPU_REGS_RSI);
3252                         val += delta;
3253                         kvm_register_write(vcpu, VCPU_REGS_RSI, val);
3254                 }
3255         }
3256
3257         io->count -= io->cur_count;
3258         io->cur_count = 0;
3259
3260         return 0;
3261 }
3262
3263 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3264 {
3265         /* TODO: String I/O for in kernel device */
3266         int r;
3267
3268         if (vcpu->arch.pio.in)
3269                 r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
3270                                     vcpu->arch.pio.size, pd);
3271         else
3272                 r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
3273                                      vcpu->arch.pio.size, pd);
3274         return r;
3275 }
3276
3277 static int pio_string_write(struct kvm_vcpu *vcpu)
3278 {
3279         struct kvm_pio_request *io = &vcpu->arch.pio;
3280         void *pd = vcpu->arch.pio_data;
3281         int i, r = 0;
3282
3283         for (i = 0; i < io->cur_count; i++) {
3284                 if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
3285                                      io->port, io->size, pd)) {
3286                         r = -EOPNOTSUPP;
3287                         break;
3288                 }
3289                 pd += io->size;
3290         }
3291         return r;
3292 }
3293
3294 int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
3295 {
3296         unsigned long val;
3297
3298         vcpu->run->exit_reason = KVM_EXIT_IO;
3299         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3300         vcpu->run->io.size = vcpu->arch.pio.size = size;
3301         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3302         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
3303         vcpu->run->io.port = vcpu->arch.pio.port = port;
3304         vcpu->arch.pio.in = in;
3305         vcpu->arch.pio.string = 0;
3306         vcpu->arch.pio.down = 0;
3307         vcpu->arch.pio.rep = 0;
3308
3309         trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3310                       size, 1);
3311
3312         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3313         memcpy(vcpu->arch.pio_data, &val, 4);
3314
3315         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3316                 complete_pio(vcpu);
3317                 return 1;
3318         }
3319         return 0;
3320 }
3321 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
3322
3323 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
3324                   int size, unsigned long count, int down,
3325                   gva_t address, int rep, unsigned port)
3326 {
3327         unsigned now, in_page;
3328         int ret = 0;
3329
3330         vcpu->run->exit_reason = KVM_EXIT_IO;
3331         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3332         vcpu->run->io.size = vcpu->arch.pio.size = size;
3333         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3334         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
3335         vcpu->run->io.port = vcpu->arch.pio.port = port;
3336         vcpu->arch.pio.in = in;
3337         vcpu->arch.pio.string = 1;
3338         vcpu->arch.pio.down = down;
3339         vcpu->arch.pio.rep = rep;
3340
3341         trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3342                       size, count);
3343
3344         if (!count) {
3345                 kvm_x86_ops->skip_emulated_instruction(vcpu);
3346                 return 1;
3347         }
3348
3349         if (!down)
3350                 in_page = PAGE_SIZE - offset_in_page(address);
3351         else
3352                 in_page = offset_in_page(address) + size;
3353         now = min(count, (unsigned long)in_page / size);
3354         if (!now)
3355                 now = 1;
3356         if (down) {
3357                 /*
3358                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
3359                  */
3360                 pr_unimpl(vcpu, "guest string pio down\n");
3361                 kvm_inject_gp(vcpu, 0);
3362                 return 1;
3363         }
3364         vcpu->run->io.count = now;
3365         vcpu->arch.pio.cur_count = now;
3366
3367         if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
3368                 kvm_x86_ops->skip_emulated_instruction(vcpu);
3369
3370         vcpu->arch.pio.guest_gva = address;
3371
3372         if (!vcpu->arch.pio.in) {
3373                 /* string PIO write */
3374                 ret = pio_copy_data(vcpu);
3375                 if (ret == X86EMUL_PROPAGATE_FAULT) {
3376                         kvm_inject_gp(vcpu, 0);
3377                         return 1;
3378                 }
3379                 if (ret == 0 && !pio_string_write(vcpu)) {
3380                         complete_pio(vcpu);
3381                         if (vcpu->arch.pio.count == 0)
3382                                 ret = 1;
3383                 }
3384         }
3385         /* no string PIO read support yet */
3386
3387         return ret;
3388 }
3389 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
3390
3391 static void bounce_off(void *info)
3392 {
3393         /* nothing */
3394 }
3395
3396 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
3397                                      void *data)
3398 {
3399         struct cpufreq_freqs *freq = data;
3400         struct kvm *kvm;
3401         struct kvm_vcpu *vcpu;
3402         int i, send_ipi = 0;
3403
3404         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
3405                 return 0;
3406         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
3407                 return 0;
3408         per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
3409
3410         spin_lock(&kvm_lock);
3411         list_for_each_entry(kvm, &vm_list, vm_list) {
3412                 kvm_for_each_vcpu(i, vcpu, kvm) {
3413                         if (vcpu->cpu != freq->cpu)
3414                                 continue;
3415                         if (!kvm_request_guest_time_update(vcpu))
3416                                 continue;
3417                         if (vcpu->cpu != smp_processor_id())
3418                                 send_ipi++;
3419                 }
3420         }
3421         spin_unlock(&kvm_lock);
3422
3423         if (freq->old < freq->new && send_ipi) {
3424                 /*
3425                  * We upscale the frequency.  Must make the guest
3426                  * doesn't see old kvmclock values while running with
3427                  * the new frequency, otherwise we risk the guest sees
3428                  * time go backwards.
3429                  *
3430                  * In case we update the frequency for another cpu
3431                  * (which might be in guest context) send an interrupt
3432                  * to kick the cpu out of guest context.  Next time
3433                  * guest context is entered kvmclock will be updated,
3434                  * so the guest will not see stale values.
3435                  */
3436                 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
3437         }
3438         return 0;
3439 }
3440
3441 static struct notifier_block kvmclock_cpufreq_notifier_block = {
3442         .notifier_call  = kvmclock_cpufreq_notifier
3443 };
3444
3445 static void kvm_timer_init(void)
3446 {
3447         int cpu;
3448
3449         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
3450                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3451                                           CPUFREQ_TRANSITION_NOTIFIER);
3452                 for_each_online_cpu(cpu) {
3453                         unsigned long khz = cpufreq_get(cpu);
3454                         if (!khz)
3455                                 khz = tsc_khz;
3456                         per_cpu(cpu_tsc_khz, cpu) = khz;
3457                 }
3458         } else {
3459                 for_each_possible_cpu(cpu)
3460                         per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
3461         }
3462 }
3463
3464 int kvm_arch_init(void *opaque)
3465 {
3466         int r;
3467         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
3468
3469         if (kvm_x86_ops) {
3470                 printk(KERN_ERR "kvm: already loaded the other module\n");
3471                 r = -EEXIST;
3472                 goto out;
3473         }
3474
3475         if (!ops->cpu_has_kvm_support()) {
3476                 printk(KERN_ERR "kvm: no hardware support\n");
3477                 r = -EOPNOTSUPP;
3478                 goto out;
3479         }
3480         if (ops->disabled_by_bios()) {
3481                 printk(KERN_ERR "kvm: disabled by bios\n");
3482                 r = -EOPNOTSUPP;
3483                 goto out;
3484         }
3485
3486         r = kvm_mmu_module_init();
3487         if (r)
3488                 goto out;
3489
3490         kvm_init_msr_list();
3491
3492         kvm_x86_ops = ops;
3493         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
3494         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3495         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
3496                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
3497
3498         kvm_timer_init();
3499
3500         return 0;
3501
3502 out:
3503         return r;
3504 }
3505
3506 void kvm_arch_exit(void)
3507 {
3508         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3509                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3510                                             CPUFREQ_TRANSITION_NOTIFIER);
3511         kvm_x86_ops = NULL;
3512         kvm_mmu_module_exit();
3513 }
3514
3515 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3516 {
3517         ++vcpu->stat.halt_exits;
3518         if (irqchip_in_kernel(vcpu->kvm)) {
3519                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3520                 return 1;
3521         } else {
3522                 vcpu->run->exit_reason = KVM_EXIT_HLT;
3523                 return 0;
3524         }
3525 }
3526 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3527
3528 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3529                            unsigned long a1)
3530 {
3531         if (is_long_mode(vcpu))
3532                 return a0;
3533         else
3534                 return a0 | ((gpa_t)a1 << 32);
3535 }
3536
3537 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3538 {
3539         unsigned long nr, a0, a1, a2, a3, ret;
3540         int r = 1;
3541
3542         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3543         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3544         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3545         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3546         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
3547
3548         trace_kvm_hypercall(nr, a0, a1, a2, a3);
3549
3550         if (!is_long_mode(vcpu)) {
3551                 nr &= 0xFFFFFFFF;
3552                 a0 &= 0xFFFFFFFF;
3553                 a1 &= 0xFFFFFFFF;
3554                 a2 &= 0xFFFFFFFF;
3555                 a3 &= 0xFFFFFFFF;
3556         }
3557
3558         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3559                 ret = -KVM_EPERM;
3560                 goto out;
3561         }
3562
3563         switch (nr) {
3564         case KVM_HC_VAPIC_POLL_IRQ:
3565                 ret = 0;
3566                 break;
3567         case KVM_HC_MMU_OP:
3568                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3569                 break;
3570         default:
3571                 ret = -KVM_ENOSYS;
3572                 break;
3573         }
3574 out:
3575         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
3576         ++vcpu->stat.hypercalls;
3577         return r;
3578 }
3579 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3580
3581 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3582 {
3583         char instruction[3];
3584         int ret = 0;
3585         unsigned long rip = kvm_rip_read(vcpu);
3586
3587
3588         /*
3589          * Blow out the MMU to ensure that no other VCPU has an active mapping
3590          * to ensure that the updated hypercall appears atomically across all
3591          * VCPUs.
3592          */
3593         kvm_mmu_zap_all(vcpu->kvm);
3594
3595         kvm_x86_ops->patch_hypercall(vcpu, instruction);
3596         if (emulator_write_emulated(rip, instruction, 3, vcpu)
3597             != X86EMUL_CONTINUE)
3598                 ret = -EFAULT;
3599
3600         return ret;
3601 }
3602
3603 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3604 {
3605         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3606 }
3607
3608 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3609 {
3610         struct descriptor_table dt = { limit, base };
3611
3612         kvm_x86_ops->set_gdt(vcpu, &dt);
3613 }
3614
3615 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3616 {
3617         struct descriptor_table dt = { limit, base };
3618
3619         kvm_x86_ops->set_idt(vcpu, &dt);
3620 }
3621
3622 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3623                    unsigned long *rflags)
3624 {
3625         kvm_lmsw(vcpu, msw);
3626         *rflags = kvm_get_rflags(vcpu);
3627 }
3628
3629 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3630 {
3631         unsigned long value;
3632
3633         switch (cr) {
3634         case 0:
3635                 value = vcpu->arch.cr0;
3636                 break;
3637         case 2:
3638                 value = vcpu->arch.cr2;
3639                 break;
3640         case 3:
3641                 value = vcpu->arch.cr3;
3642                 break;
3643         case 4:
3644                 value = kvm_read_cr4(vcpu);
3645                 break;
3646         case 8:
3647                 value = kvm_get_cr8(vcpu);
3648                 break;
3649         default:
3650                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3651                 return 0;
3652         }
3653
3654         return value;
3655 }
3656
3657 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3658                      unsigned long *rflags)
3659 {
3660         switch (cr) {
3661         case 0:
3662                 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
3663                 *rflags = kvm_get_rflags(vcpu);
3664                 break;
3665         case 2:
3666                 vcpu->arch.cr2 = val;
3667                 break;
3668         case 3:
3669                 kvm_set_cr3(vcpu, val);
3670                 break;
3671         case 4:
3672                 kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
3673                 break;
3674         case 8:
3675                 kvm_set_cr8(vcpu, val & 0xfUL);
3676                 break;
3677         default:
3678                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3679         }
3680 }
3681
3682 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
3683 {
3684         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
3685         int j, nent = vcpu->arch.cpuid_nent;
3686
3687         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
3688         /* when no next entry is found, the current entry[i] is reselected */
3689         for (j = i + 1; ; j = (j + 1) % nent) {
3690                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
3691                 if (ej->function == e->function) {
3692                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3693                         return j;
3694                 }
3695         }
3696         return 0; /* silence gcc, even though control never reaches here */
3697 }
3698
3699 /* find an entry with matching function, matching index (if needed), and that
3700  * should be read next (if it's stateful) */
3701 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3702         u32 function, u32 index)
3703 {
3704         if (e->function != function)
3705                 return 0;
3706         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3707                 return 0;
3708         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
3709             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
3710                 return 0;
3711         return 1;
3712 }
3713
3714 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3715                                               u32 function, u32 index)
3716 {
3717         int i;
3718         struct kvm_cpuid_entry2 *best = NULL;
3719
3720         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
3721                 struct kvm_cpuid_entry2 *e;
3722
3723                 e = &vcpu->arch.cpuid_entries[i];
3724                 if (is_matching_cpuid_entry(e, function, index)) {
3725                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3726                                 move_to_next_stateful_cpuid_entry(vcpu, i);
3727                         best = e;
3728                         break;
3729                 }
3730                 /*
3731                  * Both basic or both extended?
3732                  */
3733                 if (((e->function ^ function) & 0x80000000) == 0)
3734                         if (!best || e->function > best->function)
3735                                 best = e;
3736         }
3737         return best;
3738 }
3739 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
3740
3741 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3742 {
3743         struct kvm_cpuid_entry2 *best;
3744
3745         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3746         if (best)
3747                 return best->eax & 0xff;
3748         return 36;
3749 }
3750
3751 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3752 {
3753         u32 function, index;
3754         struct kvm_cpuid_entry2 *best;
3755
3756         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3757         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3758         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3759         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3760         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3761         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3762         best = kvm_find_cpuid_entry(vcpu, function, index);
3763         if (best) {
3764                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3765                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3766                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3767                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
3768         }
3769         kvm_x86_ops->skip_emulated_instruction(vcpu);
3770         trace_kvm_cpuid(function,
3771                         kvm_register_read(vcpu, VCPU_REGS_RAX),
3772                         kvm_register_read(vcpu, VCPU_REGS_RBX),
3773                         kvm_register_read(vcpu, VCPU_REGS_RCX),
3774                         kvm_register_read(vcpu, VCPU_REGS_RDX));
3775 }
3776 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
3777
3778 /*
3779  * Check if userspace requested an interrupt window, and that the
3780  * interrupt window is open.
3781  *
3782  * No need to exit to userspace if we already have an interrupt queued.
3783  */
3784 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
3785 {
3786         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
3787                 vcpu->run->request_interrupt_window &&
3788                 kvm_arch_interrupt_allowed(vcpu));
3789 }
3790
3791 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
3792 {
3793         struct kvm_run *kvm_run = vcpu->run;
3794
3795         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
3796         kvm_run->cr8 = kvm_get_cr8(vcpu);
3797         kvm_run->apic_base = kvm_get_apic_base(vcpu);
3798         if (irqchip_in_kernel(vcpu->kvm))
3799                 kvm_run->ready_for_interrupt_injection = 1;
3800         else
3801                 kvm_run->ready_for_interrupt_injection =
3802                         kvm_arch_interrupt_allowed(vcpu) &&
3803                         !kvm_cpu_has_interrupt(vcpu) &&
3804                         !kvm_event_needs_reinjection(vcpu);
3805 }
3806
3807 static void vapic_enter(struct kvm_vcpu *vcpu)
3808 {
3809         struct kvm_lapic *apic = vcpu->arch.apic;
3810         struct page *page;
3811
3812         if (!apic || !apic->vapic_addr)
3813                 return;
3814
3815         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3816
3817         vcpu->arch.apic->vapic_page = page;
3818 }
3819
3820 static void vapic_exit(struct kvm_vcpu *vcpu)
3821 {
3822         struct kvm_lapic *apic = vcpu->arch.apic;
3823
3824         if (!apic || !apic->vapic_addr)
3825                 return;
3826
3827         down_read(&vcpu->kvm->slots_lock);
3828         kvm_release_page_dirty(apic->vapic_page);
3829         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3830         up_read(&vcpu->kvm->slots_lock);
3831 }
3832
3833 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3834 {
3835         int max_irr, tpr;
3836
3837         if (!kvm_x86_ops->update_cr8_intercept)
3838                 return;
3839
3840         if (!vcpu->arch.apic)
3841                 return;
3842
3843         if (!vcpu->arch.apic->vapic_addr)
3844                 max_irr = kvm_lapic_find_highest_irr(vcpu);
3845         else
3846                 max_irr = -1;
3847
3848         if (max_irr != -1)
3849                 max_irr >>= 4;
3850
3851         tpr = kvm_lapic_get_cr8(vcpu);
3852
3853         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3854 }
3855
3856 static void inject_pending_event(struct kvm_vcpu *vcpu)
3857 {
3858         /* try to reinject previous events if any */
3859         if (vcpu->arch.exception.pending) {
3860                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
3861                                           vcpu->arch.exception.has_error_code,
3862                                           vcpu->arch.exception.error_code);
3863                 return;
3864         }
3865
3866         if (vcpu->arch.nmi_injected) {
3867                 kvm_x86_ops->set_nmi(vcpu);
3868                 return;
3869         }
3870
3871         if (vcpu->arch.interrupt.pending) {
3872                 kvm_x86_ops->set_irq(vcpu);
3873                 return;
3874         }
3875
3876         /* try to inject new event if pending */
3877         if (vcpu->arch.nmi_pending) {
3878                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3879                         vcpu->arch.nmi_pending = false;
3880                         vcpu->arch.nmi_injected = true;
3881                         kvm_x86_ops->set_nmi(vcpu);
3882                 }
3883         } else if (kvm_cpu_has_interrupt(vcpu)) {
3884                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
3885                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
3886                                             false);
3887                         kvm_x86_ops->set_irq(vcpu);
3888                 }
3889         }
3890 }
3891
3892 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3893 {
3894         int r;
3895         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3896                 vcpu->run->request_interrupt_window;
3897
3898         if (vcpu->requests)
3899                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3900                         kvm_mmu_unload(vcpu);
3901
3902         r = kvm_mmu_reload(vcpu);
3903         if (unlikely(r))
3904                 goto out;
3905
3906         if (vcpu->requests) {
3907                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
3908                         __kvm_migrate_timers(vcpu);
3909                 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3910                         kvm_write_guest_time(vcpu);
3911                 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3912                         kvm_mmu_sync_roots(vcpu);
3913                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3914                         kvm_x86_ops->tlb_flush(vcpu);
3915                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3916                                        &vcpu->requests)) {
3917                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
3918                         r = 0;
3919                         goto out;
3920                 }
3921                 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3922                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
3923                         r = 0;
3924                         goto out;
3925                 }
3926         }
3927
3928         preempt_disable();
3929
3930         kvm_x86_ops->prepare_guest_switch(vcpu);
3931         kvm_load_guest_fpu(vcpu);
3932
3933         local_irq_disable();
3934
3935         clear_bit(KVM_REQ_KICK, &vcpu->requests);
3936         smp_mb__after_clear_bit();
3937
3938         if (vcpu->requests || need_resched() || signal_pending(current)) {
3939                 set_bit(KVM_REQ_KICK, &vcpu->requests);
3940                 local_irq_enable();
3941                 preempt_enable();
3942                 r = 1;
3943                 goto out;
3944         }
3945
3946         inject_pending_event(vcpu);
3947
3948         /* enable NMI/IRQ window open exits if needed */
3949         if (vcpu->arch.nmi_pending)
3950                 kvm_x86_ops->enable_nmi_window(vcpu);
3951         else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3952                 kvm_x86_ops->enable_irq_window(vcpu);
3953
3954         if (kvm_lapic_enabled(vcpu)) {
3955                 update_cr8_intercept(vcpu);
3956                 kvm_lapic_sync_to_vapic(vcpu);
3957         }
3958
3959         up_read(&vcpu->kvm->slots_lock);
3960
3961         kvm_guest_enter();
3962
3963         if (unlikely(vcpu->arch.switch_db_regs)) {
3964                 set_debugreg(0, 7);
3965                 set_debugreg(vcpu->arch.eff_db[0], 0);
3966                 set_debugreg(vcpu->arch.eff_db[1], 1);
3967                 set_debugreg(vcpu->arch.eff_db[2], 2);
3968                 set_debugreg(vcpu->arch.eff_db[3], 3);
3969         }
3970
3971         trace_kvm_entry(vcpu->vcpu_id);
3972         kvm_x86_ops->run(vcpu);
3973
3974         /*
3975          * If the guest has used debug registers, at least dr7
3976          * will be disabled while returning to the host.
3977          * If we don't have active breakpoints in the host, we don't
3978          * care about the messed up debug address registers. But if
3979          * we have some of them active, restore the old state.
3980          */
3981         if (hw_breakpoint_active())
3982                 hw_breakpoint_restore();
3983
3984         set_bit(KVM_REQ_KICK, &vcpu->requests);
3985         local_irq_enable();
3986
3987         ++vcpu->stat.exits;
3988
3989         /*
3990          * We must have an instruction between local_irq_enable() and
3991          * kvm_guest_exit(), so the timer interrupt isn't delayed by
3992          * the interrupt shadow.  The stat.exits increment will do nicely.
3993          * But we need to prevent reordering, hence this barrier():
3994          */
3995         barrier();
3996
3997         kvm_guest_exit();
3998
3999         preempt_enable();
4000
4001         down_read(&vcpu->kvm->slots_lock);
4002
4003         /*
4004          * Profile KVM exit RIPs:
4005          */
4006         if (unlikely(prof_on == KVM_PROFILING)) {
4007                 unsigned long rip = kvm_rip_read(vcpu);
4008                 profile_hit(KVM_PROFILING, (void *)rip);
4009         }
4010
4011
4012         kvm_lapic_sync_from_vapic(vcpu);
4013
4014         r = kvm_x86_ops->handle_exit(vcpu);
4015 out:
4016         return r;
4017 }
4018
4019
4020 static int __vcpu_run(struct kvm_vcpu *vcpu)
4021 {
4022         int r;
4023
4024         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
4025                 pr_debug("vcpu %d received sipi with vector # %x\n",
4026                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
4027                 kvm_lapic_reset(vcpu);
4028                 r = kvm_arch_vcpu_reset(vcpu);
4029                 if (r)
4030                         return r;
4031                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4032         }
4033
4034         down_read(&vcpu->kvm->slots_lock);
4035         vapic_enter(vcpu);
4036
4037         r = 1;
4038         while (r > 0) {
4039                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
4040                         r = vcpu_enter_guest(vcpu);
4041                 else {
4042                         up_read(&vcpu->kvm->slots_lock);
4043                         kvm_vcpu_block(vcpu);
4044                         down_read(&vcpu->kvm->slots_lock);
4045                         if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
4046                         {
4047                                 switch(vcpu->arch.mp_state) {
4048                                 case KVM_MP_STATE_HALTED:
4049                                         vcpu->arch.mp_state =
4050                                                 KVM_MP_STATE_RUNNABLE;
4051                                 case KVM_MP_STATE_RUNNABLE:
4052                                         break;
4053                                 case KVM_MP_STATE_SIPI_RECEIVED:
4054                                 default:
4055                                         r = -EINTR;
4056                                         break;
4057                                 }
4058                         }
4059                 }
4060
4061                 if (r <= 0)
4062                         break;
4063
4064                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4065                 if (kvm_cpu_has_pending_timer(vcpu))
4066                         kvm_inject_pending_timer_irqs(vcpu);
4067
4068                 if (dm_request_for_irq_injection(vcpu)) {
4069                         r = -EINTR;
4070                         vcpu->run->exit_reason = KVM_EXIT_INTR;
4071                         ++vcpu->stat.request_irq_exits;
4072                 }
4073                 if (signal_pending(current)) {
4074                         r = -EINTR;
4075                         vcpu->run->exit_reason = KVM_EXIT_INTR;
4076                         ++vcpu->stat.signal_exits;
4077                 }
4078                 if (need_resched()) {
4079                         up_read(&vcpu->kvm->slots_lock);
4080                         kvm_resched(vcpu);
4081                         down_read(&vcpu->kvm->slots_lock);
4082                 }
4083         }
4084
4085         up_read(&vcpu->kvm->slots_lock);
4086         post_kvm_run_save(vcpu);
4087
4088         vapic_exit(vcpu);
4089
4090         return r;
4091 }
4092
4093 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4094 {
4095         int r;
4096         sigset_t sigsaved;
4097
4098         vcpu_load(vcpu);
4099
4100         if (vcpu->sigset_active)
4101                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4102
4103         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4104                 kvm_vcpu_block(vcpu);
4105                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
4106                 r = -EAGAIN;
4107                 goto out;
4108         }
4109
4110         /* re-sync apic's tpr */
4111         if (!irqchip_in_kernel(vcpu->kvm))
4112                 kvm_set_cr8(vcpu, kvm_run->cr8);
4113
4114         if (vcpu->arch.pio.cur_count) {
4115                 r = complete_pio(vcpu);
4116                 if (r)
4117                         goto out;
4118         }
4119         if (vcpu->mmio_needed) {
4120                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4121                 vcpu->mmio_read_completed = 1;
4122                 vcpu->mmio_needed = 0;
4123
4124                 down_read(&vcpu->kvm->slots_lock);
4125                 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
4126                                         EMULTYPE_NO_DECODE);
4127                 up_read(&vcpu->kvm->slots_lock);
4128                 if (r == EMULATE_DO_MMIO) {
4129                         /*
4130                          * Read-modify-write.  Back to userspace.
4131                          */
4132                         r = 0;
4133                         goto out;
4134                 }
4135         }
4136         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4137                 kvm_register_write(vcpu, VCPU_REGS_RAX,
4138                                      kvm_run->hypercall.ret);
4139
4140         r = __vcpu_run(vcpu);
4141
4142 out:
4143         if (vcpu->sigset_active)
4144                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4145
4146         vcpu_put(vcpu);
4147         return r;
4148 }
4149
4150 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4151 {
4152         vcpu_load(vcpu);
4153
4154         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4155         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4156         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4157         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4158         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4159         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4160         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4161         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4162 #ifdef CONFIG_X86_64
4163         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4164         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4165         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4166         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4167         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4168         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4169         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4170         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
4171 #endif
4172
4173         regs->rip = kvm_rip_read(vcpu);
4174         regs->rflags = kvm_get_rflags(vcpu);
4175
4176         vcpu_put(vcpu);
4177
4178         return 0;
4179 }
4180
4181 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4182 {
4183         vcpu_load(vcpu);
4184
4185         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4186         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4187         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4188         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4189         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4190         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4191         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4192         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
4193 #ifdef CONFIG_X86_64
4194         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4195         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4196         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4197         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4198         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
4199         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
4200         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
4201         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
4202 #endif
4203
4204         kvm_rip_write(vcpu, regs->rip);
4205         kvm_set_rflags(vcpu, regs->rflags);
4206
4207         vcpu->arch.exception.pending = false;
4208
4209         vcpu_put(vcpu);
4210
4211         return 0;
4212 }
4213
4214 void kvm_get_segment(struct kvm_vcpu *vcpu,
4215                      struct kvm_segment *var, int seg)
4216 {
4217         kvm_x86_ops->get_segment(vcpu, var, seg);
4218 }
4219
4220 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4221 {
4222         struct kvm_segment cs;
4223
4224         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
4225         *db = cs.db;
4226         *l = cs.l;
4227 }
4228 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4229
4230 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4231                                   struct kvm_sregs *sregs)
4232 {
4233         struct descriptor_table dt;
4234
4235         vcpu_load(vcpu);
4236
4237         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4238         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4239         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4240         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4241         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4242         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
4243
4244         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4245         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
4246
4247         kvm_x86_ops->get_idt(vcpu, &dt);
4248         sregs->idt.limit = dt.limit;
4249         sregs->idt.base = dt.base;
4250         kvm_x86_ops->get_gdt(vcpu, &dt);
4251         sregs->gdt.limit = dt.limit;
4252         sregs->gdt.base = dt.base;
4253
4254         sregs->cr0 = vcpu->arch.cr0;
4255         sregs->cr2 = vcpu->arch.cr2;
4256         sregs->cr3 = vcpu->arch.cr3;
4257         sregs->cr4 = kvm_read_cr4(vcpu);
4258         sregs->cr8 = kvm_get_cr8(vcpu);
4259         sregs->efer = vcpu->arch.shadow_efer;
4260         sregs->apic_base = kvm_get_apic_base(vcpu);
4261
4262         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
4263
4264         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
4265                 set_bit(vcpu->arch.interrupt.nr,
4266                         (unsigned long *)sregs->interrupt_bitmap);
4267
4268         vcpu_put(vcpu);
4269
4270         return 0;
4271 }
4272
4273 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4274                                     struct kvm_mp_state *mp_state)
4275 {
4276         vcpu_load(vcpu);
4277         mp_state->mp_state = vcpu->arch.mp_state;
4278         vcpu_put(vcpu);
4279         return 0;
4280 }
4281
4282 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4283                                     struct kvm_mp_state *mp_state)
4284 {
4285         vcpu_load(vcpu);
4286         vcpu->arch.mp_state = mp_state->mp_state;
4287         vcpu_put(vcpu);
4288         return 0;
4289 }
4290
4291 static void kvm_set_segment(struct kvm_vcpu *vcpu,
4292                         struct kvm_segment *var, int seg)
4293 {
4294         kvm_x86_ops->set_segment(vcpu, var, seg);
4295 }
4296
4297 static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
4298                                    struct kvm_segment *kvm_desct)
4299 {
4300         kvm_desct->base = get_desc_base(seg_desc);
4301         kvm_desct->limit = get_desc_limit(seg_desc);
4302         if (seg_desc->g) {
4303                 kvm_desct->limit <<= 12;
4304                 kvm_desct->limit |= 0xfff;
4305         }
4306         kvm_desct->selector = selector;
4307         kvm_desct->type = seg_desc->type;
4308         kvm_desct->present = seg_desc->p;
4309         kvm_desct->dpl = seg_desc->dpl;
4310         kvm_desct->db = seg_desc->d;
4311         kvm_desct->s = seg_desc->s;
4312         kvm_desct->l = seg_desc->l;
4313         kvm_desct->g = seg_desc->g;
4314         kvm_desct->avl = seg_desc->avl;
4315         if (!selector)
4316                 kvm_desct->unusable = 1;
4317         else
4318                 kvm_desct->unusable = 0;
4319         kvm_desct->padding = 0;
4320 }
4321
4322 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4323                                           u16 selector,
4324                                           struct descriptor_table *dtable)
4325 {
4326         if (selector & 1 << 2) {
4327                 struct kvm_segment kvm_seg;
4328
4329                 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
4330
4331                 if (kvm_seg.unusable)
4332                         dtable->limit = 0;
4333                 else
4334                         dtable->limit = kvm_seg.limit;
4335                 dtable->base = kvm_seg.base;
4336         }
4337         else
4338                 kvm_x86_ops->get_gdt(vcpu, dtable);
4339 }
4340
4341 /* allowed just for 8 bytes segments */
4342 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4343                                          struct desc_struct *seg_desc)
4344 {
4345         struct descriptor_table dtable;
4346         u16 index = selector >> 3;
4347
4348         get_segment_descriptor_dtable(vcpu, selector, &dtable);
4349
4350         if (dtable.limit < index * 8 + 7) {
4351                 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
4352                 return 1;
4353         }
4354         return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
4355 }
4356
4357 /* allowed just for 8 bytes segments */
4358 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4359                                          struct desc_struct *seg_desc)
4360 {
4361         struct descriptor_table dtable;
4362         u16 index = selector >> 3;
4363
4364         get_segment_descriptor_dtable(vcpu, selector, &dtable);
4365
4366         if (dtable.limit < index * 8 + 7)
4367                 return 1;
4368         return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
4369 }
4370
4371 static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
4372                              struct desc_struct *seg_desc)
4373 {
4374         u32 base_addr = get_desc_base(seg_desc);
4375
4376         return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
4377 }
4378
4379 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4380 {
4381         struct kvm_segment kvm_seg;
4382
4383         kvm_get_segment(vcpu, &kvm_seg, seg);
4384         return kvm_seg.selector;
4385 }
4386
4387 static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
4388                                                 u16 selector,
4389                                                 struct kvm_segment *kvm_seg)
4390 {
4391         struct desc_struct seg_desc;
4392
4393         if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
4394                 return 1;
4395         seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
4396         return 0;
4397 }
4398
4399 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
4400 {
4401         struct kvm_segment segvar = {
4402                 .base = selector << 4,
4403                 .limit = 0xffff,
4404                 .selector = selector,
4405                 .type = 3,
4406                 .present = 1,
4407                 .dpl = 3,
4408                 .db = 0,
4409                 .s = 1,
4410                 .l = 0,
4411                 .g = 0,
4412                 .avl = 0,
4413                 .unusable = 0,
4414         };
4415         kvm_x86_ops->set_segment(vcpu, &segvar, seg);
4416         return 0;
4417 }
4418
4419 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4420 {
4421         return (seg != VCPU_SREG_LDTR) &&
4422                 (seg != VCPU_SREG_TR) &&
4423                 (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
4424 }
4425
4426 static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
4427                                          u16 selector)
4428 {
4429         /* NULL selector is not valid for CS and SS */
4430         if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
4431                 if (!selector)
4432                         kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
4433 }
4434
4435 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4436                                 int type_bits, int seg)
4437 {
4438         struct kvm_segment kvm_seg;
4439
4440         if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
4441                 return kvm_load_realmode_segment(vcpu, selector, seg);
4442         if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
4443                 return 1;
4444
4445         kvm_check_segment_descriptor(vcpu, seg, selector);
4446         kvm_seg.type |= type_bits;
4447
4448         if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
4449             seg != VCPU_SREG_LDTR)
4450                 if (!kvm_seg.s)
4451                         kvm_seg.unusable = 1;
4452
4453         kvm_set_segment(vcpu, &kvm_seg, seg);
4454         return 0;
4455 }
4456
4457 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4458                                 struct tss_segment_32 *tss)
4459 {
4460         tss->cr3 = vcpu->arch.cr3;
4461         tss->eip = kvm_rip_read(vcpu);
4462         tss->eflags = kvm_get_rflags(vcpu);
4463         tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4464         tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4465         tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4466         tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4467         tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4468         tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4469         tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4470         tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4471         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4472         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4473         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4474         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4475         tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4476         tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4477         tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4478 }
4479
4480 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4481                                   struct tss_segment_32 *tss)
4482 {
4483         kvm_set_cr3(vcpu, tss->cr3);
4484
4485         kvm_rip_write(vcpu, tss->eip);
4486         kvm_set_rflags(vcpu, tss->eflags | 2);
4487
4488         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4489         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4490         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4491         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4492         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4493         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4494         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4495         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
4496
4497         if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
4498                 return 1;
4499
4500         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
4501                 return 1;
4502
4503         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
4504                 return 1;
4505
4506         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
4507                 return 1;
4508
4509         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
4510                 return 1;
4511
4512         if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
4513                 return 1;
4514
4515         if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
4516                 return 1;
4517         return 0;
4518 }
4519
4520 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
4521                                 struct tss_segment_16 *tss)
4522 {
4523         tss->ip = kvm_rip_read(vcpu);
4524         tss->flag = kvm_get_rflags(vcpu);
4525         tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4526         tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4527         tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4528         tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4529         tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4530         tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4531         tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
4532         tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
4533
4534         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4535         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4536         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4537         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4538         tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4539 }
4540
4541 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4542                                  struct tss_segment_16 *tss)
4543 {
4544         kvm_rip_write(vcpu, tss->ip);
4545         kvm_set_rflags(vcpu, tss->flag | 2);
4546         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
4547         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
4548         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
4549         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
4550         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
4551         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
4552         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4553         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
4554
4555         if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
4556                 return 1;
4557
4558         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
4559                 return 1;
4560
4561         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
4562                 return 1;
4563
4564         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
4565                 return 1;
4566
4567         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
4568                 return 1;
4569         return 0;
4570 }
4571
4572 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
4573                               u16 old_tss_sel, u32 old_tss_base,
4574                               struct desc_struct *nseg_desc)
4575 {
4576         struct tss_segment_16 tss_segment_16;
4577         int ret = 0;
4578
4579         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4580                            sizeof tss_segment_16))
4581                 goto out;
4582
4583         save_state_to_tss16(vcpu, &tss_segment_16);
4584
4585         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4586                             sizeof tss_segment_16))
4587                 goto out;
4588
4589         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4590                            &tss_segment_16, sizeof tss_segment_16))
4591                 goto out;
4592
4593         if (old_tss_sel != 0xffff) {
4594                 tss_segment_16.prev_task_link = old_tss_sel;
4595
4596                 if (kvm_write_guest(vcpu->kvm,
4597                                     get_tss_base_addr(vcpu, nseg_desc),
4598                                     &tss_segment_16.prev_task_link,
4599                                     sizeof tss_segment_16.prev_task_link))
4600                         goto out;
4601         }
4602
4603         if (load_state_from_tss16(vcpu, &tss_segment_16))
4604                 goto out;
4605
4606         ret = 1;
4607 out:
4608         return ret;
4609 }
4610
4611 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
4612                        u16 old_tss_sel, u32 old_tss_base,
4613                        struct desc_struct *nseg_desc)
4614 {
4615         struct tss_segment_32 tss_segment_32;
4616         int ret = 0;
4617
4618         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4619                            sizeof tss_segment_32))
4620                 goto out;
4621
4622         save_state_to_tss32(vcpu, &tss_segment_32);
4623
4624         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4625                             sizeof tss_segment_32))
4626                 goto out;
4627
4628         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4629                            &tss_segment_32, sizeof tss_segment_32))
4630                 goto out;
4631
4632         if (old_tss_sel != 0xffff) {
4633                 tss_segment_32.prev_task_link = old_tss_sel;
4634
4635                 if (kvm_write_guest(vcpu->kvm,
4636                                     get_tss_base_addr(vcpu, nseg_desc),
4637                                     &tss_segment_32.prev_task_link,
4638                                     sizeof tss_segment_32.prev_task_link))
4639                         goto out;
4640         }
4641
4642         if (load_state_from_tss32(vcpu, &tss_segment_32))
4643                 goto out;
4644
4645         ret = 1;
4646 out:
4647         return ret;
4648 }
4649
4650 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4651 {
4652         struct kvm_segment tr_seg;
4653         struct desc_struct cseg_desc;
4654         struct desc_struct nseg_desc;
4655         int ret = 0;
4656         u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
4657         u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
4658
4659         old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
4660
4661         /* FIXME: Handle errors. Failure to read either TSS or their
4662          * descriptors should generate a pagefault.
4663          */
4664         if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
4665                 goto out;
4666
4667         if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
4668                 goto out;
4669
4670         if (reason != TASK_SWITCH_IRET) {
4671                 int cpl;
4672
4673                 cpl = kvm_x86_ops->get_cpl(vcpu);
4674                 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
4675                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4676                         return 1;
4677                 }
4678         }
4679
4680         if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
4681                 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
4682                 return 1;
4683         }
4684
4685         if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
4686                 cseg_desc.type &= ~(1 << 1); //clear the B flag
4687                 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
4688         }
4689
4690         if (reason == TASK_SWITCH_IRET) {
4691                 u32 eflags = kvm_get_rflags(vcpu);
4692                 kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
4693         }
4694
4695         /* set back link to prev task only if NT bit is set in eflags
4696            note that old_tss_sel is not used afetr this point */
4697         if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
4698                 old_tss_sel = 0xffff;
4699
4700         if (nseg_desc.type & 8)
4701                 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
4702                                          old_tss_base, &nseg_desc);
4703         else
4704                 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
4705                                          old_tss_base, &nseg_desc);
4706
4707         if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
4708                 u32 eflags = kvm_get_rflags(vcpu);
4709                 kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
4710         }
4711
4712         if (reason != TASK_SWITCH_IRET) {
4713                 nseg_desc.type |= (1 << 1);
4714                 save_guest_segment_descriptor(vcpu, tss_selector,
4715                                               &nseg_desc);
4716         }
4717
4718         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
4719         seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4720         tr_seg.type = 11;
4721         kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
4722 out:
4723         return ret;
4724 }
4725 EXPORT_SYMBOL_GPL(kvm_task_switch);
4726
4727 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4728                                   struct kvm_sregs *sregs)
4729 {
4730         int mmu_reset_needed = 0;
4731         int pending_vec, max_bits;
4732         struct descriptor_table dt;
4733
4734         vcpu_load(vcpu);
4735
4736         dt.limit = sregs->idt.limit;
4737         dt.base = sregs->idt.base;
4738         kvm_x86_ops->set_idt(vcpu, &dt);
4739         dt.limit = sregs->gdt.limit;
4740         dt.base = sregs->gdt.base;
4741         kvm_x86_ops->set_gdt(vcpu, &dt);
4742
4743         vcpu->arch.cr2 = sregs->cr2;
4744         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
4745         vcpu->arch.cr3 = sregs->cr3;
4746
4747         kvm_set_cr8(vcpu, sregs->cr8);
4748
4749         mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
4750         kvm_x86_ops->set_efer(vcpu, sregs->efer);
4751         kvm_set_apic_base(vcpu, sregs->apic_base);
4752
4753         mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
4754         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
4755         vcpu->arch.cr0 = sregs->cr0;
4756
4757         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
4758         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
4759         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
4760                 load_pdptrs(vcpu, vcpu->arch.cr3);
4761                 mmu_reset_needed = 1;
4762         }
4763
4764         if (mmu_reset_needed)
4765                 kvm_mmu_reset_context(vcpu);
4766
4767         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
4768         pending_vec = find_first_bit(
4769                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
4770         if (pending_vec < max_bits) {
4771                 kvm_queue_interrupt(vcpu, pending_vec, false);
4772                 pr_debug("Set back pending irq %d\n", pending_vec);
4773                 if (irqchip_in_kernel(vcpu->kvm))
4774                         kvm_pic_clear_isr_ack(vcpu->kvm);
4775         }
4776
4777         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4778         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4779         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4780         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4781         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4782         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
4783
4784         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4785         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
4786
4787         update_cr8_intercept(vcpu);
4788
4789         /* Older userspace won't unhalt the vcpu on reset. */
4790         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
4791             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4792             !(vcpu->arch.cr0 & X86_CR0_PE))
4793                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4794
4795         vcpu_put(vcpu);
4796
4797         return 0;
4798 }
4799
4800 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4801                                         struct kvm_guest_debug *dbg)
4802 {
4803         unsigned long rflags;
4804         int i, r;
4805
4806         vcpu_load(vcpu);
4807
4808         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
4809                 r = -EBUSY;
4810                 if (vcpu->arch.exception.pending)
4811                         goto unlock_out;
4812                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
4813                         kvm_queue_exception(vcpu, DB_VECTOR);
4814                 else
4815                         kvm_queue_exception(vcpu, BP_VECTOR);
4816         }
4817
4818         /*
4819          * Read rflags as long as potentially injected trace flags are still
4820          * filtered out.
4821          */
4822         rflags = kvm_get_rflags(vcpu);
4823
4824         vcpu->guest_debug = dbg->control;
4825         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
4826                 vcpu->guest_debug = 0;
4827
4828         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
4829                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
4830                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
4831                 vcpu->arch.switch_db_regs =
4832                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
4833         } else {
4834                 for (i = 0; i < KVM_NR_DB_REGS; i++)
4835                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
4836                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
4837         }
4838
4839         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
4840                 vcpu->arch.singlestep_cs =
4841                         get_segment_selector(vcpu, VCPU_SREG_CS);
4842                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
4843         }
4844
4845         /*
4846          * Trigger an rflags update that will inject or remove the trace
4847          * flags.
4848          */
4849         kvm_set_rflags(vcpu, rflags);
4850
4851         kvm_x86_ops->set_guest_debug(vcpu, dbg);
4852
4853         r = 0;
4854
4855 unlock_out:
4856         vcpu_put(vcpu);
4857
4858         return r;
4859 }
4860
4861 /*
4862  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
4863  * we have asm/x86/processor.h
4864  */
4865 struct fxsave {
4866         u16     cwd;
4867         u16     swd;
4868         u16     twd;
4869         u16     fop;
4870         u64     rip;
4871         u64     rdp;
4872         u32     mxcsr;
4873         u32     mxcsr_mask;
4874         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
4875 #ifdef CONFIG_X86_64
4876         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
4877 #else
4878         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
4879 #endif
4880 };
4881
4882 /*
4883  * Translate a guest virtual address to a guest physical address.
4884  */
4885 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4886                                     struct kvm_translation *tr)
4887 {
4888         unsigned long vaddr = tr->linear_address;
4889         gpa_t gpa;
4890
4891         vcpu_load(vcpu);
4892         down_read(&vcpu->kvm->slots_lock);
4893         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
4894         up_read(&vcpu->kvm->slots_lock);
4895         tr->physical_address = gpa;
4896         tr->valid = gpa != UNMAPPED_GVA;
4897         tr->writeable = 1;
4898         tr->usermode = 0;
4899         vcpu_put(vcpu);
4900
4901         return 0;
4902 }
4903
4904 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4905 {
4906         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
4907
4908         vcpu_load(vcpu);
4909
4910         memcpy(fpu->fpr, fxsave->st_space, 128);
4911         fpu->fcw = fxsave->cwd;
4912         fpu->fsw = fxsave->swd;
4913         fpu->ftwx = fxsave->twd;
4914         fpu->last_opcode = fxsave->fop;
4915         fpu->last_ip = fxsave->rip;
4916         fpu->last_dp = fxsave->rdp;
4917         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
4918
4919         vcpu_put(vcpu);
4920
4921         return 0;
4922 }
4923
4924 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4925 {
4926         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
4927
4928         vcpu_load(vcpu);
4929
4930         memcpy(fxsave->st_space, fpu->fpr, 128);
4931         fxsave->cwd = fpu->fcw;
4932         fxsave->swd = fpu->fsw;
4933         fxsave->twd = fpu->ftwx;
4934         fxsave->fop = fpu->last_opcode;
4935         fxsave->rip = fpu->last_ip;
4936         fxsave->rdp = fpu->last_dp;
4937         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
4938
4939         vcpu_put(vcpu);
4940
4941         return 0;
4942 }
4943
4944 void fx_init(struct kvm_vcpu *vcpu)
4945 {
4946         unsigned after_mxcsr_mask;
4947
4948         /*
4949          * Touch the fpu the first time in non atomic context as if
4950          * this is the first fpu instruction the exception handler
4951          * will fire before the instruction returns and it'll have to
4952          * allocate ram with GFP_KERNEL.
4953          */
4954         if (!used_math())
4955                 kvm_fx_save(&vcpu->arch.host_fx_image);
4956
4957         /* Initialize guest FPU by resetting ours and saving into guest's */
4958         preempt_disable();
4959         kvm_fx_save(&vcpu->arch.host_fx_image);
4960         kvm_fx_finit();
4961         kvm_fx_save(&vcpu->arch.guest_fx_image);
4962         kvm_fx_restore(&vcpu->arch.host_fx_image);
4963         preempt_enable();
4964
4965         vcpu->arch.cr0 |= X86_CR0_ET;
4966         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
4967         vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
4968         memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
4969                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
4970 }
4971 EXPORT_SYMBOL_GPL(fx_init);
4972
4973 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4974 {
4975         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
4976                 return;
4977
4978         vcpu->guest_fpu_loaded = 1;
4979         kvm_fx_save(&vcpu->arch.host_fx_image);
4980         kvm_fx_restore(&vcpu->arch.guest_fx_image);
4981 }
4982 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4983
4984 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4985 {
4986         if (!vcpu->guest_fpu_loaded)
4987                 return;
4988
4989         vcpu->guest_fpu_loaded = 0;
4990         kvm_fx_save(&vcpu->arch.guest_fx_image);
4991         kvm_fx_restore(&vcpu->arch.host_fx_image);
4992         ++vcpu->stat.fpu_reload;
4993 }
4994 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
4995
4996 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4997 {
4998         if (vcpu->arch.time_page) {
4999                 kvm_release_page_dirty(vcpu->arch.time_page);
5000                 vcpu->arch.time_page = NULL;
5001         }
5002
5003         kvm_x86_ops->vcpu_free(vcpu);
5004 }
5005
5006 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5007                                                 unsigned int id)
5008 {
5009         return kvm_x86_ops->vcpu_create(kvm, id);
5010 }
5011
5012 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5013 {
5014         int r;
5015
5016         /* We do fxsave: this must be aligned. */
5017         BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
5018
5019         vcpu->arch.mtrr_state.have_fixed = 1;
5020         vcpu_load(vcpu);
5021         r = kvm_arch_vcpu_reset(vcpu);
5022         if (r == 0)
5023                 r = kvm_mmu_setup(vcpu);
5024         vcpu_put(vcpu);
5025         if (r < 0)
5026                 goto free_vcpu;
5027
5028         return 0;
5029 free_vcpu:
5030         kvm_x86_ops->vcpu_free(vcpu);
5031         return r;
5032 }
5033
5034 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
5035 {
5036         vcpu_load(vcpu);
5037         kvm_mmu_unload(vcpu);
5038         vcpu_put(vcpu);
5039
5040         kvm_x86_ops->vcpu_free(vcpu);
5041 }
5042
5043 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5044 {
5045         vcpu->arch.nmi_pending = false;
5046         vcpu->arch.nmi_injected = false;
5047
5048         vcpu->arch.switch_db_regs = 0;
5049         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5050         vcpu->arch.dr6 = DR6_FIXED_1;
5051         vcpu->arch.dr7 = DR7_FIXED_1;
5052
5053         return kvm_x86_ops->vcpu_reset(vcpu);
5054 }
5055
5056 int kvm_arch_hardware_enable(void *garbage)
5057 {
5058         /*
5059          * Since this may be called from a hotplug notifcation,
5060          * we can't get the CPU frequency directly.
5061          */
5062         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5063                 int cpu = raw_smp_processor_id();
5064                 per_cpu(cpu_tsc_khz, cpu) = 0;
5065         }
5066
5067         kvm_shared_msr_cpu_online();
5068
5069         return kvm_x86_ops->hardware_enable(garbage);
5070 }
5071
5072 void kvm_arch_hardware_disable(void *garbage)
5073 {
5074         kvm_x86_ops->hardware_disable(garbage);
5075         drop_user_return_notifiers(garbage);
5076 }
5077
5078 int kvm_arch_hardware_setup(void)
5079 {
5080         return kvm_x86_ops->hardware_setup();
5081 }
5082
5083 void kvm_arch_hardware_unsetup(void)
5084 {
5085         kvm_x86_ops->hardware_unsetup();
5086 }
5087
5088 void kvm_arch_check_processor_compat(void *rtn)
5089 {
5090         kvm_x86_ops->check_processor_compatibility(rtn);
5091 }
5092
5093 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5094 {
5095         struct page *page;
5096         struct kvm *kvm;
5097         int r;
5098
5099         BUG_ON(vcpu->kvm == NULL);
5100         kvm = vcpu->kvm;
5101
5102         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
5103         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
5104                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5105         else
5106                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
5107
5108         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5109         if (!page) {
5110                 r = -ENOMEM;
5111                 goto fail;
5112         }
5113         vcpu->arch.pio_data = page_address(page);
5114
5115         r = kvm_mmu_create(vcpu);
5116         if (r < 0)
5117                 goto fail_free_pio_data;
5118
5119         if (irqchip_in_kernel(kvm)) {
5120                 r = kvm_create_lapic(vcpu);
5121                 if (r < 0)
5122                         goto fail_mmu_destroy;
5123         }
5124
5125         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5126                                        GFP_KERNEL);
5127         if (!vcpu->arch.mce_banks) {
5128                 r = -ENOMEM;
5129                 goto fail_free_lapic;
5130         }
5131         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5132
5133         return 0;
5134 fail_free_lapic:
5135         kvm_free_lapic(vcpu);
5136 fail_mmu_destroy:
5137         kvm_mmu_destroy(vcpu);
5138 fail_free_pio_data:
5139         free_page((unsigned long)vcpu->arch.pio_data);
5140 fail:
5141         return r;
5142 }
5143
5144 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5145 {
5146         kfree(vcpu->arch.mce_banks);
5147         kvm_free_lapic(vcpu);
5148         down_read(&vcpu->kvm->slots_lock);
5149         kvm_mmu_destroy(vcpu);
5150         up_read(&vcpu->kvm->slots_lock);
5151         free_page((unsigned long)vcpu->arch.pio_data);
5152 }
5153
5154 struct  kvm *kvm_arch_create_vm(void)
5155 {
5156         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5157
5158         if (!kvm)
5159                 return ERR_PTR(-ENOMEM);
5160
5161         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5162         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5163
5164         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5165         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5166
5167         rdtscll(kvm->arch.vm_init_tsc);
5168
5169         return kvm;
5170 }
5171
5172 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5173 {
5174         vcpu_load(vcpu);
5175         kvm_mmu_unload(vcpu);
5176         vcpu_put(vcpu);
5177 }
5178
5179 static void kvm_free_vcpus(struct kvm *kvm)
5180 {
5181         unsigned int i;
5182         struct kvm_vcpu *vcpu;
5183
5184         /*
5185          * Unpin any mmu pages first.
5186          */
5187         kvm_for_each_vcpu(i, vcpu, kvm)
5188                 kvm_unload_vcpu_mmu(vcpu);
5189         kvm_for_each_vcpu(i, vcpu, kvm)
5190                 kvm_arch_vcpu_free(vcpu);
5191
5192         mutex_lock(&kvm->lock);
5193         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5194                 kvm->vcpus[i] = NULL;
5195
5196         atomic_set(&kvm->online_vcpus, 0);
5197         mutex_unlock(&kvm->lock);
5198 }
5199
5200 void kvm_arch_sync_events(struct kvm *kvm)
5201 {
5202         kvm_free_all_assigned_devices(kvm);
5203 }
5204
5205 void kvm_arch_destroy_vm(struct kvm *kvm)
5206 {
5207         kvm_iommu_unmap_guest(kvm);
5208         kvm_free_pit(kvm);
5209         kfree(kvm->arch.vpic);
5210         kfree(kvm->arch.vioapic);
5211         kvm_free_vcpus(kvm);
5212         kvm_free_physmem(kvm);
5213         if (kvm->arch.apic_access_page)
5214                 put_page(kvm->arch.apic_access_page);
5215         if (kvm->arch.ept_identity_pagetable)
5216                 put_page(kvm->arch.ept_identity_pagetable);
5217         kfree(kvm);
5218 }
5219
5220 int kvm_arch_set_memory_region(struct kvm *kvm,
5221                                 struct kvm_userspace_memory_region *mem,
5222                                 struct kvm_memory_slot old,
5223                                 int user_alloc)
5224 {
5225         int npages = mem->memory_size >> PAGE_SHIFT;
5226         struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
5227
5228         /*To keep backward compatibility with older userspace,
5229          *x86 needs to hanlde !user_alloc case.
5230          */
5231         if (!user_alloc) {
5232                 if (npages && !old.rmap) {
5233                         unsigned long userspace_addr;
5234
5235                         down_write(&current->mm->mmap_sem);
5236                         userspace_addr = do_mmap(NULL, 0,
5237                                                  npages * PAGE_SIZE,
5238                                                  PROT_READ | PROT_WRITE,
5239                                                  MAP_PRIVATE | MAP_ANONYMOUS,
5240                                                  0);
5241                         up_write(&current->mm->mmap_sem);
5242
5243                         if (IS_ERR((void *)userspace_addr))
5244                                 return PTR_ERR((void *)userspace_addr);
5245
5246                         /* set userspace_addr atomically for kvm_hva_to_rmapp */
5247                         spin_lock(&kvm->mmu_lock);
5248                         memslot->userspace_addr = userspace_addr;
5249                         spin_unlock(&kvm->mmu_lock);
5250                 } else {
5251                         if (!old.user_alloc && old.rmap) {
5252                                 int ret;
5253
5254                                 down_write(&current->mm->mmap_sem);
5255                                 ret = do_munmap(current->mm, old.userspace_addr,
5256                                                 old.npages * PAGE_SIZE);
5257                                 up_write(&current->mm->mmap_sem);
5258                                 if (ret < 0)
5259                                         printk(KERN_WARNING
5260                                        "kvm_vm_ioctl_set_memory_region: "
5261                                        "failed to munmap memory\n");
5262                         }
5263                 }
5264         }
5265
5266         spin_lock(&kvm->mmu_lock);
5267         if (!kvm->arch.n_requested_mmu_pages) {
5268                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5269                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5270         }
5271
5272         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
5273         spin_unlock(&kvm->mmu_lock);
5274
5275         return 0;
5276 }
5277
5278 void kvm_arch_flush_shadow(struct kvm *kvm)
5279 {
5280         kvm_mmu_zap_all(kvm);
5281         kvm_reload_remote_mmus(kvm);
5282 }
5283
5284 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5285 {
5286         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
5287                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5288                 || vcpu->arch.nmi_pending ||
5289                 (kvm_arch_interrupt_allowed(vcpu) &&
5290                  kvm_cpu_has_interrupt(vcpu));
5291 }
5292
5293 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5294 {
5295         int me;
5296         int cpu = vcpu->cpu;
5297
5298         if (waitqueue_active(&vcpu->wq)) {
5299                 wake_up_interruptible(&vcpu->wq);
5300                 ++vcpu->stat.halt_wakeup;
5301         }
5302
5303         me = get_cpu();
5304         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
5305                 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
5306                         smp_send_reschedule(cpu);
5307         put_cpu();
5308 }
5309
5310 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5311 {
5312         return kvm_x86_ops->interrupt_allowed(vcpu);
5313 }
5314
5315 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
5316 {
5317         unsigned long rflags;
5318
5319         rflags = kvm_x86_ops->get_rflags(vcpu);
5320         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5321                 rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
5322         return rflags;
5323 }
5324 EXPORT_SYMBOL_GPL(kvm_get_rflags);
5325
5326 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5327 {
5328         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
5329             vcpu->arch.singlestep_cs ==
5330                         get_segment_selector(vcpu, VCPU_SREG_CS) &&
5331             vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
5332                 rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
5333         kvm_x86_ops->set_rflags(vcpu, rflags);
5334 }
5335 EXPORT_SYMBOL_GPL(kvm_set_rflags);
5336
5337 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
5338 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
5339 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
5340 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
5341 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
5342 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
5343 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
5344 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
5345 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
5346 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
5347 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);