arch/x86/kvm/x86.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "request_nmi", VCPU_STAT(request_nmi_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
        v = d->base0 | ((unsigned long)d->base1 << 16) |
                ((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        /* Both branches of the old irqchip_in_kernel() check returned
         * the same value, so return apic_base unconditionally. */
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = false;
        vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;

        if (vcpu->arch.exception.pending) {
                if (vcpu->arch.exception.nr == PF_VECTOR) {
                        printk(KERN_DEBUG "kvm: inject_page_fault:"
                                        " double fault 0x%lx\n", addr);
                        vcpu->arch.exception.nr = DF_VECTOR;
                        vcpu->arch.exception.error_code = 0;
                } else if (vcpu->arch.exception.nr == DF_VECTOR) {
                        /* triple fault -> shutdown */
                        set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                }
                return;
        }
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
                                     vcpu->arch.exception.has_error_code,
                                     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

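/*
 * Worked example for the offset computation above (illustrative only):
 * in PAE mode, CR3 bits 5..11 locate a 32-byte-aligned table of four
 * 8-byte PDPTEs within the page.  For cr3 = 0x12345a60:
 *   cr3 & (PAGE_SIZE-1) = 0xa60
 *   (0xa60 >> 5) << 2   = 0x14c   (index of the first PDPTE, in u64 units)
 *   byte offset         = 0x14c * sizeof(u64) = 0xa60 = cr3 & 0xfe0
 */
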
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

        return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }

        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;

        kvm_mmu_sync_global(vcpu);
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
        KVMTRACE_1D(LMSW, vcpu,
                    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
                    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_sync_global(vcpu);
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                kvm_inject_gp(vcpu, 0);
        else {
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;

        vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        static int version;
        struct pvclock_wall_clock wc;
        struct timespec now, sys, boot;

        if (!wall_clock)
                return;

        version++;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        now = current_kernel_time();
        ktime_get_ts(&sys);
        boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

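/*
 * Sketch (not kernel code): how a guest might read the wall clock
 * written above, assuming a hypothetical pvclock_wc pointer mapping
 * the same guest page.  The two version bumps leave the value odd
 * while an update is in flight and even once it is complete, so
 * readers spin until they see a stable, even version:
 *
 *	u32 v;
 *	do {
 *		v = pvclock_wc->version;
 *		rmb();
 *		sec  = pvclock_wc->sec;
 *		nsec = pvclock_wc->nsec;
 *		rmb();
 *	} while ((v & 1) || v != pvclock_wc->version);
 */
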
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}

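/*
 * Worked example (illustrative only): for a 2 GHz TSC, tsc_khz is
 * 2000000, so tps64 = 2e9 ticks per second.  Neither loop runs
 * (2e9 is not above nsecs*2, and tps32 = 2e9 already exceeds 1e9),
 * leaving tsc_shift = 0 and
 *	tsc_to_system_mul = (1e9 << 32) / 2e9 = 0x80000000,
 * i.e. 0.5 in 32.32 fixed point: each TSC tick is half a nanosecond.
 */
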
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;

        if (!vcpu->time_page)
                return;

        if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
                kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = tsc_khz;
        }

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
                    &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec);
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

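/*
 * Sketch (not kernel code): the guest turns the structure written
 * above into nanoseconds roughly the way pvclock readers do, using
 * the scale computed by kvm_set_time_scale():
 *
 *	u64 delta = rdtsc() - hv_clock.tsc_timestamp;
 *	if (hv_clock.tsc_shift >= 0)
 *		delta <<= hv_clock.tsc_shift;
 *	else
 *		delta >>= -hv_clock.tsc_shift;
 *	ns = hv_clock.system_time +
 *	     ((delta * hv_clock.tsc_to_system_mul) >> 32);
 *
 * (Real implementations widen the multiply to avoid overflow; this
 * only shows the idea.)
 */
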
static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        kvm_mmu_reset_context(vcpu);
        return 0;
}

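/*
 * Worked example for the variable-MTRR decode above (illustrative
 * only): the variable ranges live at MSR 0x200 + 2*n (PHYSBASEn) and
 * 0x201 + 2*n (PHYSMASKn).  For msr = 0x205:
 *	idx          = (0x205 - 0x200) / 2   = 2
 *	is_mtrr_mask = 0x205 - 0x200 - 2*2   = 1
 * so the write lands in var_ranges[2].mask_lo, i.e. PHYSMASK2.
 */
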
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                       __func__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_MCG_CTL:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_write_guest_time(vcpu);
                break;
        }
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case 0xc0010010: /* SYSCFG */
        case 0xc0010015: /* HWCR */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_MC0_MISC+20:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_VM_HSAVE_PA:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
        up_read(&vcpu->kvm->slots_lock);

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}

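/*
 * Sketch (not kernel code): how userspace might drive KVM_GET_MSRS,
 * the ioctl that eventually lands in msr_io() above.  The buffer is
 * a struct kvm_msrs header followed by nmsrs kvm_msr_entry slots:
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} buf = { .hdr = { .nmsrs = 1 } };
 *
 *	buf.entries[0].index = MSR_EFER;
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, &buf) == 1)
 *		printf("EFER = 0x%llx\n",
 *		       (unsigned long long)buf.entries[0].data);
 *
 * The return value is the number of MSRs processed, matching the
 * "number of msrs set successfully" contract documented above.
 */
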
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_HLT:
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_REINJECT_CONTROL:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
        case KVM_CAP_NR_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = iommu_found();
                break;
        case KVM_CAP_CLOCKSOURCE:
                r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

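/*
 * Sketch (not kernel code): userspace probes these capabilities on
 * the /dev/kvm fd before relying on them, e.g.:
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
 *
 * A return of 0 means "not supported"; positive values may carry
 * extra information, like the page offset for coalesced MMIO above.
 */
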
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices
                                 + num_msrs_to_save * sizeof(u32),
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SUPPORTED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
                                                      cpuid_arg->entries);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
        kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
        u64 efer;

        rdmsrl(MSR_EFER, efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

/* Legacy interface: an old userspace feeds plain kvm_cpuid entries to
 * a newer kernel module, which converts them to kvm_cpuid_entry2. */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid *cpuid,
                                    struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                                     struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        return 0;

out:
        return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                                     struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                         u32 index, int *nent, int maxnent)
{
        const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
                bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
                bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
                bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
                bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
                bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
                bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
                bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
                bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
                bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
        const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
                bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
                bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
                bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
                bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
                bit(X86_FEATURE_PGE) |
                bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
                bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
                bit(X86_FEATURE_SYSCALL) |
                (bit(X86_FEATURE_NX) && is_efer_nx()) |
#ifdef CONFIG_X86_64
                bit(X86_FEATURE_LM) |
#endif
                bit(X86_FEATURE_MMXEXT) |
                bit(X86_FEATURE_3DNOWEXT) |
                bit(X86_FEATURE_3DNOW);
        const u32 kvm_supported_word3_x86_features =
                bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
        const u32 kvm_supported_word6_x86_features =
                bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
                bit(X86_FEATURE_SVM);

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xb);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                entry->ecx &= kvm_supported_word3_x86_features;
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times && *nent < maxnent; ++t) {
                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 and 0xb have additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; *nent < maxnent; ++i) {
                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; *nent < maxnent; ++i) {
                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                entry->ecx &= kvm_supported_word6_x86_features;
                break;
        }
        put_cpu();
}

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG;
        u32 func;

        if (cpuid->nent < 1)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
        limit = cpuid_entries[0].eax;
        for (func = 1; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
        r = -E2BIG;
        if (nent >= cpuid->nent)
                goto out_free;

        do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
        limit = cpuid_entries[nent - 1].eax;
        for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

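/*
 * Sketch (not kernel code): userspace typically calls
 * KVM_GET_SUPPORTED_CPUID with a guessed entry count, grows the
 * buffer when the kernel reports E2BIG, and then feeds the result
 * back through KVM_SET_CPUID2:
 *
 *	struct kvm_cpuid2 *c;
 *	int nent = 64;
 *
 *	for (;;) {
 *		c = calloc(1, sizeof(*c) + nent * sizeof(c->entries[0]));
 *		c->nent = nent;
 *		if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, c) == 0)
 *			break;
 *		free(c);
 *		if (errno != E2BIG)
 *			return -1;
 *		nent *= 2;
 *	}
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, c);
 */
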
1366 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1367                                     struct kvm_lapic_state *s)
1368 {
1369         vcpu_load(vcpu);
1370         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
1371         vcpu_put(vcpu);
1372
1373         return 0;
1374 }
1375
1376 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1377                                     struct kvm_lapic_state *s)
1378 {
1379         vcpu_load(vcpu);
1380         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1381         kvm_apic_post_state_restore(vcpu);
1382         vcpu_put(vcpu);
1383
1384         return 0;
1385 }
1386
1387 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1388                                     struct kvm_interrupt *irq)
1389 {
1390         if (irq->irq < 0 || irq->irq >= 256)
1391                 return -EINVAL;
1392         if (irqchip_in_kernel(vcpu->kvm))
1393                 return -ENXIO;
1394         vcpu_load(vcpu);
1395
1396         set_bit(irq->irq, vcpu->arch.irq_pending);
1397         set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
1398
1399         vcpu_put(vcpu);
1400
1401         return 0;
1402 }
1403
1404 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
1405 {
1406         vcpu_load(vcpu);
1407         kvm_inject_nmi(vcpu);
1408         vcpu_put(vcpu);
1409
1410         return 0;
1411 }
1412
1413 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1414                                            struct kvm_tpr_access_ctl *tac)
1415 {
1416         if (tac->flags)
1417                 return -EINVAL;
1418         vcpu->arch.tpr_access_reporting = !!tac->enabled;
1419         return 0;
1420 }
1421
1422 long kvm_arch_vcpu_ioctl(struct file *filp,
1423                          unsigned int ioctl, unsigned long arg)
1424 {
1425         struct kvm_vcpu *vcpu = filp->private_data;
1426         void __user *argp = (void __user *)arg;
1427         int r;
1428         struct kvm_lapic_state *lapic = NULL;
1429
1430         switch (ioctl) {
1431         case KVM_GET_LAPIC: {
1432                 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1433
1434                 r = -ENOMEM;
1435                 if (!lapic)
1436                         goto out;
1437                 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
1438                 if (r)
1439                         goto out;
1440                 r = -EFAULT;
1441                 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
1442                         goto out;
1443                 r = 0;
1444                 break;
1445         }
1446         case KVM_SET_LAPIC: {
1447                 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
1448                 r = -ENOMEM;
1449                 if (!lapic)
1450                         goto out;
1451                 r = -EFAULT;
1452                 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
1453                         goto out;
1454                 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
1455                 if (r)
1456                         goto out;
1457                 r = 0;
1458                 break;
1459         }
1460         case KVM_INTERRUPT: {
1461                 struct kvm_interrupt irq;
1462
1463                 r = -EFAULT;
1464                 if (copy_from_user(&irq, argp, sizeof irq))
1465                         goto out;
1466                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1467                 if (r)
1468                         goto out;
1469                 r = 0;
1470                 break;
1471         }
1472         case KVM_NMI: {
1473                 r = kvm_vcpu_ioctl_nmi(vcpu);
1474                 if (r)
1475                         goto out;
1476                 r = 0;
1477                 break;
1478         }
1479         case KVM_SET_CPUID: {
1480                 struct kvm_cpuid __user *cpuid_arg = argp;
1481                 struct kvm_cpuid cpuid;
1482
1483                 r = -EFAULT;
1484                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1485                         goto out;
1486                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1487                 if (r)
1488                         goto out;
1489                 break;
1490         }
1491         case KVM_SET_CPUID2: {
1492                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1493                 struct kvm_cpuid2 cpuid;
1494
1495                 r = -EFAULT;
1496                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1497                         goto out;
1498                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1499                                               cpuid_arg->entries);
1500                 if (r)
1501                         goto out;
1502                 break;
1503         }
1504         case KVM_GET_CPUID2: {
1505                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1506                 struct kvm_cpuid2 cpuid;
1507
1508                 r = -EFAULT;
1509                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1510                         goto out;
1511                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1512                                               cpuid_arg->entries);
1513                 if (r)
1514                         goto out;
1515                 r = -EFAULT;
1516                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1517                         goto out;
1518                 r = 0;
1519                 break;
1520         }
1521         case KVM_GET_MSRS:
1522                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1523                 break;
1524         case KVM_SET_MSRS:
1525                 r = msr_io(vcpu, argp, do_set_msr, 0);
1526                 break;
1527         case KVM_TPR_ACCESS_REPORTING: {
1528                 struct kvm_tpr_access_ctl tac;
1529
1530                 r = -EFAULT;
1531                 if (copy_from_user(&tac, argp, sizeof tac))
1532                         goto out;
1533                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
1534                 if (r)
1535                         goto out;
1536                 r = -EFAULT;
1537                 if (copy_to_user(argp, &tac, sizeof tac))
1538                         goto out;
1539                 r = 0;
1540                 break;
1541         }
1542         case KVM_SET_VAPIC_ADDR: {
1543                 struct kvm_vapic_addr va;
1544
1545                 r = -EINVAL;
1546                 if (!irqchip_in_kernel(vcpu->kvm))
1547                         goto out;
1548                 r = -EFAULT;
1549                 if (copy_from_user(&va, argp, sizeof va))
1550                         goto out;
1551                 r = 0;
1552                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
1553                 break;
1554         }
1555         default:
1556                 r = -EINVAL;
1557         }
1558 out:
1559         /* kfree() tolerates NULL, so no check is needed */
1560         kfree(lapic);
1561         return r;
1562 }
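/*
 * Example: a minimal userspace sketch of driving the per-vcpu ioctls
 * handled above.  "vcpu_fd" is assumed to come from KVM_CREATE_VCPU and
 * "save_restore_lapic" is a hypothetical helper; error handling is
 * trimmed to the bare minimum.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int save_restore_lapic(int vcpu_fd)
 *	{
 *		struct kvm_lapic_state lapic;
 *
 *		if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
 *			return -1;
 *		// ... inspect or migrate the register page ...
 *		return ioctl(vcpu_fd, KVM_SET_LAPIC, &lapic);
 *	}
 */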
1563
1564 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1565 {
1566         int ret;
1567
1568         if (addr > (unsigned int)(-3 * PAGE_SIZE))
1569                 return -1;
1570         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1571         return ret;
1572 }
1573
1574 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1575                                           u32 kvm_nr_mmu_pages)
1576 {
1577         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1578                 return -EINVAL;
1579
1580         down_write(&kvm->slots_lock);
1581
1582         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
1583         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
1584
1585         up_write(&kvm->slots_lock);
1586         return 0;
1587 }
1588
1589 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1590 {
1591         return kvm->arch.n_alloc_mmu_pages;
1592 }
1593
1594 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1595 {
1596         int i;
1597         struct kvm_mem_alias *alias;
1598
1599         for (i = 0; i < kvm->arch.naliases; ++i) {
1600                 alias = &kvm->arch.aliases[i];
1601                 if (gfn >= alias->base_gfn
1602                     && gfn < alias->base_gfn + alias->npages)
1603                         return alias->target_gfn + gfn - alias->base_gfn;
1604         }
1605         return gfn;
1606 }
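/*
 * Example: with an alias of base_gfn = 0xa0, npages = 0x10 and
 * target_gfn = 0x100, a lookup of gfn 0xa5 falls inside the window and
 * unalias_gfn() returns 0x100 + 0xa5 - 0xa0 = 0x105, while gfn 0xb0
 * lies just past the window and comes back unchanged.
 */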
1607
1608 /*
1609  * Set a new alias region.  Aliases map a portion of physical memory into
1610  * another portion.  This is useful for memory windows, for example the PC
1611  * VGA region.
1612  */
1613 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1614                                          struct kvm_memory_alias *alias)
1615 {
1616         int r, n;
1617         struct kvm_mem_alias *p;
1618
1619         r = -EINVAL;
1620         /* General sanity checks */
1621         if (alias->memory_size & (PAGE_SIZE - 1))
1622                 goto out;
1623         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1624                 goto out;
1625         if (alias->slot >= KVM_ALIAS_SLOTS)
1626                 goto out;
1627         if (alias->guest_phys_addr + alias->memory_size
1628             < alias->guest_phys_addr)
1629                 goto out;
1630         if (alias->target_phys_addr + alias->memory_size
1631             < alias->target_phys_addr)
1632                 goto out;
1633
1634         down_write(&kvm->slots_lock);
1635         spin_lock(&kvm->mmu_lock);
1636
1637         p = &kvm->arch.aliases[alias->slot];
1638         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1639         p->npages = alias->memory_size >> PAGE_SHIFT;
1640         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1641
1642         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
1643                 if (kvm->arch.aliases[n - 1].npages)
1644                         break;
1645         kvm->arch.naliases = n;
1646
1647         spin_unlock(&kvm->mmu_lock);
1648         kvm_mmu_zap_all(kvm);
1649
1650         up_write(&kvm->slots_lock);
1651
1652         return 0;
1653
1654 out:
1655         return r;
1656 }
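/*
 * Example: a hypothetical userspace sketch of installing the PC VGA
 * window through the ioctl that lands here.  "vm_fd" is assumed to come
 * from KVM_CREATE_VM and "fb_gpa" is an assumed, page-aligned target
 * address; unaligned sizes or addresses fail the checks above:
 *
 *	struct kvm_memory_alias alias = {
 *		.slot             = 0,
 *		.guest_phys_addr  = 0xa0000,	// legacy VGA window
 *		.memory_size      = 0x20000,	// 128 KiB
 *		.target_phys_addr = fb_gpa,
 *	};
 *	ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &alias);
 */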
1657
1658 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1659 {
1660         int r;
1661
1662         r = 0;
1663         switch (chip->chip_id) {
1664         case KVM_IRQCHIP_PIC_MASTER:
1665                 memcpy(&chip->chip.pic,
1666                         &pic_irqchip(kvm)->pics[0],
1667                         sizeof(struct kvm_pic_state));
1668                 break;
1669         case KVM_IRQCHIP_PIC_SLAVE:
1670                 memcpy(&chip->chip.pic,
1671                         &pic_irqchip(kvm)->pics[1],
1672                         sizeof(struct kvm_pic_state));
1673                 break;
1674         case KVM_IRQCHIP_IOAPIC:
1675                 memcpy(&chip->chip.ioapic,
1676                         ioapic_irqchip(kvm),
1677                         sizeof(struct kvm_ioapic_state));
1678                 break;
1679         default:
1680                 r = -EINVAL;
1681                 break;
1682         }
1683         return r;
1684 }
1685
1686 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1687 {
1688         int r;
1689
1690         r = 0;
1691         switch (chip->chip_id) {
1692         case KVM_IRQCHIP_PIC_MASTER:
1693                 memcpy(&pic_irqchip(kvm)->pics[0],
1694                         &chip->chip.pic,
1695                         sizeof(struct kvm_pic_state));
1696                 break;
1697         case KVM_IRQCHIP_PIC_SLAVE:
1698                 memcpy(&pic_irqchip(kvm)->pics[1],
1699                         &chip->chip.pic,
1700                         sizeof(struct kvm_pic_state));
1701                 break;
1702         case KVM_IRQCHIP_IOAPIC:
1703                 memcpy(ioapic_irqchip(kvm),
1704                         &chip->chip.ioapic,
1705                         sizeof(struct kvm_ioapic_state));
1706                 break;
1707         default:
1708                 r = -EINVAL;
1709                 break;
1710         }
1711         kvm_pic_update_irq(pic_irqchip(kvm));
1712         return r;
1713 }
1714
1715 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1716 {
1717         int r = 0;
1718
1719         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
1720         return r;
1721 }
1722
1723 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1724 {
1725         int r = 0;
1726
1727         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
1728         kvm_pit_load_count(kvm, 0, ps->channels[0].count);
1729         return r;
1730 }
1731
1732 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
1733                                  struct kvm_reinject_control *control)
1734 {
1735         if (!kvm->arch.vpit)
1736                 return -ENXIO;
1737         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
1738         return 0;
1739 }
1740
1741 /*
1742  * Get (and clear) the dirty memory log for a memory slot.
1743  */
1744 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1745                                       struct kvm_dirty_log *log)
1746 {
1747         int r;
1748         int n;
1749         struct kvm_memory_slot *memslot;
1750         int is_dirty = 0;
1751
1752         down_write(&kvm->slots_lock);
1753
1754         r = kvm_get_dirty_log(kvm, log, &is_dirty);
1755         if (r)
1756                 goto out;
1757
1758         /* If nothing is dirty, don't bother messing with page tables. */
1759         if (is_dirty) {
1760                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1761                 kvm_flush_remote_tlbs(kvm);
1762                 memslot = &kvm->memslots[log->slot];
1763                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1764                 memset(memslot->dirty_bitmap, 0, n);
1765         }
1766         r = 0;
1767 out:
1768         up_write(&kvm->slots_lock);
1769         return r;
1770 }
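/*
 * Example: the log holds one bit per page, rounded up to a whole number
 * of longs, which is exactly the ALIGN(npages, BITS_PER_LONG) / 8 bytes
 * cleared above.  A minimal userspace sketch, assuming "vm_fd", "slot"
 * and a caller-allocated "bitmap" of at least that many bytes:
 *
 *	struct kvm_dirty_log log = {
 *		.slot         = slot,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */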
1771
1772 long kvm_arch_vm_ioctl(struct file *filp,
1773                        unsigned int ioctl, unsigned long arg)
1774 {
1775         struct kvm *kvm = filp->private_data;
1776         void __user *argp = (void __user *)arg;
1777         int r = -EINVAL;
1778         /*
1779          * This union makes it completely explicit to gcc-3.x
1780          * that these two variables' stack usage should be
1781          * combined, not added together.
1782          */
1783         union {
1784                 struct kvm_pit_state ps;
1785                 struct kvm_memory_alias alias;
1786         } u;
1787
1788         switch (ioctl) {
1789         case KVM_SET_TSS_ADDR:
1790                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1791                 if (r < 0)
1792                         goto out;
1793                 break;
1794         case KVM_SET_MEMORY_REGION: {
1795                 struct kvm_memory_region kvm_mem;
1796                 struct kvm_userspace_memory_region kvm_userspace_mem;
1797
1798                 r = -EFAULT;
1799                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1800                         goto out;
1801                 kvm_userspace_mem.slot = kvm_mem.slot;
1802                 kvm_userspace_mem.flags = kvm_mem.flags;
1803                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1804                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1805                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1806                 if (r)
1807                         goto out;
1808                 break;
1809         }
1810         case KVM_SET_NR_MMU_PAGES:
1811                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1812                 if (r)
1813                         goto out;
1814                 break;
1815         case KVM_GET_NR_MMU_PAGES:
1816                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1817                 break;
1818         case KVM_SET_MEMORY_ALIAS:
1819                 r = -EFAULT;
1820                 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1821                         goto out;
1822                 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1823                 if (r)
1824                         goto out;
1825                 break;
1826         case KVM_CREATE_IRQCHIP:
1827                 r = -ENOMEM;
1828                 kvm->arch.vpic = kvm_create_pic(kvm);
1829                 if (kvm->arch.vpic) {
1830                         r = kvm_ioapic_init(kvm);
1831                         if (r) {
1832                                 kfree(kvm->arch.vpic);
1833                                 kvm->arch.vpic = NULL;
1834                                 goto out;
1835                         }
1836                 } else
1837                         goto out;
1838                 break;
1839         case KVM_CREATE_PIT:
1840                 mutex_lock(&kvm->lock);
1841                 r = -EEXIST;
1842                 if (kvm->arch.vpit)
1843                         goto create_pit_unlock;
1844                 r = -ENOMEM;
1845                 kvm->arch.vpit = kvm_create_pit(kvm);
1846                 if (kvm->arch.vpit)
1847                         r = 0;
1848         create_pit_unlock:
1849                 mutex_unlock(&kvm->lock);
1850                 break;
1851         case KVM_IRQ_LINE: {
1852                 struct kvm_irq_level irq_event;
1853
1854                 r = -EFAULT;
1855                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1856                         goto out;
1857                 if (irqchip_in_kernel(kvm)) {
1858                         mutex_lock(&kvm->lock);
1859                         kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1860                                     irq_event.irq, irq_event.level);
1861                         mutex_unlock(&kvm->lock);
1862                         r = 0;
1863                 }
1864                 break;
1865         }
1866         case KVM_GET_IRQCHIP: {
1867                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1868                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1869
1870                 r = -ENOMEM;
1871                 if (!chip)
1872                         goto out;
1873                 r = -EFAULT;
1874                 if (copy_from_user(chip, argp, sizeof *chip))
1875                         goto get_irqchip_out;
1876                 r = -ENXIO;
1877                 if (!irqchip_in_kernel(kvm))
1878                         goto get_irqchip_out;
1879                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1880                 if (r)
1881                         goto get_irqchip_out;
1882                 r = -EFAULT;
1883                 if (copy_to_user(argp, chip, sizeof *chip))
1884                         goto get_irqchip_out;
1885                 r = 0;
1886         get_irqchip_out:
1887                 kfree(chip);
1888                 if (r)
1889                         goto out;
1890                 break;
1891         }
1892         case KVM_SET_IRQCHIP: {
1893                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1894                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1895
1896                 r = -ENOMEM;
1897                 if (!chip)
1898                         goto out;
1899                 r = -EFAULT;
1900                 if (copy_from_user(chip, argp, sizeof *chip))
1901                         goto set_irqchip_out;
1902                 r = -ENXIO;
1903                 if (!irqchip_in_kernel(kvm))
1904                         goto set_irqchip_out;
1905                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1906                 if (r)
1907                         goto set_irqchip_out;
1908                 r = 0;
1909         set_irqchip_out:
1910                 kfree(chip);
1911                 if (r)
1912                         goto out;
1913                 break;
1914         }
1915         case KVM_GET_PIT: {
1916                 r = -EFAULT;
1917                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
1918                         goto out;
1919                 r = -ENXIO;
1920                 if (!kvm->arch.vpit)
1921                         goto out;
1922                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
1923                 if (r)
1924                         goto out;
1925                 r = -EFAULT;
1926                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
1927                         goto out;
1928                 r = 0;
1929                 break;
1930         }
1931         case KVM_SET_PIT: {
1932                 r = -EFAULT;
1933                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
1934                         goto out;
1935                 r = -ENXIO;
1936                 if (!kvm->arch.vpit)
1937                         goto out;
1938                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
1939                 if (r)
1940                         goto out;
1941                 r = 0;
1942                 break;
1943         }
1944         case KVM_REINJECT_CONTROL: {
1945                 struct kvm_reinject_control control;
1946                 r = -EFAULT;
1947                 if (copy_from_user(&control, argp, sizeof(control)))
1948                         goto out;
1949                 r = kvm_vm_ioctl_reinject(kvm, &control);
1950                 if (r)
1951                         goto out;
1952                 r = 0;
1953                 break;
1954         }
1955         default:
1956                 break;
1957         }
1958 out:
1959         return r;
1960 }
1961
1962 static void kvm_init_msr_list(void)
1963 {
1964         u32 dummy[2];
1965         unsigned i, j;
1966
1967         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
1968                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
1969                         continue;
1970                 if (j < i)
1971                         msrs_to_save[j] = msrs_to_save[i];
1972                 j++;
1973         }
1974         num_msrs_to_save = j;
1975 }
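/*
 * Example: the loop above filters msrs_to_save in place.  If rdmsr_safe()
 * faults on the entries at indices 1 and 3, {A, B, C, D, E} is compacted
 * to {A, C, E} and num_msrs_to_save becomes 3; the stale tail entries are
 * never consulted again.
 */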
1976
1977 /*
1978  * Only the APIC needs an MMIO device hook, so take a shortcut for now.
1979  */
1980 static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1981                                                 gpa_t addr, int len,
1982                                                 int is_write)
1983 {
1984         struct kvm_io_device *dev;
1985
1986         if (vcpu->arch.apic) {
1987                 dev = &vcpu->arch.apic->dev;
1988                 if (dev->in_range(dev, addr, len, is_write))
1989                         return dev;
1990         }
1991         return NULL;
1992 }
1993
1994
1995 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1996                                                 gpa_t addr, int len,
1997                                                 int is_write)
1998 {
1999         struct kvm_io_device *dev;
2000
2001         dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
2002         if (dev == NULL)
2003                 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2004                                           is_write);
2005         return dev;
2006 }
2007
2008 int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2009                         struct kvm_vcpu *vcpu)
2010 {
2011         void *data = val;
2012         int r = X86EMUL_CONTINUE;
2013
2014         while (bytes) {
2015                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2016                 unsigned offset = addr & (PAGE_SIZE-1);
2017                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2018                 int ret;
2019
2020                 if (gpa == UNMAPPED_GVA) {
2021                         r = X86EMUL_PROPAGATE_FAULT;
2022                         goto out;
2023                 }
2024                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2025                 if (ret < 0) {
2026                         r = X86EMUL_UNHANDLEABLE;
2027                         goto out;
2028                 }
2029
2030                 bytes -= toread;
2031                 data += toread;
2032                 addr += toread;
2033         }
2034 out:
2035         return r;
2036 }
2037
2038 int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2039                          struct kvm_vcpu *vcpu)
2040 {
2041         void *data = val;
2042         int r = X86EMUL_CONTINUE;
2043
2044         while (bytes) {
2045                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2046                 unsigned offset = addr & (PAGE_SIZE-1);
2047                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2048                 int ret;
2049
2050                 if (gpa == UNMAPPED_GVA) {
2051                         r = X86EMUL_PROPAGATE_FAULT;
2052                         goto out;
2053                 }
2054                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2055                 if (ret < 0) {
2056                         r = X86EMUL_UNHANDLEABLE;
2057                         goto out;
2058                 }
2059
2060                 bytes -= towrite;
2061                 data += towrite;
2062                 addr += towrite;
2063         }
2064 out:
2065         return r;
2066 }
2067
2068
2069 static int emulator_read_emulated(unsigned long addr,
2070                                   void *val,
2071                                   unsigned int bytes,
2072                                   struct kvm_vcpu *vcpu)
2073 {
2074         struct kvm_io_device *mmio_dev;
2075         gpa_t                 gpa;
2076
2077         if (vcpu->mmio_read_completed) {
2078                 memcpy(val, vcpu->mmio_data, bytes);
2079                 vcpu->mmio_read_completed = 0;
2080                 return X86EMUL_CONTINUE;
2081         }
2082
2083         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2084
2085         /* For APIC access vmexit */
2086         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2087                 goto mmio;
2088
2089         if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2090                                 == X86EMUL_CONTINUE)
2091                 return X86EMUL_CONTINUE;
2092         if (gpa == UNMAPPED_GVA)
2093                 return X86EMUL_PROPAGATE_FAULT;
2094
2095 mmio:
2096         /*
2097          * Is this MMIO handled locally?
2098          */
2099         mutex_lock(&vcpu->kvm->lock);
2100         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
2101         if (mmio_dev) {
2102                 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
2103                 mutex_unlock(&vcpu->kvm->lock);
2104                 return X86EMUL_CONTINUE;
2105         }
2106         mutex_unlock(&vcpu->kvm->lock);
2107
2108         vcpu->mmio_needed = 1;
2109         vcpu->mmio_phys_addr = gpa;
2110         vcpu->mmio_size = bytes;
2111         vcpu->mmio_is_write = 0;
2112
2113         return X86EMUL_UNHANDLEABLE;
2114 }
2115
2116 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2117                           const void *val, int bytes)
2118 {
2119         int ret;
2120
2121         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2122         if (ret < 0)
2123                 return 0;
2124         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2125         return 1;
2126 }
2127
2128 static int emulator_write_emulated_onepage(unsigned long addr,
2129                                            const void *val,
2130                                            unsigned int bytes,
2131                                            struct kvm_vcpu *vcpu)
2132 {
2133         struct kvm_io_device *mmio_dev;
2134         gpa_t                 gpa;
2135
2136         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2137
2138         if (gpa == UNMAPPED_GVA) {
2139                 kvm_inject_page_fault(vcpu, addr, 2);
2140                 return X86EMUL_PROPAGATE_FAULT;
2141         }
2142
2143         /* For APIC access vmexit */
2144         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2145                 goto mmio;
2146
2147         if (emulator_write_phys(vcpu, gpa, val, bytes))
2148                 return X86EMUL_CONTINUE;
2149
2150 mmio:
2151         /*
2152          * Is this MMIO handled locally?
2153          */
2154         mutex_lock(&vcpu->kvm->lock);
2155         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2156         if (mmio_dev) {
2157                 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
2158                 mutex_unlock(&vcpu->kvm->lock);
2159                 return X86EMUL_CONTINUE;
2160         }
2161         mutex_unlock(&vcpu->kvm->lock);
2162
2163         vcpu->mmio_needed = 1;
2164         vcpu->mmio_phys_addr = gpa;
2165         vcpu->mmio_size = bytes;
2166         vcpu->mmio_is_write = 1;
2167         memcpy(vcpu->mmio_data, val, bytes);
2168
2169         return X86EMUL_CONTINUE;
2170 }
2171
2172 int emulator_write_emulated(unsigned long addr,
2173                                    const void *val,
2174                                    unsigned int bytes,
2175                                    struct kvm_vcpu *vcpu)
2176 {
2177         /* Crossing a page boundary? */
2178         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2179                 int rc, now;
2180
2181                 now = -addr & ~PAGE_MASK;
2182                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2183                 if (rc != X86EMUL_CONTINUE)
2184                         return rc;
2185                 addr += now;
2186                 val += now;
2187                 bytes -= now;
2188         }
2189         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2190 }
2191 EXPORT_SYMBOL_GPL(emulator_write_emulated);
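/*
 * Example: "now = -addr & ~PAGE_MASK" is the distance from addr to the
 * next page boundary.  With 4 KiB pages, addr = 0x1ffe and bytes = 4,
 * now = 2: the bytes at 0x1ffe-0x1fff go through the first onepage call
 * and the remaining two bytes through the second, starting at 0x2000.
 */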
2192
2193 static int emulator_cmpxchg_emulated(unsigned long addr,
2194                                      const void *old,
2195                                      const void *new,
2196                                      unsigned int bytes,
2197                                      struct kvm_vcpu *vcpu)
2198 {
2199         static int reported;
2200
2201         if (!reported) {
2202                 reported = 1;
2203                 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2204         }
2205 #ifndef CONFIG_X86_64
2206         /* a guest's cmpxchg8b has to be emulated atomically */
2207         if (bytes == 8) {
2208                 gpa_t gpa;
2209                 struct page *page;
2210                 char *kaddr;
2211                 u64 val;
2212
2213                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2214
2215                 if (gpa == UNMAPPED_GVA ||
2216                    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2217                         goto emul_write;
2218
2219                 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2220                         goto emul_write;
2221
2222                 val = *(u64 *)new;
2223
2224                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2225
2226                 kaddr = kmap_atomic(page, KM_USER0);
2227                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2228                 kunmap_atomic(kaddr, KM_USER0);
2229                 kvm_release_page_dirty(page);
2230         }
2231 emul_write:
2232 #endif
2233
2234         return emulator_write_emulated(addr, new, bytes, vcpu);
2235 }
2236
2237 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2238 {
2239         return kvm_x86_ops->get_segment_base(vcpu, seg);
2240 }
2241
2242 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2243 {
2244         kvm_mmu_invlpg(vcpu, address);
2245         return X86EMUL_CONTINUE;
2246 }
2247
2248 int emulate_clts(struct kvm_vcpu *vcpu)
2249 {
2250         KVMTRACE_0D(CLTS, vcpu, handler);
2251         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2252         return X86EMUL_CONTINUE;
2253 }
2254
2255 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2256 {
2257         struct kvm_vcpu *vcpu = ctxt->vcpu;
2258
2259         switch (dr) {
2260         case 0 ... 3:
2261                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2262                 return X86EMUL_CONTINUE;
2263         default:
2264                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2265                 return X86EMUL_UNHANDLEABLE;
2266         }
2267 }
2268
2269 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2270 {
2271         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2272         int exception;
2273
2274         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2275         if (exception) {
2276                 /* FIXME: better handling */
2277                 return X86EMUL_UNHANDLEABLE;
2278         }
2279         return X86EMUL_CONTINUE;
2280 }
2281
2282 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2283 {
2284         u8 opcodes[4];
2285         unsigned long rip = kvm_rip_read(vcpu);
2286         unsigned long rip_linear;
2287
2288         if (!printk_ratelimit())
2289                 return;
2290
2291         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2292
2293         kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2294
2295         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2296                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2297 }
2298 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2299
2300 static struct x86_emulate_ops emulate_ops = {
2301         .read_std            = kvm_read_guest_virt,
2302         .read_emulated       = emulator_read_emulated,
2303         .write_emulated      = emulator_write_emulated,
2304         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
2305 };
2306
2307 static void cache_all_regs(struct kvm_vcpu *vcpu)
2308 {
2309         kvm_register_read(vcpu, VCPU_REGS_RAX);
2310         kvm_register_read(vcpu, VCPU_REGS_RSP);
2311         kvm_register_read(vcpu, VCPU_REGS_RIP);
2312         vcpu->arch.regs_dirty = ~0;
2313 }
2314
2315 int emulate_instruction(struct kvm_vcpu *vcpu,
2316                         struct kvm_run *run,
2317                         unsigned long cr2,
2318                         u16 error_code,
2319                         int emulation_type)
2320 {
2321         int r;
2322         struct decode_cache *c;
2323
2324         kvm_clear_exception_queue(vcpu);
2325         vcpu->arch.mmio_fault_cr2 = cr2;
2326         /*
2327          * TODO: fix x86_emulate.c to use guest_read/write_register
2328          * instead of direct ->regs accesses; this can save a hundred
2329          * cycles on Intel for instructions that don't read/change RSP,
2330          * for example.
2331          */
2332         cache_all_regs(vcpu);
2333
2334         vcpu->mmio_is_write = 0;
2335         vcpu->arch.pio.string = 0;
2336
2337         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2338                 int cs_db, cs_l;
2339                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2340
2341                 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2342                 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2343                 vcpu->arch.emulate_ctxt.mode =
2344                         (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2345                         ? X86EMUL_MODE_REAL : cs_l
2346                         ? X86EMUL_MODE_PROT64 : cs_db
2347                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2348
2349                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2350
2351                 /* Reject instructions other than VMCALL/VMMCALL when
2352                  * trying to emulate an invalid opcode */
2353                 c = &vcpu->arch.emulate_ctxt.decode;
2354                 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2355                     (!(c->twobyte && c->b == 0x01 &&
2356                       (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2357                        c->modrm_mod == 3 && c->modrm_rm == 1)))
2358                         return EMULATE_FAIL;
2359
2360                 ++vcpu->stat.insn_emulation;
2361                 if (r)  {
2362                         ++vcpu->stat.insn_emulation_fail;
2363                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2364                                 return EMULATE_DONE;
2365                         return EMULATE_FAIL;
2366                 }
2367         }
2368
2369         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2370
2371         if (vcpu->arch.pio.string)
2372                 return EMULATE_DO_MMIO;
2373
2374         if ((r || vcpu->mmio_is_write) && run) {
2375                 run->exit_reason = KVM_EXIT_MMIO;
2376                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2377                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2378                 run->mmio.len = vcpu->mmio_size;
2379                 run->mmio.is_write = vcpu->mmio_is_write;
2380         }
2381
2382         if (r) {
2383                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2384                         return EMULATE_DONE;
2385                 if (!vcpu->mmio_needed) {
2386                         kvm_report_emulation_failure(vcpu, "mmio");
2387                         return EMULATE_FAIL;
2388                 }
2389                 return EMULATE_DO_MMIO;
2390         }
2391
2392         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2393
2394         if (vcpu->mmio_is_write) {
2395                 vcpu->mmio_needed = 0;
2396                 return EMULATE_DO_MMIO;
2397         }
2398
2399         return EMULATE_DONE;
2400 }
2401 EXPORT_SYMBOL_GPL(emulate_instruction);
2402
2403 static int pio_copy_data(struct kvm_vcpu *vcpu)
2404 {
2405         void *p = vcpu->arch.pio_data;
2406         gva_t q = vcpu->arch.pio.guest_gva;
2407         unsigned bytes;
2408         int ret;
2409
2410         bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2411         if (vcpu->arch.pio.in)
2412                 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2413         else
2414                 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2415         return ret;
2416 }
2417
2418 int complete_pio(struct kvm_vcpu *vcpu)
2419 {
2420         struct kvm_pio_request *io = &vcpu->arch.pio;
2421         long delta;
2422         int r;
2423         unsigned long val;
2424
2425         if (!io->string) {
2426                 if (io->in) {
2427                         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2428                         memcpy(&val, vcpu->arch.pio_data, io->size);
2429                         kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2430                 }
2431         } else {
2432                 if (io->in) {
2433                         r = pio_copy_data(vcpu);
2434                         if (r)
2435                                 return r;
2436                 }
2437
2438                 delta = 1;
2439                 if (io->rep) {
2440                         delta *= io->cur_count;
2441                         /*
2442                          * The size of the register should really depend on
2443                  * the current address size.
2444                          */
2445                         val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2446                         val -= delta;
2447                         kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2448                 }
2449                 if (io->down)
2450                         delta = -delta;
2451                 delta *= io->size;
2452                 if (io->in) {
2453                         val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2454                         val += delta;
2455                         kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2456                 } else {
2457                         val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2458                         val += delta;
2459                         kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2460                 }
2461         }
2462
2463         io->count -= io->cur_count;
2464         io->cur_count = 0;
2465
2466         return 0;
2467 }
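/*
 * Example: for a forward "rep outsb" (in = 0, down = 0, size = 1) that
 * completed cur_count = 3 iterations, the code above subtracts 3 from
 * RCX and adds delta = 3 * 1 to RSI; a string read would bump RDI
 * instead, and io->down would negate delta for reverse copies.
 */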
2468
2469 static void kernel_pio(struct kvm_io_device *pio_dev,
2470                        struct kvm_vcpu *vcpu,
2471                        void *pd)
2472 {
2473         /* TODO: string I/O for in-kernel devices */
2474
2475         mutex_lock(&vcpu->kvm->lock);
2476         if (vcpu->arch.pio.in)
2477                 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2478                                   vcpu->arch.pio.size,
2479                                   pd);
2480         else
2481                 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2482                                    vcpu->arch.pio.size,
2483                                    pd);
2484         mutex_unlock(&vcpu->kvm->lock);
2485 }
2486
2487 static void pio_string_write(struct kvm_io_device *pio_dev,
2488                              struct kvm_vcpu *vcpu)
2489 {
2490         struct kvm_pio_request *io = &vcpu->arch.pio;
2491         void *pd = vcpu->arch.pio_data;
2492         int i;
2493
2494         mutex_lock(&vcpu->kvm->lock);
2495         for (i = 0; i < io->cur_count; i++) {
2496                 kvm_iodevice_write(pio_dev, io->port,
2497                                    io->size,
2498                                    pd);
2499                 pd += io->size;
2500         }
2501         mutex_unlock(&vcpu->kvm->lock);
2502 }
2503
2504 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2505                                                gpa_t addr, int len,
2506                                                int is_write)
2507 {
2508         return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2509 }
2510
2511 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2512                   int size, unsigned port)
2513 {
2514         struct kvm_io_device *pio_dev;
2515         unsigned long val;
2516
2517         vcpu->run->exit_reason = KVM_EXIT_IO;
2518         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2519         vcpu->run->io.size = vcpu->arch.pio.size = size;
2520         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2521         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2522         vcpu->run->io.port = vcpu->arch.pio.port = port;
2523         vcpu->arch.pio.in = in;
2524         vcpu->arch.pio.string = 0;
2525         vcpu->arch.pio.down = 0;
2526         vcpu->arch.pio.rep = 0;
2527
2528         if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2529                 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2530                             handler);
2531         else
2532                 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2533                             handler);
2534
2535         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2536         memcpy(vcpu->arch.pio_data, &val, 4);
2537
2538         pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
2539         if (pio_dev) {
2540                 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2541                 complete_pio(vcpu);
2542                 return 1;
2543         }
2544         return 0;
2545 }
2546 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
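/*
 * Example: when this returns 0, the exit reaches userspace as
 * KVM_EXIT_IO.  A minimal handler sketch, assuming "run" is the vcpu's
 * mmap'ed struct kvm_run and handle_in()/handle_out() are hypothetical
 * device-model helpers:
 *
 *	if (run->exit_reason == KVM_EXIT_IO) {
 *		void *data = (char *)run + run->io.data_offset;
 *
 *		if (run->io.direction == KVM_EXIT_IO_OUT)
 *			handle_out(run->io.port, data, run->io.size);
 *		else
 *			handle_in(run->io.port, data, run->io.size);
 *	}
 */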
2547
2548 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2549                   int size, unsigned long count, int down,
2550                   gva_t address, int rep, unsigned port)
2551 {
2552         unsigned now, in_page;
2553         int ret = 0;
2554         struct kvm_io_device *pio_dev;
2555
2556         vcpu->run->exit_reason = KVM_EXIT_IO;
2557         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2558         vcpu->run->io.size = vcpu->arch.pio.size = size;
2559         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2560         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2561         vcpu->run->io.port = vcpu->arch.pio.port = port;
2562         vcpu->arch.pio.in = in;
2563         vcpu->arch.pio.string = 1;
2564         vcpu->arch.pio.down = down;
2565         vcpu->arch.pio.rep = rep;
2566
2567         if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2568                 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2569                             handler);
2570         else
2571                 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2572                             handler);
2573
2574         if (!count) {
2575                 kvm_x86_ops->skip_emulated_instruction(vcpu);
2576                 return 1;
2577         }
2578
2579         if (!down)
2580                 in_page = PAGE_SIZE - offset_in_page(address);
2581         else
2582                 in_page = offset_in_page(address) + size;
2583         now = min(count, (unsigned long)in_page / size);
2584         if (!now)
2585                 now = 1;
2586         if (down) {
2587                 /*
2588                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
2589                  */
2590                 pr_unimpl(vcpu, "guest string pio down\n");
2591                 kvm_inject_gp(vcpu, 0);
2592                 return 1;
2593         }
2594         vcpu->run->io.count = now;
2595         vcpu->arch.pio.cur_count = now;
2596
2597         if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2598                 kvm_x86_ops->skip_emulated_instruction(vcpu);
2599
2600         vcpu->arch.pio.guest_gva = address;
2601
2602         pio_dev = vcpu_find_pio_dev(vcpu, port,
2603                                     vcpu->arch.pio.cur_count,
2604                                     !vcpu->arch.pio.in);
2605         if (!vcpu->arch.pio.in) {
2606                 /* string PIO write */
2607                 ret = pio_copy_data(vcpu);
2608                 if (ret == X86EMUL_PROPAGATE_FAULT) {
2609                         kvm_inject_gp(vcpu, 0);
2610                         return 1;
2611                 }
2612                 if (ret == 0 && pio_dev) {
2613                         pio_string_write(pio_dev, vcpu);
2614                         complete_pio(vcpu);
2615                         if (vcpu->arch.pio.count == 0)
2616                                 ret = 1;
2617                 }
2618         } else if (pio_dev)
2619                 pr_unimpl(vcpu, "no string pio read support yet, "
2620                        "port %x size %d count %ld\n",
2621                         port, size, count);
2622
2623         return ret;
2624 }
2625 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2626
2627 int kvm_arch_init(void *opaque)
2628 {
2629         int r;
2630         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2631
2632         if (kvm_x86_ops) {
2633                 printk(KERN_ERR "kvm: already loaded the other module\n");
2634                 r = -EEXIST;
2635                 goto out;
2636         }
2637
2638         if (!ops->cpu_has_kvm_support()) {
2639                 printk(KERN_ERR "kvm: no hardware support\n");
2640                 r = -EOPNOTSUPP;
2641                 goto out;
2642         }
2643         if (ops->disabled_by_bios()) {
2644                 printk(KERN_ERR "kvm: disabled by bios\n");
2645                 r = -EOPNOTSUPP;
2646                 goto out;
2647         }
2648
2649         r = kvm_mmu_module_init();
2650         if (r)
2651                 goto out;
2652
2653         kvm_init_msr_list();
2654
2655         kvm_x86_ops = ops;
2656         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2657         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2658         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2659                         PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
2660         return 0;
2661
2662 out:
2663         return r;
2664 }
2665
2666 void kvm_arch_exit(void)
2667 {
2668         kvm_x86_ops = NULL;
2669         kvm_mmu_module_exit();
2670 }
2671
2672 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2673 {
2674         ++vcpu->stat.halt_exits;
2675         KVMTRACE_0D(HLT, vcpu, handler);
2676         if (irqchip_in_kernel(vcpu->kvm)) {
2677                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2678                 return 1;
2679         } else {
2680                 vcpu->run->exit_reason = KVM_EXIT_HLT;
2681                 return 0;
2682         }
2683 }
2684 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2685
2686 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2687                            unsigned long a1)
2688 {
2689         if (is_long_mode(vcpu))
2690                 return a0;
2691         else
2692                 return a0 | ((gpa_t)a1 << 32);
2693 }
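/*
 * Example: a 32-bit guest splits a 64-bit GPA across two registers, so
 * a0 = 0x89ab0000 and a1 = 0x1 combine to 0x189ab0000; a long-mode
 * guest passes the whole GPA in a0 and a1 is ignored.
 */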
2694
2695 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2696 {
2697         unsigned long nr, a0, a1, a2, a3, ret;
2698         int r = 1;
2699
2700         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
2701         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
2702         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
2703         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2704         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
2705
2706         KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2707
2708         if (!is_long_mode(vcpu)) {
2709                 nr &= 0xFFFFFFFF;
2710                 a0 &= 0xFFFFFFFF;
2711                 a1 &= 0xFFFFFFFF;
2712                 a2 &= 0xFFFFFFFF;
2713                 a3 &= 0xFFFFFFFF;
2714         }
2715
2716         switch (nr) {
2717         case KVM_HC_VAPIC_POLL_IRQ:
2718                 ret = 0;
2719                 break;
2720         case KVM_HC_MMU_OP:
2721                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2722                 break;
2723         default:
2724                 ret = -KVM_ENOSYS;
2725                 break;
2726         }
2727         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
2728         ++vcpu->stat.hypercalls;
2729         return r;
2730 }
2731 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
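/*
 * Example: the guest-side convention consumed above puts the hypercall
 * number in RAX and up to four arguments in RBX, RCX, RDX and RSI, with
 * the return value coming back in RAX.  A hypothetical one-argument
 * guest sketch (VMX spelling; on SVM the instruction is vmmcall, which
 * kvm_fix_hypercall() below patches in):
 *
 *	static unsigned long hypercall1(unsigned long nr, unsigned long p1)
 *	{
 *		unsigned long ret;
 *
 *		asm volatile("vmcall"
 *			     : "=a"(ret)
 *			     : "a"(nr), "b"(p1)
 *			     : "memory");
 *		return ret;
 *	}
 */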
2732
2733 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2734 {
2735         char instruction[3];
2736         int ret = 0;
2737         unsigned long rip = kvm_rip_read(vcpu);
2738
2739
2740         /*
2741          * Blow out the MMU so that no other VCPU has an active mapping;
2742          * this ensures that the updated hypercall appears atomically
2743          * across all VCPUs.
2744          */
2745         kvm_mmu_zap_all(vcpu->kvm);
2746
2747         kvm_x86_ops->patch_hypercall(vcpu, instruction);
2748         if (emulator_write_emulated(rip, instruction, 3, vcpu)
2749             != X86EMUL_CONTINUE)
2750                 ret = -EFAULT;
2751
2752         return ret;
2753 }
2754
2755 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2756 {
2757         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2758 }
2759
2760 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2761 {
2762         struct descriptor_table dt = { limit, base };
2763
2764         kvm_x86_ops->set_gdt(vcpu, &dt);
2765 }
2766
2767 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2768 {
2769         struct descriptor_table dt = { limit, base };
2770
2771         kvm_x86_ops->set_idt(vcpu, &dt);
2772 }
2773
2774 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2775                    unsigned long *rflags)
2776 {
2777         kvm_lmsw(vcpu, msw);
2778         *rflags = kvm_x86_ops->get_rflags(vcpu);
2779 }
2780
2781 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2782 {
2783         unsigned long value;
2784
2785         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2786         switch (cr) {
2787         case 0:
2788                 value = vcpu->arch.cr0;
2789                 break;
2790         case 2:
2791                 value = vcpu->arch.cr2;
2792                 break;
2793         case 3:
2794                 value = vcpu->arch.cr3;
2795                 break;
2796         case 4:
2797                 value = vcpu->arch.cr4;
2798                 break;
2799         case 8:
2800                 value = kvm_get_cr8(vcpu);
2801                 break;
2802         default:
2803                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2804                 return 0;
2805         }
2806         KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2807                     (u32)((u64)value >> 32), handler);
2808
2809         return value;
2810 }
2811
2812 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2813                      unsigned long *rflags)
2814 {
2815         KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2816                     (u32)((u64)val >> 32), handler);
2817
2818         switch (cr) {
2819         case 0:
2820                 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2821                 *rflags = kvm_x86_ops->get_rflags(vcpu);
2822                 break;
2823         case 2:
2824                 vcpu->arch.cr2 = val;
2825                 break;
2826         case 3:
2827                 kvm_set_cr3(vcpu, val);
2828                 break;
2829         case 4:
2830                 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2831                 break;
2832         case 8:
2833                 kvm_set_cr8(vcpu, val & 0xfUL);
2834                 break;
2835         default:
2836                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2837         }
2838 }
2839
2840 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2841 {
2842         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2843         int j, nent = vcpu->arch.cpuid_nent;
2844
2845         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2846         /* when no next entry is found, the current entry[i] is reselected */
2847         for (j = i + 1; ; j = (j + 1) % nent) {
2848                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2849                 if (ej->function == e->function) {
2850                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2851                         return j;
2852                 }
2853         }
2854         return 0; /* silence gcc, even though control never reaches here */
2855 }
2856
2857 /* Find an entry with a matching function, a matching index (if needed),
2858  * and, for stateful functions, one that is flagged to be read next */
2859 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2860         u32 function, u32 index)
2861 {
2862         if (e->function != function)
2863                 return 0;
2864         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2865                 return 0;
2866         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2867             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2868                 return 0;
2869         return 1;
2870 }
2871
2872 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
2873                                               u32 function, u32 index)
2874 {
2875         int i;
2876         struct kvm_cpuid_entry2 *best = NULL;
2877
2878         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2879                 struct kvm_cpuid_entry2 *e;
2880
2881                 e = &vcpu->arch.cpuid_entries[i];
2882                 if (is_matching_cpuid_entry(e, function, index)) {
2883                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2884                                 move_to_next_stateful_cpuid_entry(vcpu, i);
2885                         best = e;
2886                         break;
2887                 }
2888                 /*
2889                  * Both basic or both extended? (Bit 31 distinguishes the two ranges.)
2890                  */
2891                 if (((e->function ^ function) & 0x80000000) == 0)
2892                         if (!best || e->function > best->function)
2893                                 best = e;
2894         }
2895         return best;
2896 }
2897
2898 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2899 {
2900         u32 function, index;
2901         struct kvm_cpuid_entry2 *best;
2902
2903         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
2904         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
2905         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
2906         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
2907         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
2908         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
2909         best = kvm_find_cpuid_entry(vcpu, function, index);
2910         if (best) {
2911                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
2912                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
2913                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
2914                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
2915         }
2916         kvm_x86_ops->skip_emulated_instruction(vcpu);
2917         KVMTRACE_5D(CPUID, vcpu, function,
2918                     (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
2919                     (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
2920                     (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
2921                     (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
2922 }
2923 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2924
2925 /*
2926  * Check whether userspace requested an interrupt window and whether
2927  * the interrupt window is open.
2928  *
2929  * No need to exit to userspace if we already have an interrupt queued.
2930  */
2931 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2932                                           struct kvm_run *kvm_run)
2933 {
2934         return (!vcpu->arch.irq_summary &&
2935                 kvm_run->request_interrupt_window &&
2936                 vcpu->arch.interrupt_window_open &&
2937                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2938 }
2939
2940 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2941                               struct kvm_run *kvm_run)
2942 {
2943         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2944         kvm_run->cr8 = kvm_get_cr8(vcpu);
2945         kvm_run->apic_base = kvm_get_apic_base(vcpu);
2946         if (irqchip_in_kernel(vcpu->kvm))
2947                 kvm_run->ready_for_interrupt_injection = 1;
2948         else
2949                 kvm_run->ready_for_interrupt_injection =
2950                                         (vcpu->arch.interrupt_window_open &&
2951                                          vcpu->arch.irq_summary == 0);
2952 }
2953
2954 static void vapic_enter(struct kvm_vcpu *vcpu)
2955 {
2956         struct kvm_lapic *apic = vcpu->arch.apic;
2957         struct page *page;
2958
2959         if (!apic || !apic->vapic_addr)
2960                 return;
2961
2962         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2963
2964         vcpu->arch.apic->vapic_page = page;
2965 }
2966
2967 static void vapic_exit(struct kvm_vcpu *vcpu)
2968 {
2969         struct kvm_lapic *apic = vcpu->arch.apic;
2970
2971         if (!apic || !apic->vapic_addr)
2972                 return;
2973
2974         down_read(&vcpu->kvm->slots_lock);
2975         kvm_release_page_dirty(apic->vapic_page);
2976         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2977         up_read(&vcpu->kvm->slots_lock);
2978 }
2979
2980 static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2981 {
2982         int r;
2983
2984         if (vcpu->requests)
2985                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
2986                         kvm_mmu_unload(vcpu);
2987
2988         r = kvm_mmu_reload(vcpu);
2989         if (unlikely(r))
2990                 goto out;
2991
2992         if (vcpu->requests) {
2993                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2994                         __kvm_migrate_timers(vcpu);
2995                 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
2996                         kvm_mmu_sync_roots(vcpu);
2997                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2998                         kvm_x86_ops->tlb_flush(vcpu);
2999                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3000                                        &vcpu->requests)) {
3001                         kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3002                         r = 0;
3003                         goto out;
3004                 }
3005                 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3006                         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3007                         r = 0;
3008                         goto out;
3009                 }
3010         }
3011
3012         clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3013         kvm_inject_pending_timer_irqs(vcpu);
3014
3015         preempt_disable();
3016
3017         kvm_x86_ops->prepare_guest_switch(vcpu);
3018         kvm_load_guest_fpu(vcpu);
3019
3020         local_irq_disable();
3021
3022         if (vcpu->requests || need_resched() || signal_pending(current)) {
3023                 local_irq_enable();
3024                 preempt_enable();
3025                 r = 1;
3026                 goto out;
3027         }
3028
3029         vcpu->guest_mode = 1;
3030         /*
3031          * Make sure that guest_mode assignment won't happen after
3032          * testing the pending IRQ vector bitmap.
3033          */
3034         smp_wmb();
3035
3036         if (vcpu->arch.exception.pending)
3037                 __queue_exception(vcpu);
3038         else if (irqchip_in_kernel(vcpu->kvm))
3039                 kvm_x86_ops->inject_pending_irq(vcpu);
3040         else
3041                 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
3042
3043         kvm_lapic_sync_to_vapic(vcpu);
3044
3045         up_read(&vcpu->kvm->slots_lock);
3046
3047         kvm_guest_enter();
3048
3049         get_debugreg(vcpu->arch.host_dr6, 6);
3050         get_debugreg(vcpu->arch.host_dr7, 7);
3051         if (unlikely(vcpu->arch.switch_db_regs)) {
3052                 get_debugreg(vcpu->arch.host_db[0], 0);
3053                 get_debugreg(vcpu->arch.host_db[1], 1);
3054                 get_debugreg(vcpu->arch.host_db[2], 2);
3055                 get_debugreg(vcpu->arch.host_db[3], 3);
3056
3057                 set_debugreg(0, 7);
3058                 set_debugreg(vcpu->arch.eff_db[0], 0);
3059                 set_debugreg(vcpu->arch.eff_db[1], 1);
3060                 set_debugreg(vcpu->arch.eff_db[2], 2);
3061                 set_debugreg(vcpu->arch.eff_db[3], 3);
3062         }
3063
3064         KVMTRACE_0D(VMENTRY, vcpu, entryexit);
3065         kvm_x86_ops->run(vcpu, kvm_run);
3066
3067         if (unlikely(vcpu->arch.switch_db_regs)) {
3068                 set_debugreg(0, 7);
3069                 set_debugreg(vcpu->arch.host_db[0], 0);
3070                 set_debugreg(vcpu->arch.host_db[1], 1);
3071                 set_debugreg(vcpu->arch.host_db[2], 2);
3072                 set_debugreg(vcpu->arch.host_db[3], 3);
3073         }
3074         set_debugreg(vcpu->arch.host_dr6, 6);
3075         set_debugreg(vcpu->arch.host_dr7, 7);
3076
3077         vcpu->guest_mode = 0;
3078         local_irq_enable();
3079
3080         ++vcpu->stat.exits;
3081
3082         /*
3083          * We must have an instruction between local_irq_enable() and
3084          * kvm_guest_exit(), so the timer interrupt isn't delayed by
3085          * the interrupt shadow.  The stat.exits increment will do nicely.
3086          * But we need to prevent reordering, hence this barrier():
3087          */
3088         barrier();
3089
3090         kvm_guest_exit();
3091
3092         preempt_enable();
3093
3094         down_read(&vcpu->kvm->slots_lock);
3095
3096         /*
3097          * Profile KVM exit RIPs:
3098          */
3099         if (unlikely(prof_on == KVM_PROFILING)) {
3100                 unsigned long rip = kvm_rip_read(vcpu);
3101                 profile_hit(KVM_PROFILING, (void *)rip);
3102         }
3103
3104         if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
3105                 vcpu->arch.exception.pending = false;
3106
3107         kvm_lapic_sync_from_vapic(vcpu);
3108
3109         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
3110 out:
3111         return r;
3112 }
3113
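/*
 * Outer run loop: a vcpu that has just received a SIPI is reset and
 * made runnable first; afterwards the guest is entered while the vcpu
 * is runnable and blocked while it is halted.  The loop ends with
 * -EINTR when a signal is pending or when userspace asked to regain
 * control for interrupt injection.
 */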
3114 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3115 {
3116         int r;
3117
3118         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3119                 pr_debug("vcpu %d received sipi with vector # %x\n",
3120                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
3121                 kvm_lapic_reset(vcpu);
3122                 r = kvm_arch_vcpu_reset(vcpu);
3123                 if (r)
3124                         return r;
3125                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3126         }
3127
3128         down_read(&vcpu->kvm->slots_lock);
3129         vapic_enter(vcpu);
3130
3131         r = 1;
3132         while (r > 0) {
3133                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3134                         r = vcpu_enter_guest(vcpu, kvm_run);
3135                 else {
3136                         up_read(&vcpu->kvm->slots_lock);
3137                         kvm_vcpu_block(vcpu);
3138                         down_read(&vcpu->kvm->slots_lock);
3139                         if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
3140                                 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3141                                         vcpu->arch.mp_state =
3142                                                         KVM_MP_STATE_RUNNABLE;
3143                         if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
3144                                 r = -EINTR;
3145                 }
3146
3147                 if (r > 0) {
3148                         if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3149                                 r = -EINTR;
3150                                 kvm_run->exit_reason = KVM_EXIT_INTR;
3151                                 ++vcpu->stat.request_irq_exits;
3152                         }
3153                         if (signal_pending(current)) {
3154                                 r = -EINTR;
3155                                 kvm_run->exit_reason = KVM_EXIT_INTR;
3156                                 ++vcpu->stat.signal_exits;
3157                         }
3158                         if (need_resched()) {
3159                                 up_read(&vcpu->kvm->slots_lock);
3160                                 kvm_resched(vcpu);
3161                                 down_read(&vcpu->kvm->slots_lock);
3162                         }
3163                 }
3164         }
3165
3166         up_read(&vcpu->kvm->slots_lock);
3167         post_kvm_run_save(vcpu, kvm_run);
3168
3169         vapic_exit(vcpu);
3170
3171         return r;
3172 }
3173
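/*
 * Entry point for the KVM_RUN ioctl.  Userspace typically mmap()s the
 * vcpu fd (size obtained with KVM_GET_VCPU_MMAP_SIZE) to reach the
 * shared struct kvm_run and then loops.  A minimal sketch, error
 * handling omitted and vcpu_fd/run_size used as illustrative names:
 *
 *	run = mmap(NULL, run_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:   ... emulate the port access ...   break;
 *		case KVM_EXIT_MMIO: ... emulate the memory access ... break;
 *		}
 *	}
 *
 * Unfinished pio or mmio left over from the previous exit is completed
 * below before the guest is re-entered.
 */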
3174 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3175 {
3176         int r;
3177         sigset_t sigsaved;
3178
3179         vcpu_load(vcpu);
3180
3181         if (vcpu->sigset_active)
3182                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3183
3184         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
3185                 kvm_vcpu_block(vcpu);
3186                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3187                 r = -EAGAIN;
3188                 goto out;
3189         }
3190
3191         /* re-sync apic's tpr */
3192         if (!irqchip_in_kernel(vcpu->kvm))
3193                 kvm_set_cr8(vcpu, kvm_run->cr8);
3194
3195         if (vcpu->arch.pio.cur_count) {
3196                 r = complete_pio(vcpu);
3197                 if (r)
3198                         goto out;
3199         }
3200 #ifdef CONFIG_HAS_IOMEM
3201         if (vcpu->mmio_needed) {
3202                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3203                 vcpu->mmio_read_completed = 1;
3204                 vcpu->mmio_needed = 0;
3205
3206                 down_read(&vcpu->kvm->slots_lock);
3207                 r = emulate_instruction(vcpu, kvm_run,
3208                                         vcpu->arch.mmio_fault_cr2, 0,
3209                                         EMULTYPE_NO_DECODE);
3210                 up_read(&vcpu->kvm->slots_lock);
3211                 if (r == EMULATE_DO_MMIO) {
3212                         /*
3213                          * Read-modify-write.  Back to userspace.
3214                          */
3215                         r = 0;
3216                         goto out;
3217                 }
3218         }
3219 #endif
3220         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3221                 kvm_register_write(vcpu, VCPU_REGS_RAX,
3222                                      kvm_run->hypercall.ret);
3223
3224         r = __vcpu_run(vcpu, kvm_run);
3225
3226 out:
3227         if (vcpu->sigset_active)
3228                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3229
3230         vcpu_put(vcpu);
3231         return r;
3232 }
3233
3234 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3235 {
3236         vcpu_load(vcpu);
3237
3238         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3239         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3240         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3241         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3242         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3243         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3244         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3245         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3246 #ifdef CONFIG_X86_64
3247         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3248         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3249         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3250         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3251         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3252         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3253         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3254         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3255 #endif
3256
3257         regs->rip = kvm_rip_read(vcpu);
3258         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3259
3260         /*
3261          * Don't leak debug flags in case they were set for guest debugging
3262          */
3263         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3264                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3265
3266         vcpu_put(vcpu);
3267
3268         return 0;
3269 }
3270
3271 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3272 {
3273         vcpu_load(vcpu);
3274
3275         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3276         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3277         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3278         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3279         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3280         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3281         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3282         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
3283 #ifdef CONFIG_X86_64
3284         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3285         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3286         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3287         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3288         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3289         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3290         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3291         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3293 #endif
3294
3295         kvm_rip_write(vcpu, regs->rip);
3296         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3297
3299         vcpu->arch.exception.pending = false;
3300
3301         vcpu_put(vcpu);
3302
3303         return 0;
3304 }
3305
3306 void kvm_get_segment(struct kvm_vcpu *vcpu,
3307                      struct kvm_segment *var, int seg)
3308 {
3309         kvm_x86_ops->get_segment(vcpu, var, seg);
3310 }
3311
3312 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3313 {
3314         struct kvm_segment cs;
3315
3316         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3317         *db = cs.db;
3318         *l = cs.l;
3319 }
3320 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3321
3322 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3323                                   struct kvm_sregs *sregs)
3324 {
3325         struct descriptor_table dt;
3326         int pending_vec;
3327
3328         vcpu_load(vcpu);
3329
3330         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3331         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3332         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3333         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3334         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3335         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3336
3337         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3338         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3339
3340         kvm_x86_ops->get_idt(vcpu, &dt);
3341         sregs->idt.limit = dt.limit;
3342         sregs->idt.base = dt.base;
3343         kvm_x86_ops->get_gdt(vcpu, &dt);
3344         sregs->gdt.limit = dt.limit;
3345         sregs->gdt.base = dt.base;
3346
3347         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3348         sregs->cr0 = vcpu->arch.cr0;
3349         sregs->cr2 = vcpu->arch.cr2;
3350         sregs->cr3 = vcpu->arch.cr3;
3351         sregs->cr4 = vcpu->arch.cr4;
3352         sregs->cr8 = kvm_get_cr8(vcpu);
3353         sregs->efer = vcpu->arch.shadow_efer;
3354         sregs->apic_base = kvm_get_apic_base(vcpu);
3355
3356         if (irqchip_in_kernel(vcpu->kvm)) {
3357                 memset(sregs->interrupt_bitmap, 0,
3358                        sizeof sregs->interrupt_bitmap);
3359                 pending_vec = kvm_x86_ops->get_irq(vcpu);
3360                 if (pending_vec >= 0)
3361                         set_bit(pending_vec,
3362                                 (unsigned long *)sregs->interrupt_bitmap);
3363         } else
3364                 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
3365                        sizeof sregs->interrupt_bitmap);
3366
3367         vcpu_put(vcpu);
3368
3369         return 0;
3370 }
3371
3372 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3373                                     struct kvm_mp_state *mp_state)
3374 {
3375         vcpu_load(vcpu);
3376         mp_state->mp_state = vcpu->arch.mp_state;
3377         vcpu_put(vcpu);
3378         return 0;
3379 }
3380
3381 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3382                                     struct kvm_mp_state *mp_state)
3383 {
3384         vcpu_load(vcpu);
3385         vcpu->arch.mp_state = mp_state->mp_state;
3386         vcpu_put(vcpu);
3387         return 0;
3388 }
3389
3390 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3391                         struct kvm_segment *var, int seg)
3392 {
3393         kvm_x86_ops->set_segment(vcpu, var, seg);
3394 }
3395
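/*
 * Unpack an architectural 8-byte segment descriptor into the flat
 * kvm_segment representation: the base is scattered over base0/1/2,
 * and with the granularity bit set the 20-bit limit is in 4K units,
 * so it is shifted up by 12 with the low bits filled in.  A null
 * selector yields an unusable segment.
 */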
3396 static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3397                                    struct kvm_segment *kvm_desct)
3398 {
3399         kvm_desct->base = seg_desc->base0;
3400         kvm_desct->base |= seg_desc->base1 << 16;
3401         kvm_desct->base |= seg_desc->base2 << 24;
3402         kvm_desct->limit = seg_desc->limit0;
3403         kvm_desct->limit |= seg_desc->limit << 16;
3404         if (seg_desc->g) {
3405                 kvm_desct->limit <<= 12;
3406                 kvm_desct->limit |= 0xfff;
3407         }
3408         kvm_desct->selector = selector;
3409         kvm_desct->type = seg_desc->type;
3410         kvm_desct->present = seg_desc->p;
3411         kvm_desct->dpl = seg_desc->dpl;
3412         kvm_desct->db = seg_desc->d;
3413         kvm_desct->s = seg_desc->s;
3414         kvm_desct->l = seg_desc->l;
3415         kvm_desct->g = seg_desc->g;
3416         kvm_desct->avl = seg_desc->avl;
3417         if (!selector)
3418                 kvm_desct->unusable = 1;
3419         else
3420                 kvm_desct->unusable = 0;
3421         kvm_desct->padding = 0;
3422 }
3423
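/*
 * Bit 2 of a selector is the table indicator: if set, the selector
 * indexes the LDT, otherwise the GDT.
 */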
3424 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3425                                           u16 selector,
3426                                           struct descriptor_table *dtable)
3427 {
3428         if (selector & (1 << 2)) {
3429                 struct kvm_segment kvm_seg;
3430
3431                 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3432
3433                 if (kvm_seg.unusable)
3434                         dtable->limit = 0;
3435                 else
3436                         dtable->limit = kvm_seg.limit;
3437                 dtable->base = kvm_seg.base;
3438         } else
3440                 kvm_x86_ops->get_gdt(vcpu, dtable);
3441 }
3442
3443 /* allowed only for 8-byte segment descriptors */
3444 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3445                                          struct desc_struct *seg_desc)
3446 {
3447         gpa_t gpa;
3448         struct descriptor_table dtable;
3449         u16 index = selector >> 3;
3450
3451         get_segment_descriptor_dtable(vcpu, selector, &dtable);
3452
3453         if (dtable.limit < index * 8 + 7) {
3454                 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3455                 return 1;
3456         }
3457         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3458         gpa += index * 8;
3459         return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3460 }
3461
3462 /* allowed only for 8-byte segment descriptors */
3463 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3464                                          struct desc_struct *seg_desc)
3465 {
3466         gpa_t gpa;
3467         struct descriptor_table dtable;
3468         u16 index = selector >> 3;
3469
3470         get_segment_descriptor_dtable(vcpu, selector, &dtable);
3471
3472         if (dtable.limit < index * 8 + 7)
3473                 return 1;
3474         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3475         gpa += index * 8;
3476         return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3477 }
3478
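/*
 * Reassemble the 32-bit base address from a TSS descriptor and
 * translate it into a guest physical address.
 */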
3479 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3480                              struct desc_struct *seg_desc)
3481 {
3482         u32 base_addr;
3483
3484         base_addr = seg_desc->base0;
3485         base_addr |= (seg_desc->base1 << 16);
3486         base_addr |= (seg_desc->base2 << 24);
3487
3488         return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3489 }
3490
3491 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3492 {
3493         struct kvm_segment kvm_seg;
3494
3495         kvm_get_segment(vcpu, &kvm_seg, seg);
3496         return kvm_seg.selector;
3497 }
3498
3499 static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3500                                                 u16 selector,
3501                                                 struct kvm_segment *kvm_seg)
3502 {
3503         struct desc_struct seg_desc;
3504
3505         if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3506                 return 1;
3507         seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3508         return 0;
3509 }
3510
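/*
 * In real mode there are no descriptors: the base is just the selector
 * shifted left by four and the limit is a full 64K, so a synthetic
 * segment is built and handed straight to the vendor code.
 */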
3511 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
3512 {
3513         struct kvm_segment segvar = {
3514                 .base = selector << 4,
3515                 .limit = 0xffff,
3516                 .selector = selector,
3517                 .type = 3,
3518                 .present = 1,
3519                 .dpl = 3,
3520                 .db = 0,
3521                 .s = 1,
3522                 .l = 0,
3523                 .g = 0,
3524                 .avl = 0,
3525                 .unusable = 0,
3526         };
3527         kvm_x86_ops->set_segment(vcpu, &segvar, seg);
3528         return 0;
3529 }
3530
3531 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3532                                 int type_bits, int seg)
3533 {
3534         struct kvm_segment kvm_seg;
3535
3536         if (!(vcpu->arch.cr0 & X86_CR0_PE))
3537                 return kvm_load_realmode_segment(vcpu, selector, seg);
3538         if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3539                 return 1;
3540         kvm_seg.type |= type_bits;
3541
3542         if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3543             seg != VCPU_SREG_LDTR)
3544                 if (!kvm_seg.s)
3545                         kvm_seg.unusable = 1;
3546
3547         kvm_set_segment(vcpu, &kvm_seg, seg);
3548         return 0;
3549 }
3550
3551 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3552                                 struct tss_segment_32 *tss)
3553 {
3554         tss->cr3 = vcpu->arch.cr3;
3555         tss->eip = kvm_rip_read(vcpu);
3556         tss->eflags = kvm_x86_ops->get_rflags(vcpu);
3557         tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3558         tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3559         tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3560         tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3561         tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3562         tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3563         tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3564         tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3565         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3566         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3567         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3568         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3569         tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
3570         tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
3571         tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3572         tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3573 }
3574
3575 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
3576                                   struct tss_segment_32 *tss)
3577 {
3578         kvm_set_cr3(vcpu, tss->cr3);
3579
3580         kvm_rip_write(vcpu, tss->eip);
3581         kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
3582
3583         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
3584         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
3585         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
3586         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
3587         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
3588         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
3589         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
3590         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
3591
3592         if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
3593                 return 1;
3594
3595         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3596                 return 1;
3597
3598         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3599                 return 1;
3600
3601         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3602                 return 1;
3603
3604         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3605                 return 1;
3606
3607         if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
3608                 return 1;
3609
3610         if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
3611                 return 1;
3612         return 0;
3613 }
3614
3615 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
3616                                 struct tss_segment_16 *tss)
3617 {
3618         tss->ip = kvm_rip_read(vcpu);
3619         tss->flag = kvm_x86_ops->get_rflags(vcpu);
3620         tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3621         tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3622         tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3623         tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3624         tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3625         tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3626         tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
3627         tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
3628
3629         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3630         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3631         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3632         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3633         tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3634         tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3635 }
3636
3637 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3638                                  struct tss_segment_16 *tss)
3639 {
3640         kvm_rip_write(vcpu, tss->ip);
3641         kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
3642         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
3643         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
3644         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
3645         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
3646         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
3647         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
3648         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
3649         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
3650
3651         if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
3652                 return 1;
3653
3654         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3655                 return 1;
3656
3657         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3658                 return 1;
3659
3660         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3661                 return 1;
3662
3663         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3664                 return 1;
3665         return 0;
3666 }
3667
3668 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
3669                        u32 old_tss_base,
3670                        struct desc_struct *nseg_desc)
3671 {
3672         struct tss_segment_16 tss_segment_16;
3673         int ret = 0;
3674
3675         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3676                            sizeof tss_segment_16))
3677                 goto out;
3678
3679         save_state_to_tss16(vcpu, &tss_segment_16);
3680
3681         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3682                             sizeof tss_segment_16))
3683                 goto out;
3684
3685         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3686                            &tss_segment_16, sizeof tss_segment_16))
3687                 goto out;
3688
3689         if (load_state_from_tss16(vcpu, &tss_segment_16))
3690                 goto out;
3691
3692         ret = 1;
3693 out:
3694         return ret;
3695 }
3696
3697 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
3698                        u32 old_tss_base,
3699                        struct desc_struct *nseg_desc)
3700 {
3701         struct tss_segment_32 tss_segment_32;
3702         int ret = 0;
3703
3704         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3705                            sizeof tss_segment_32))
3706                 goto out;
3707
3708         save_state_to_tss32(vcpu, &tss_segment_32);
3709
3710         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3711                             sizeof tss_segment_32))
3712                 goto out;
3713
3714         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3715                            &tss_segment_32, sizeof tss_segment_32))
3716                 goto out;
3717
3718         if (load_state_from_tss32(vcpu, &tss_segment_32))
3719                 goto out;
3720
3721         ret = 1;
3722 out:
3723         return ret;
3724 }
3725
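/*
 * Emulate a hardware task switch: save the current state into the old
 * TSS, load the new one (bit 3 of the descriptor type distinguishes a
 * 32-bit from a 16-bit TSS), and maintain the busy bits in the TSS
 * descriptors and the NT flag according to the switch reason (IRET,
 * JMP, CALL or task gate).
 */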
3726 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3727 {
3728         struct kvm_segment tr_seg;
3729         struct desc_struct cseg_desc;
3730         struct desc_struct nseg_desc;
3731         int ret = 0;
3732         u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
3733         u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
3734
3735         old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
3736
3737         /* FIXME: Handle errors. Failure to read either TSS or their
3738          * descriptors should generate a page fault.
3739          */
3740         if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3741                 goto out;
3742
3743         if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
3744                 goto out;
3745
3746         if (reason != TASK_SWITCH_IRET) {
3747                 int cpl;
3748
3749                 cpl = kvm_x86_ops->get_cpl(vcpu);
3750                 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3751                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3752                         return 1;
3753                 }
3754         }
3755
3756         if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3757                 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3758                 return 1;
3759         }
3760
3761         if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3762                 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
3763                 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
3764         }
3765
3766         if (reason == TASK_SWITCH_IRET) {
3767                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3768                 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3769         }
3770
3771         kvm_x86_ops->skip_emulated_instruction(vcpu);
3772
3773         if (nseg_desc.type & 8)
3774                 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
3775                                          &nseg_desc);
3776         else
3777                 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
3778                                          &nseg_desc);
3779
3780         if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3781                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3782                 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3783         }
3784
3785         if (reason != TASK_SWITCH_IRET) {
3786                 nseg_desc.type |= (1 << 1);
3787                 save_guest_segment_descriptor(vcpu, tss_selector,
3788                                               &nseg_desc);
3789         }
3790
3791         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3792         seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3793         tr_seg.type = 11;
3794         kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3795 out:
3796         return ret;
3797 }
3798 EXPORT_SYMBOL_GPL(kvm_task_switch);
3799
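/*
 * Install the special registers supplied by userspace.  Any change to
 * cr0, cr3, cr4 or efer invalidates the current paging setup, so the
 * mmu context is reset in that case.
 */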
3800 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3801                                   struct kvm_sregs *sregs)
3802 {
3803         int mmu_reset_needed = 0;
3804         int i, pending_vec, max_bits;
3805         struct descriptor_table dt;
3806
3807         vcpu_load(vcpu);
3808
3809         dt.limit = sregs->idt.limit;
3810         dt.base = sregs->idt.base;
3811         kvm_x86_ops->set_idt(vcpu, &dt);
3812         dt.limit = sregs->gdt.limit;
3813         dt.base = sregs->gdt.base;
3814         kvm_x86_ops->set_gdt(vcpu, &dt);
3815
3816         vcpu->arch.cr2 = sregs->cr2;
3817         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
3818         vcpu->arch.cr3 = sregs->cr3;
3819
3820         kvm_set_cr8(vcpu, sregs->cr8);
3821
3822         mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
3823         kvm_x86_ops->set_efer(vcpu, sregs->efer);
3824         kvm_set_apic_base(vcpu, sregs->apic_base);
3825
3826         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3827
3828         mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
3829         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
3830         vcpu->arch.cr0 = sregs->cr0;
3831
3832         mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
3833         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
3834         if (!is_long_mode(vcpu) && is_pae(vcpu))
3835                 load_pdptrs(vcpu, vcpu->arch.cr3);
3836
3837         if (mmu_reset_needed)
3838                 kvm_mmu_reset_context(vcpu);
3839
3840         if (!irqchip_in_kernel(vcpu->kvm)) {
3841                 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
3842                        sizeof vcpu->arch.irq_pending);
3843                 vcpu->arch.irq_summary = 0;
3844                 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
3845                         if (vcpu->arch.irq_pending[i])
3846                                 __set_bit(i, &vcpu->arch.irq_summary);
3847         } else {
3848                 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
3849                 pending_vec = find_first_bit(
3850                         (const unsigned long *)sregs->interrupt_bitmap,
3851                         max_bits);
3852                 /* Only a pending external irq is handled here */
3853                 if (pending_vec < max_bits) {
3854                         kvm_x86_ops->set_irq(vcpu, pending_vec);
3855                         pr_debug("Set back pending irq %d\n",
3856                                  pending_vec);
3857                 }
3858                 kvm_pic_clear_isr_ack(vcpu->kvm);
3859         }
3860
3861         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3862         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3863         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3864         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3865         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3866         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3867
3868         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3869         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3870
3871         /* Older userspace won't unhalt the vcpu on reset. */
3872         if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
3873             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
3874             !(vcpu->arch.cr0 & X86_CR0_PE))
3875                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3876
3877         vcpu_put(vcpu);
3878
3879         return 0;
3880 }
3881
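/*
 * Configure guest debugging.  When hardware breakpoints are enabled,
 * the debug registers supplied by userspace become the effective ones;
 * otherwise the guest's own values remain in effect.  Userspace may
 * also ask for a #DB or #BP exception to be queued.
 */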
3882 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3883                                         struct kvm_guest_debug *dbg)
3884 {
3885         int i, r;
3886
3887         vcpu_load(vcpu);
3888
3889         if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
3890             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
3891                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
3892                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
3893                 vcpu->arch.switch_db_regs =
3894                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
3895         } else {
3896                 for (i = 0; i < KVM_NR_DB_REGS; i++)
3897                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
3898                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
3899         }
3900
3901         r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3902
3903         if (dbg->control & KVM_GUESTDBG_INJECT_DB)
3904                 kvm_queue_exception(vcpu, DB_VECTOR);
3905         else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
3906                 kvm_queue_exception(vcpu, BP_VECTOR);
3907
3908         vcpu_put(vcpu);
3909
3910         return r;
3911 }
3912
3913 /*
3914  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
3915  * we have asm/x86/processor.h
3916  */
3917 struct fxsave {
3918         u16     cwd;
3919         u16     swd;
3920         u16     twd;
3921         u16     fop;
3922         u64     rip;
3923         u64     rdp;
3924         u32     mxcsr;
3925         u32     mxcsr_mask;
3926         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
3927 #ifdef CONFIG_X86_64
3928         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
3929 #else
3930         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
3931 #endif
3932 };
3933
3934 /*
3935  * Translate a guest virtual address to a guest physical address.
3936  */
3937 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3938                                     struct kvm_translation *tr)
3939 {
3940         unsigned long vaddr = tr->linear_address;
3941         gpa_t gpa;
3942
3943         vcpu_load(vcpu);
3944         down_read(&vcpu->kvm->slots_lock);
3945         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
3946         up_read(&vcpu->kvm->slots_lock);
3947         tr->physical_address = gpa;
3948         tr->valid = gpa != UNMAPPED_GVA;
3949         tr->writeable = 1;
3950         tr->usermode = 0;
3951         vcpu_put(vcpu);
3952
3953         return 0;
3954 }
3955
3956 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3957 {
3958         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
3959
3960         vcpu_load(vcpu);
3961
3962         memcpy(fpu->fpr, fxsave->st_space, 128);
3963         fpu->fcw = fxsave->cwd;
3964         fpu->fsw = fxsave->swd;
3965         fpu->ftwx = fxsave->twd;
3966         fpu->last_opcode = fxsave->fop;
3967         fpu->last_ip = fxsave->rip;
3968         fpu->last_dp = fxsave->rdp;
3969         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3970
3971         vcpu_put(vcpu);
3972
3973         return 0;
3974 }
3975
3976 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3977 {
3978         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
3979
3980         vcpu_load(vcpu);
3981
3982         memcpy(fxsave->st_space, fpu->fpr, 128);
3983         fxsave->cwd = fpu->fcw;
3984         fxsave->swd = fpu->fsw;
3985         fxsave->twd = fpu->ftwx;
3986         fxsave->fop = fpu->last_opcode;
3987         fxsave->rip = fpu->last_ip;
3988         fxsave->rdp = fpu->last_dp;
3989         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
3990
3991         vcpu_put(vcpu);
3992
3993         return 0;
3994 }
3995
3996 void fx_init(struct kvm_vcpu *vcpu)
3997 {
3998         unsigned after_mxcsr_mask;
3999
4000         /*
4001          * Touch the fpu for the first time in a non-atomic context: if
4002          * this is the first fpu instruction, the exception handler will
4003          * fire before the instruction returns, and it will have to
4004          * allocate ram with GFP_KERNEL.
4005          */
4006         if (!used_math())
4007                 kvm_fx_save(&vcpu->arch.host_fx_image);
4008
4009         /* Initialize guest FPU by resetting ours and saving into guest's */
4010         preempt_disable();
4011         kvm_fx_save(&vcpu->arch.host_fx_image);
4012         kvm_fx_finit();
4013         kvm_fx_save(&vcpu->arch.guest_fx_image);
4014         kvm_fx_restore(&vcpu->arch.host_fx_image);
4015         preempt_enable();
4016
4017         vcpu->arch.cr0 |= X86_CR0_ET;
4018         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
4019         vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
4020         memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
4021                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
4022 }
4023 EXPORT_SYMBOL_GPL(fx_init);
4024
4025 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4026 {
4027         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
4028                 return;
4029
4030         vcpu->guest_fpu_loaded = 1;
4031         kvm_fx_save(&vcpu->arch.host_fx_image);
4032         kvm_fx_restore(&vcpu->arch.guest_fx_image);
4033 }
4034 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4035
4036 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4037 {
4038         if (!vcpu->guest_fpu_loaded)
4039                 return;
4040
4041         vcpu->guest_fpu_loaded = 0;
4042         kvm_fx_save(&vcpu->arch.guest_fx_image);
4043         kvm_fx_restore(&vcpu->arch.host_fx_image);
4044         ++vcpu->stat.fpu_reload;
4045 }
4046 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
4047
4048 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4049 {
4050         kvm_x86_ops->vcpu_free(vcpu);
4051 }
4052
4053 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
4054                                                 unsigned int id)
4055 {
4056         return kvm_x86_ops->vcpu_create(kvm, id);
4057 }
4058
4059 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
4060 {
4061         int r;
4062
4063         /* We do fxsave: this must be aligned. */
4064         BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
4065
4066         vcpu->arch.mtrr_state.have_fixed = 1;
4067         vcpu_load(vcpu);
4068         r = kvm_arch_vcpu_reset(vcpu);
4069         if (r == 0)
4070                 r = kvm_mmu_setup(vcpu);
4071         vcpu_put(vcpu);
4072         if (r < 0)
4073                 goto free_vcpu;
4074
4075         return 0;
4076 free_vcpu:
4077         kvm_x86_ops->vcpu_free(vcpu);
4078         return r;
4079 }
4080
4081 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
4082 {
4083         vcpu_load(vcpu);
4084         kvm_mmu_unload(vcpu);
4085         vcpu_put(vcpu);
4086
4087         kvm_x86_ops->vcpu_free(vcpu);
4088 }
4089
4090 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
4091 {
4092         vcpu->arch.nmi_pending = false;
4093         vcpu->arch.nmi_injected = false;
4094
4095         vcpu->arch.switch_db_regs = 0;
4096         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
4097         vcpu->arch.dr6 = DR6_FIXED_1;
4098         vcpu->arch.dr7 = DR7_FIXED_1;
4099
4100         return kvm_x86_ops->vcpu_reset(vcpu);
4101 }
4102
4103 void kvm_arch_hardware_enable(void *garbage)
4104 {
4105         kvm_x86_ops->hardware_enable(garbage);
4106 }
4107
4108 void kvm_arch_hardware_disable(void *garbage)
4109 {
4110         kvm_x86_ops->hardware_disable(garbage);
4111 }
4112
4113 int kvm_arch_hardware_setup(void)
4114 {
4115         return kvm_x86_ops->hardware_setup();
4116 }
4117
4118 void kvm_arch_hardware_unsetup(void)
4119 {
4120         kvm_x86_ops->hardware_unsetup();
4121 }
4122
4123 void kvm_arch_check_processor_compat(void *rtn)
4124 {
4125         kvm_x86_ops->check_processor_compatibility(rtn);
4126 }
4127
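/*
 * Arch-specific vcpu construction: allocate the pio scratch page
 * (reachable from userspace through the vcpu mmap), create the mmu
 * and, with an in-kernel irqchip, the local APIC.
 */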
4128 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
4129 {
4130         struct page *page;
4131         struct kvm *kvm;
4132         int r;
4133
4134         BUG_ON(vcpu->kvm == NULL);
4135         kvm = vcpu->kvm;
4136
4137         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
4138         if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
4139                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4140         else
4141                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
4142
4143         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
4144         if (!page) {
4145                 r = -ENOMEM;
4146                 goto fail;
4147         }
4148         vcpu->arch.pio_data = page_address(page);
4149
4150         r = kvm_mmu_create(vcpu);
4151         if (r < 0)
4152                 goto fail_free_pio_data;
4153
4154         if (irqchip_in_kernel(kvm)) {
4155                 r = kvm_create_lapic(vcpu);
4156                 if (r < 0)
4157                         goto fail_mmu_destroy;
4158         }
4159
4160         return 0;
4161
4162 fail_mmu_destroy:
4163         kvm_mmu_destroy(vcpu);
4164 fail_free_pio_data:
4165         free_page((unsigned long)vcpu->arch.pio_data);
4166 fail:
4167         return r;
4168 }
4169
4170 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
4171 {
4172         kvm_free_lapic(vcpu);
4173         down_read(&vcpu->kvm->slots_lock);
4174         kvm_mmu_destroy(vcpu);
4175         up_read(&vcpu->kvm->slots_lock);
4176         free_page((unsigned long)vcpu->arch.pio_data);
4177 }
4178
4179 struct  kvm *kvm_arch_create_vm(void)
4180 {
4181         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
4182
4183         if (!kvm)
4184                 return ERR_PTR(-ENOMEM);
4185
4186         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
4187         INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
4188         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
4189
4190         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
4191         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
4192
4193         rdtscll(kvm->arch.vm_init_tsc);
4194
4195         return kvm;
4196 }
4197
4198 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4199 {
4200         vcpu_load(vcpu);
4201         kvm_mmu_unload(vcpu);
4202         vcpu_put(vcpu);
4203 }
4204
4205 static void kvm_free_vcpus(struct kvm *kvm)
4206 {
4207         unsigned int i;
4208
4209         /*
4210          * Unpin any mmu pages first.
4211          */
4212         for (i = 0; i < KVM_MAX_VCPUS; ++i)
4213                 if (kvm->vcpus[i])
4214                         kvm_unload_vcpu_mmu(kvm->vcpus[i]);
4215         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
4216                 if (kvm->vcpus[i]) {
4217                         kvm_arch_vcpu_free(kvm->vcpus[i]);
4218                         kvm->vcpus[i] = NULL;
4219                 }
4220         }
4222 }
4223
4224 void kvm_arch_sync_events(struct kvm *kvm)
4225 {
4226         kvm_free_all_assigned_devices(kvm);
4227 }
4228
4229 void kvm_arch_destroy_vm(struct kvm *kvm)
4230 {
4231         kvm_iommu_unmap_guest(kvm);
4232         kvm_free_pit(kvm);
4233         kfree(kvm->arch.vpic);
4234         kfree(kvm->arch.vioapic);
4235         kvm_free_vcpus(kvm);
4236         kvm_free_physmem(kvm);
4237         if (kvm->arch.apic_access_page)
4238                 put_page(kvm->arch.apic_access_page);
4239         if (kvm->arch.ept_identity_pagetable)
4240                 put_page(kvm->arch.ept_identity_pagetable);
4241         kfree(kvm);
4242 }
4243
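/*
 * Arch hook for memory slot updates.  For older userspace that does
 * not allocate slot memory itself (!user_alloc), an anonymous mapping
 * is created or torn down in the current process on its behalf.
 */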
4244 int kvm_arch_set_memory_region(struct kvm *kvm,
4245                                 struct kvm_userspace_memory_region *mem,
4246                                 struct kvm_memory_slot old,
4247                                 int user_alloc)
4248 {
4249         int npages = mem->memory_size >> PAGE_SHIFT;
4250         struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
4251
4252         /* To keep backward compatibility with older userspace,
4253          * x86 needs to handle the !user_alloc case.
4254          */
4255         if (!user_alloc) {
4256                 if (npages && !old.rmap) {
4257                         unsigned long userspace_addr;
4258
4259                         down_write(&current->mm->mmap_sem);
4260                         userspace_addr = do_mmap(NULL, 0,
4261                                                  npages * PAGE_SIZE,
4262                                                  PROT_READ | PROT_WRITE,
4263                                                  MAP_PRIVATE | MAP_ANONYMOUS,
4264                                                  0);
4265                         up_write(&current->mm->mmap_sem);
4266
4267                         if (IS_ERR((void *)userspace_addr))
4268                                 return PTR_ERR((void *)userspace_addr);
4269
4270                         /* set userspace_addr atomically for kvm_hva_to_rmapp */
4271                         spin_lock(&kvm->mmu_lock);
4272                         memslot->userspace_addr = userspace_addr;
4273                         spin_unlock(&kvm->mmu_lock);
4274                 } else {
4275                         if (!old.user_alloc && old.rmap) {
4276                                 int ret;
4277
4278                                 down_write(&current->mm->mmap_sem);
4279                                 ret = do_munmap(current->mm, old.userspace_addr,
4280                                                 old.npages * PAGE_SIZE);
4281                                 up_write(&current->mm->mmap_sem);
4282                                 if (ret < 0)
4283                                         printk(KERN_WARNING
4284                                        "kvm_vm_ioctl_set_memory_region: "
4285                                        "failed to munmap memory\n");
4286                         }
4287                 }
4288         }
4289
4290         if (!kvm->arch.n_requested_mmu_pages) {
4291                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
4292                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
4293         }
4294
4295         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
4296         kvm_flush_remote_tlbs(kvm);
4297
4298         return 0;
4299 }
4300
4301 void kvm_arch_flush_shadow(struct kvm *kvm)
4302 {
4303         kvm_mmu_zap_all(kvm);
4304 }
4305
4306 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4307 {
4308         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
4309                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
4310                || vcpu->arch.nmi_pending;
4311 }
4312
4313 static void vcpu_kick_intr(void *info)
4314 {
4315 #ifdef DEBUG
4316         struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
4317         printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
4318 #endif
4319 }
4320
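/*
 * Kick a vcpu out of its current state: wake it up if it is blocked in
 * kvm_vcpu_block(), and send an IPI to force a vmexit if it is
 * currently executing guest code on another cpu.
 */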
4321 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4322 {
4323         int ipi_pcpu = vcpu->cpu;
4324         int cpu = get_cpu();
4325
4326         if (waitqueue_active(&vcpu->wq)) {
4327                 wake_up_interruptible(&vcpu->wq);
4328                 ++vcpu->stat.halt_wakeup;
4329         }
4330         /*
4331          * We may be called synchronously with irqs disabled in guest mode,
4332          * so there is no need to call smp_call_function_single() in that case.
4333          */
4334         if (vcpu->guest_mode && vcpu->cpu != cpu)
4335                 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
4336         put_cpu();
4337 }