KVM: x86 emulator: Make a distinction between repeat prefixes F3 and F2
[safe/jmp/linux-2.6] drivers/kvm/x86.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  *
8  * Authors:
9  *   Avi Kivity   <avi@qumranet.com>
10  *   Yaniv Kamay  <yaniv@qumranet.com>
11  *
12  * This work is licensed under the terms of the GNU GPL, version 2.  See
13  * the COPYING file in the top-level directory.
14  *
15  */
16
17 #include "kvm.h"
18 #include "x86.h"
19 #include "x86_emulate.h"
20 #include "segment_descriptor.h"
21 #include "irq.h"
22
23 #include <linux/kvm.h>
24 #include <linux/fs.h>
25 #include <linux/vmalloc.h>
26 #include <linux/module.h>
27 #include <linux/mman.h>
28
29 #include <asm/uaccess.h>
30 #include <asm/msr.h>
31
32 #define MAX_IO_MSRS 256
33 #define CR0_RESERVED_BITS                                               \
34         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
35                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
36                           | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
37 #define CR4_RESERVED_BITS                                               \
38         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
39                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
40                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
41                           | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
42
43 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
44 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
45
46 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
47 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
48
49 struct kvm_x86_ops *kvm_x86_ops;
50
51 struct kvm_stats_debugfs_item debugfs_entries[] = {
52         { "pf_fixed", VCPU_STAT(pf_fixed) },
53         { "pf_guest", VCPU_STAT(pf_guest) },
54         { "tlb_flush", VCPU_STAT(tlb_flush) },
55         { "invlpg", VCPU_STAT(invlpg) },
56         { "exits", VCPU_STAT(exits) },
57         { "io_exits", VCPU_STAT(io_exits) },
58         { "mmio_exits", VCPU_STAT(mmio_exits) },
59         { "signal_exits", VCPU_STAT(signal_exits) },
60         { "irq_window", VCPU_STAT(irq_window_exits) },
61         { "halt_exits", VCPU_STAT(halt_exits) },
62         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63         { "request_irq", VCPU_STAT(request_irq_exits) },
64         { "irq_exits", VCPU_STAT(irq_exits) },
65         { "host_state_reload", VCPU_STAT(host_state_reload) },
66         { "efer_reload", VCPU_STAT(efer_reload) },
67         { "fpu_reload", VCPU_STAT(fpu_reload) },
68         { "insn_emulation", VCPU_STAT(insn_emulation) },
69         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
70         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
71         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
72         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
73         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
74         { "mmu_flooded", VM_STAT(mmu_flooded) },
75         { "mmu_recycled", VM_STAT(mmu_recycled) },
76         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
77         { NULL }
78 };
79
80
81 unsigned long segment_base(u16 selector)
82 {
83         struct descriptor_table gdt;
84         struct segment_descriptor *d;
85         unsigned long table_base;
86         unsigned long v;
87
88         if (selector == 0)
89                 return 0;
90
91         asm("sgdt %0" : "=m"(gdt));
92         table_base = gdt.base;
93
94         if (selector & 4) {           /* from ldt */
95                 u16 ldt_selector;
96
97                 asm("sldt %0" : "=g"(ldt_selector));
98                 table_base = segment_base(ldt_selector);
99         }
100         d = (struct segment_descriptor *)(table_base + (selector & ~7));
101         v = d->base_low | ((unsigned long)d->base_mid << 16) |
102                 ((unsigned long)d->base_high << 24);
103 #ifdef CONFIG_X86_64
104         if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
105                 v |= ((unsigned long) \
106                       ((struct segment_descriptor_64 *)d)->base_higher) << 32;
107 #endif
108         return v;
109 }
110 EXPORT_SYMBOL_GPL(segment_base);
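/*
 * Worked example (sketch): for selector 0x10 the table-indicator bit
 * (selector & 4) is clear, so the GDT is used, and selector & ~7 == 0x10
 * is the byte offset of the 8-byte descriptor, i.e. GDT entry 2.  The
 * base is then reassembled from the split fields:
 *
 *         v = d->base_low                  bits  0..15
 *           | d->base_mid  << 16           bits 16..23
 *           | d->base_high << 24           bits 24..31
 *
 * On x86_64, system descriptors (LDT, type 2; TSS, types 9 and 11) are
 * 16 bytes wide and supply bits 32..63 via base_higher.
 */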
111
112 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
113 {
114         return vcpu->apic_base;
118 }
119 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
120
121 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
122 {
123         /* TODO: reserve bits check */
124         if (irqchip_in_kernel(vcpu->kvm))
125                 kvm_lapic_set_base(vcpu, data);
126         else
127                 vcpu->apic_base = data;
128 }
129 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
130
131 static void inject_gp(struct kvm_vcpu *vcpu)
132 {
133         kvm_x86_ops->inject_gp(vcpu, 0);
134 }
135
136 /*
137  * Load the pae pdptrs.  Return true if they are all valid.
138  */
139 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
140 {
141         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
142         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
143         int i;
144         int ret;
145         u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
146
147         mutex_lock(&vcpu->kvm->lock);
148         ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
149                                   offset * sizeof(u64), sizeof(pdpte));
150         if (ret < 0) {
151                 ret = 0;
152                 goto out;
153         }
154         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
155                 if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
156                         ret = 0;
157                         goto out;
158                 }
159         }
160         ret = 1;
161
162         memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
163 out:
164         mutex_unlock(&vcpu->kvm->lock);
165
166         return ret;
167 }
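/*
 * Sketch of the arithmetic above: in PAE mode cr3 bits 5..11 select a
 * 32-byte aligned group of four 8-byte pdptes inside the page at
 * cr3 >> PAGE_SHIFT.  For example, with (cr3 & 0xfff) == 0x40:
 *
 *         offset = ((0x40 & (PAGE_SIZE-1)) >> 5) << 2 = 8
 *
 * so the read starts at byte 8 * sizeof(u64) == 64 of that page and
 * covers the four entries of this table.  An entry is rejected only if
 * it is present (bit 0) and has a bit from the 0xfffffff0000001e6 mask
 * set.
 */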
168
169 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
170 {
171         u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
172         bool changed = true;
173         int r;
174
175         if (is_long_mode(vcpu) || !is_pae(vcpu))
176                 return false;
177
178         mutex_lock(&vcpu->kvm->lock);
179         r = kvm_read_guest(vcpu->kvm, vcpu->cr3 & ~31u, pdpte, sizeof(pdpte));
180         if (r < 0)
181                 goto out;
182         changed = memcmp(pdpte, vcpu->pdptrs, sizeof(pdpte)) != 0;
183 out:
184         mutex_unlock(&vcpu->kvm->lock);
185
186         return changed;
187 }
188
189 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
190 {
191         if (cr0 & CR0_RESERVED_BITS) {
192                 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
193                        cr0, vcpu->cr0);
194                 inject_gp(vcpu);
195                 return;
196         }
197
198         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
199                 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
200                 inject_gp(vcpu);
201                 return;
202         }
203
204         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
205                 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
206                        "and a clear PE flag\n");
207                 inject_gp(vcpu);
208                 return;
209         }
210
211         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
212 #ifdef CONFIG_X86_64
213                 if ((vcpu->shadow_efer & EFER_LME)) {
214                         int cs_db, cs_l;
215
216                         if (!is_pae(vcpu)) {
217                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
218                                        "in long mode while PAE is disabled\n");
219                                 inject_gp(vcpu);
220                                 return;
221                         }
222                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
223                         if (cs_l) {
224                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
225                                        "in long mode while CS.L == 1\n");
226                                 inject_gp(vcpu);
227                                 return;
228
229                         }
230                 } else
231 #endif
232                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
233                         printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
234                                "reserved bits\n");
235                         inject_gp(vcpu);
236                         return;
237                 }
238
239         }
240
241         kvm_x86_ops->set_cr0(vcpu, cr0);
242         vcpu->cr0 = cr0;
243
244         mutex_lock(&vcpu->kvm->lock);
245         kvm_mmu_reset_context(vcpu);
246         mutex_unlock(&vcpu->kvm->lock);
247         return;
248 }
249 EXPORT_SYMBOL_GPL(set_cr0);
250
251 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
252 {
253         set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
254 }
255 EXPORT_SYMBOL_GPL(lmsw);
256
257 void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
258 {
259         if (cr4 & CR4_RESERVED_BITS) {
260                 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
261                 inject_gp(vcpu);
262                 return;
263         }
264
265         if (is_long_mode(vcpu)) {
266                 if (!(cr4 & X86_CR4_PAE)) {
267                         printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
268                                "in long mode\n");
269                         inject_gp(vcpu);
270                         return;
271                 }
272         } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
273                    && !load_pdptrs(vcpu, vcpu->cr3)) {
274                 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
275                 inject_gp(vcpu);
276                 return;
277         }
278
279         if (cr4 & X86_CR4_VMXE) {
280                 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
281                 inject_gp(vcpu);
282                 return;
283         }
284         kvm_x86_ops->set_cr4(vcpu, cr4);
285         vcpu->cr4 = cr4;
286         mutex_lock(&vcpu->kvm->lock);
287         kvm_mmu_reset_context(vcpu);
288         mutex_unlock(&vcpu->kvm->lock);
289 }
290 EXPORT_SYMBOL_GPL(set_cr4);
291
292 void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
293 {
294         if (cr3 == vcpu->cr3 && !pdptrs_changed(vcpu)) {
295                 kvm_mmu_flush_tlb(vcpu);
296                 return;
297         }
298
299         if (is_long_mode(vcpu)) {
300                 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
301                         printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
302                         inject_gp(vcpu);
303                         return;
304                 }
305         } else {
306                 if (is_pae(vcpu)) {
307                         if (cr3 & CR3_PAE_RESERVED_BITS) {
308                                 printk(KERN_DEBUG
309                                        "set_cr3: #GP, reserved bits\n");
310                                 inject_gp(vcpu);
311                                 return;
312                         }
313                         if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
314                                 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
315                                        "reserved bits\n");
316                                 inject_gp(vcpu);
317                                 return;
318                         }
319                 }
320                 /*
321                  * We don't check reserved bits in nonpae mode, because
322                  * this isn't enforced, and VMware depends on this.
323                  */
324         }
325
326         mutex_lock(&vcpu->kvm->lock);
327         /*
328          * Does the new cr3 value map to physical memory? (Note, we
329          * catch an invalid cr3 even in real-mode, because it would
330          * cause trouble later on when we turn on paging anyway.)
331          *
332          * A real CPU would silently accept an invalid cr3 and would
333          * attempt to use it - with largely undefined (and often hard
334          * to debug) behavior on the guest side.
335          */
336         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
337                 inject_gp(vcpu);
338         else {
339                 vcpu->cr3 = cr3;
340                 vcpu->mmu.new_cr3(vcpu);
341         }
342         mutex_unlock(&vcpu->kvm->lock);
343 }
344 EXPORT_SYMBOL_GPL(set_cr3);
345
346 void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
347 {
348         if (cr8 & CR8_RESERVED_BITS) {
349                 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
350                 inject_gp(vcpu);
351                 return;
352         }
353         if (irqchip_in_kernel(vcpu->kvm))
354                 kvm_lapic_set_tpr(vcpu, cr8);
355         else
356                 vcpu->cr8 = cr8;
357 }
358 EXPORT_SYMBOL_GPL(set_cr8);
359
360 unsigned long get_cr8(struct kvm_vcpu *vcpu)
361 {
362         if (irqchip_in_kernel(vcpu->kvm))
363                 return kvm_lapic_get_cr8(vcpu);
364         else
365                 return vcpu->cr8;
366 }
367 EXPORT_SYMBOL_GPL(get_cr8);
368
369 /*
370  * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
371  * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
372  *
373  * This list is modified at module load time to reflect the
374  * capabilities of the host cpu.
375  */
376 static u32 msrs_to_save[] = {
377         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
378         MSR_K6_STAR,
379 #ifdef CONFIG_X86_64
380         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
381 #endif
382         MSR_IA32_TIME_STAMP_COUNTER,
383 };
384
385 static unsigned num_msrs_to_save;
386
387 static u32 emulated_msrs[] = {
388         MSR_IA32_MISC_ENABLE,
389 };
390
391 #ifdef CONFIG_X86_64
392
393 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
394 {
395         if (efer & EFER_RESERVED_BITS) {
396                 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
397                        efer);
398                 inject_gp(vcpu);
399                 return;
400         }
401
402         if (is_paging(vcpu)
403             && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
404                 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
405                 inject_gp(vcpu);
406                 return;
407         }
408
409         kvm_x86_ops->set_efer(vcpu, efer);
410
411         efer &= ~EFER_LMA;
412         efer |= vcpu->shadow_efer & EFER_LMA;
413
414         vcpu->shadow_efer = efer;
415 }
416
417 #endif
418
419 /*
420  * Writes msr value into the appropriate "register".
421  * Returns 0 on success, non-0 otherwise.
422  * Assumes vcpu_load() was already called.
423  */
424 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
425 {
426         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
427 }
428
429 /*
430  * Adapt set_msr() to msr_io()'s calling convention
431  */
432 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
433 {
434         return kvm_set_msr(vcpu, index, *data);
435 }
436
437
438 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
439 {
440         switch (msr) {
441 #ifdef CONFIG_X86_64
442         case MSR_EFER:
443                 set_efer(vcpu, data);
444                 break;
445 #endif
446         case MSR_IA32_MC0_STATUS:
447                 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
448                        __FUNCTION__, data);
449                 break;
450         case MSR_IA32_MCG_STATUS:
451                 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
452                         __FUNCTION__, data);
453                 break;
454         case MSR_IA32_UCODE_REV:
455         case MSR_IA32_UCODE_WRITE:
456         case 0x200 ... 0x2ff: /* MTRRs */
457                 break;
458         case MSR_IA32_APICBASE:
459                 kvm_set_apic_base(vcpu, data);
460                 break;
461         case MSR_IA32_MISC_ENABLE:
462                 vcpu->ia32_misc_enable_msr = data;
463                 break;
464         default:
465                 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
466                 return 1;
467         }
468         return 0;
469 }
470 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
471
472
473 /*
474  * Reads an msr value (of 'msr_index') into 'pdata'.
475  * Returns 0 on success, non-0 otherwise.
476  * Assumes vcpu_load() was already called.
477  */
478 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
479 {
480         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
481 }
482
483 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
484 {
485         u64 data;
486
487         switch (msr) {
488         case 0xc0010010: /* SYSCFG */
489         case 0xc0010015: /* HWCR */
490         case MSR_IA32_PLATFORM_ID:
491         case MSR_IA32_P5_MC_ADDR:
492         case MSR_IA32_P5_MC_TYPE:
493         case MSR_IA32_MC0_CTL:
494         case MSR_IA32_MCG_STATUS:
495         case MSR_IA32_MCG_CAP:
496         case MSR_IA32_MC0_MISC:
497         case MSR_IA32_MC0_MISC+4:
498         case MSR_IA32_MC0_MISC+8:
499         case MSR_IA32_MC0_MISC+12:
500         case MSR_IA32_MC0_MISC+16:
501         case MSR_IA32_UCODE_REV:
502         case MSR_IA32_PERF_STATUS:
503         case MSR_IA32_EBL_CR_POWERON:
504                 /* MTRR registers */
505         case 0xfe:
506         case 0x200 ... 0x2ff:
507                 data = 0;
508                 break;
509         case 0xcd: /* fsb frequency */
510                 data = 3;
511                 break;
512         case MSR_IA32_APICBASE:
513                 data = kvm_get_apic_base(vcpu);
514                 break;
515         case MSR_IA32_MISC_ENABLE:
516                 data = vcpu->ia32_misc_enable_msr;
517                 break;
518 #ifdef CONFIG_X86_64
519         case MSR_EFER:
520                 data = vcpu->shadow_efer;
521                 break;
522 #endif
523         default:
524                 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
525                 return 1;
526         }
527         *pdata = data;
528         return 0;
529 }
530 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
531
532 /*
533  * Read or write a bunch of msrs. All parameters are kernel addresses.
534  *
535  * @return number of msrs processed successfully.
536  */
537 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
538                     struct kvm_msr_entry *entries,
539                     int (*do_msr)(struct kvm_vcpu *vcpu,
540                                   unsigned index, u64 *data))
541 {
542         int i;
543
544         vcpu_load(vcpu);
545
546         for (i = 0; i < msrs->nmsrs; ++i)
547                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
548                         break;
549
550         vcpu_put(vcpu);
551
552         return i;
553 }
554
555 /*
556  * Read or write a bunch of msrs. Parameters are user addresses.
557  *
558  * @return number of msrs processed successfully.
559  */
560 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
561                   int (*do_msr)(struct kvm_vcpu *vcpu,
562                                 unsigned index, u64 *data),
563                   int writeback)
564 {
565         struct kvm_msrs msrs;
566         struct kvm_msr_entry *entries;
567         int r, n;
568         unsigned size;
569
570         r = -EFAULT;
571         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
572                 goto out;
573
574         r = -E2BIG;
575         if (msrs.nmsrs >= MAX_IO_MSRS)
576                 goto out;
577
578         r = -ENOMEM;
579         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
580         entries = vmalloc(size);
581         if (!entries)
582                 goto out;
583
584         r = -EFAULT;
585         if (copy_from_user(entries, user_msrs->entries, size))
586                 goto out_free;
587
588         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
589         if (r < 0)
590                 goto out_free;
591
592         r = -EFAULT;
593         if (writeback && copy_to_user(user_msrs->entries, entries, size))
594                 goto out_free;
595
596         r = n;
597
598 out_free:
599         vfree(entries);
600 out:
601         return r;
602 }
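/*
 * Userspace usage sketch (hypothetical; vcpu_fd is a descriptor from
 * KVM_CREATE_VCPU): KVM_GET_MSRS and KVM_SET_MSRS take a struct kvm_msrs
 * header immediately followed by nmsrs kvm_msr_entry slots, and the
 * ioctl returns the number of msrs actually processed:
 *
 *         struct {
 *                 struct kvm_msrs hdr;
 *                 struct kvm_msr_entry entry;
 *         } req = { .hdr = { .nmsrs = 1 },
 *                   .entry = { .index = MSR_IA32_APICBASE } };
 *
 *         int done = ioctl(vcpu_fd, KVM_GET_MSRS, &req);
 *         (on success done == 1 and req.entry.data holds the value)
 */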
603
604 /*
605  * Make sure that a cpu that is being hot-unplugged does not have any vcpus
606  * cached on it.
607  */
608 void decache_vcpus_on_cpu(int cpu)
609 {
610         struct kvm *vm;
611         struct kvm_vcpu *vcpu;
612         int i;
613
614         spin_lock(&kvm_lock);
615         list_for_each_entry(vm, &vm_list, vm_list)
616                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
617                         vcpu = vm->vcpus[i];
618                         if (!vcpu)
619                                 continue;
620                         /*
621                          * If the vcpu is locked, then it is running on some
622                          * other cpu and therefore it is not cached on the
623                          * cpu in question.
624                          *
625                          * If it's not locked, check the last cpu it executed
626                          * on.
627                          */
628                         if (mutex_trylock(&vcpu->mutex)) {
629                                 if (vcpu->cpu == cpu) {
630                                         kvm_x86_ops->vcpu_decache(vcpu);
631                                         vcpu->cpu = -1;
632                                 }
633                                 mutex_unlock(&vcpu->mutex);
634                         }
635                 }
636         spin_unlock(&kvm_lock);
637 }
638
639 int kvm_dev_ioctl_check_extension(long ext)
640 {
641         int r;
642
643         switch (ext) {
644         case KVM_CAP_IRQCHIP:
645         case KVM_CAP_HLT:
646         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
647         case KVM_CAP_USER_MEMORY:
648         case KVM_CAP_SET_TSS_ADDR:
649         case KVM_CAP_EXT_CPUID:
650                 r = 1;
651                 break;
652         default:
653                 r = 0;
654                 break;
655         }
656         return r;
657
658 }
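/*
 * Usage sketch (hypothetical; kvm_fd is the /dev/kvm descriptor and
 * vm_fd a VM created with KVM_CREATE_VM): capability checks are plain
 * ioctls returning the value computed above, e.g.
 *
 *         if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *                 ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
 */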
659
660 long kvm_arch_dev_ioctl(struct file *filp,
661                         unsigned int ioctl, unsigned long arg)
662 {
663         void __user *argp = (void __user *)arg;
664         long r;
665
666         switch (ioctl) {
667         case KVM_GET_MSR_INDEX_LIST: {
668                 struct kvm_msr_list __user *user_msr_list = argp;
669                 struct kvm_msr_list msr_list;
670                 unsigned n;
671
672                 r = -EFAULT;
673                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
674                         goto out;
675                 n = msr_list.nmsrs;
676                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
677                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
678                         goto out;
679                 r = -E2BIG;
680                 if (n < num_msrs_to_save + ARRAY_SIZE(emulated_msrs))
681                         goto out;
682                 r = -EFAULT;
683                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
684                                  num_msrs_to_save * sizeof(u32)))
685                         goto out;
686                 if (copy_to_user(user_msr_list->indices
687                                  + num_msrs_to_save,
688                                  &emulated_msrs,
689                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
690                         goto out;
691                 r = 0;
692                 break;
693         }
694         default:
695                 r = -EINVAL;
696         }
697 out:
698         return r;
699 }
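/*
 * The -E2BIG path above supports the usual two-call pattern.  A
 * hypothetical userspace sketch (kvm_fd is the /dev/kvm descriptor):
 *
 *         struct kvm_msr_list probe = { .nmsrs = 0 };
 *         struct kvm_msr_list *list;
 *
 *         ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);
 *         (fails with E2BIG, but probe.nmsrs now holds the full count)
 *
 *         list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *         list->nmsrs = probe.nmsrs;
 *         ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
 *         (list->indices[] now holds the saved plus emulated msrs)
 */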
700
701 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
702 {
703         kvm_x86_ops->vcpu_load(vcpu, cpu);
704 }
705
706 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
707 {
708         kvm_x86_ops->vcpu_put(vcpu);
709         kvm_put_guest_fpu(vcpu);
710 }
711
712 static int is_efer_nx(void)
713 {
714         u64 efer;
715
716         rdmsrl(MSR_EFER, efer);
717         return efer & EFER_NX;
718 }
719
720 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
721 {
722         int i;
723         struct kvm_cpuid_entry2 *e, *entry;
724
725         entry = NULL;
726         for (i = 0; i < vcpu->cpuid_nent; ++i) {
727                 e = &vcpu->cpuid_entries[i];
728                 if (e->function == 0x80000001) {
729                         entry = e;
730                         break;
731                 }
732         }
733         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
734                 entry->edx &= ~(1 << 20);
735                 printk(KERN_INFO "kvm: guest NX capability removed\n");
736         }
737 }
738
739 /* legacy KVM_SET_CPUID: convert old entries to kvm_cpuid_entry2 */
740 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
741                                     struct kvm_cpuid *cpuid,
742                                     struct kvm_cpuid_entry __user *entries)
743 {
744         int r, i;
745         struct kvm_cpuid_entry *cpuid_entries;
746
747         r = -E2BIG;
748         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
749                 goto out;
750         r = -ENOMEM;
751         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
752         if (!cpuid_entries)
753                 goto out;
754         r = -EFAULT;
755         if (copy_from_user(cpuid_entries, entries,
756                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
757                 goto out_free;
758         for (i = 0; i < cpuid->nent; i++) {
759                 vcpu->cpuid_entries[i].function = cpuid_entries[i].function;
760                 vcpu->cpuid_entries[i].eax = cpuid_entries[i].eax;
761                 vcpu->cpuid_entries[i].ebx = cpuid_entries[i].ebx;
762                 vcpu->cpuid_entries[i].ecx = cpuid_entries[i].ecx;
763                 vcpu->cpuid_entries[i].edx = cpuid_entries[i].edx;
764                 vcpu->cpuid_entries[i].index = 0;
765                 vcpu->cpuid_entries[i].flags = 0;
766                 vcpu->cpuid_entries[i].padding[0] = 0;
767                 vcpu->cpuid_entries[i].padding[1] = 0;
768                 vcpu->cpuid_entries[i].padding[2] = 0;
769         }
770         vcpu->cpuid_nent = cpuid->nent;
771         cpuid_fix_nx_cap(vcpu);
772         r = 0;
773
774 out_free:
775         vfree(cpuid_entries);
776 out:
777         return r;
778 }
779
780 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
781                                     struct kvm_cpuid2 *cpuid,
782                                     struct kvm_cpuid_entry2 __user *entries)
783 {
784         int r;
785
786         r = -E2BIG;
787         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
788                 goto out;
789         r = -EFAULT;
790         if (copy_from_user(&vcpu->cpuid_entries, entries,
791                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
792                 goto out;
793         vcpu->cpuid_nent = cpuid->nent;
794         return 0;
795
796 out:
797         return r;
798 }
799
800 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
801                                     struct kvm_cpuid2 *cpuid,
802                                     struct kvm_cpuid_entry2 __user *entries)
803 {
804         int r;
805
806         r = -E2BIG;
807         if (cpuid->nent < vcpu->cpuid_nent)
808                 goto out;
809         r = -EFAULT;
810         if (copy_to_user(entries, &vcpu->cpuid_entries,
811                            vcpu->cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
812                 goto out;
813         return 0;
814
815 out:
816         cpuid->nent = vcpu->cpuid_nent;
817         return r;
818 }
819
820 static inline u32 bit(int bitno)
821 {
822         return 1 << (bitno & 31);
823 }
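/*
 * Note on bit(): the X86_FEATURE_* constants encode "32 * word + bit",
 * so masking with 31 recovers the position inside one 32-bit cpuid
 * word.  For instance X86_FEATURE_NX is 1*32+20, hence
 *
 *         bit(X86_FEATURE_NX) == 1 << 20
 *
 * which is the NX bit of cpuid 0x80000001 edx (feature word 1).
 */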
824
825 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
826                           u32 index)
827 {
828         entry->function = function;
829         entry->index = index;
830         cpuid_count(entry->function, entry->index,
831                 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
832         entry->flags = 0;
833 }
834
835 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
836                          u32 index, int *nent, int maxnent)
837 {
838         const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
839                 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
840                 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
841                 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
842                 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
843                 bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
844                 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
845                 bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
846                 bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
847                 bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
848         const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
849                 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
850                 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
851                 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
852                 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
853                 bit(X86_FEATURE_PGE) |
854                 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
855                 bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
856                 bit(X86_FEATURE_SYSCALL) |
857                 (is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
858 #ifdef CONFIG_X86_64
859                 bit(X86_FEATURE_LM) |
860 #endif
861                 bit(X86_FEATURE_MMXEXT) |
862                 bit(X86_FEATURE_3DNOWEXT) |
863                 bit(X86_FEATURE_3DNOW);
864         const u32 kvm_supported_word3_x86_features =
865                 bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
866         const u32 kvm_supported_word6_x86_features =
867                 bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);
868
869         /* all function 2 cpuid_count() calls must be made on the same cpu */
870         get_cpu();
871         do_cpuid_1_ent(entry, function, index);
872         ++*nent;
873
874         switch (function) {
875         case 0:
876                 entry->eax = min(entry->eax, (u32)0xb);
877                 break;
878         case 1:
879                 entry->edx &= kvm_supported_word0_x86_features;
880                 entry->ecx &= kvm_supported_word3_x86_features;
881                 break;
882         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
883          * may return different values. This forces us to get_cpu() before
884          * issuing the first command, and also to emulate this annoying behavior
885          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
886         case 2: {
887                 int t, times = entry->eax & 0xff;
888
889                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
890                 for (t = 1; t < times && *nent < maxnent; ++t) {
891                         do_cpuid_1_ent(&entry[t], function, 0);
892                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
893                         ++*nent;
894                 }
895                 break;
896         }
897         /* functions 4 and 0xb have an additional index. */
898         case 4: {
899                 int index, cache_type;
900
901                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
902                 /* read more entries until cache_type is zero */
903                 for (index = 1; *nent < maxnent; ++index) {
904                         cache_type = entry[index - 1].eax & 0x1f;
905                         if (!cache_type)
906                                 break;
907                         do_cpuid_1_ent(&entry[index], function, index);
908                         entry[index].flags |=
909                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
910                         ++*nent;
911                 }
912                 break;
913         }
914         case 0xb: {
915                 int index, level_type;
916
917                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
918                 /* read more entries until level_type is zero */
919                 for (index = 1; *nent < maxnent; ++index) {
920                         level_type = entry[index - 1].ecx & 0xff;
921                         if (!level_type)
922                                 break;
923                         do_cpuid_1_ent(&entry[index], function, index);
924                         entry[index].flags |=
925                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
926                         ++*nent;
927                 }
928                 break;
929         }
930         case 0x80000000:
931                 entry->eax = min(entry->eax, 0x8000001a);
932                 break;
933         case 0x80000001:
934                 entry->edx &= kvm_supported_word1_x86_features;
935                 entry->ecx &= kvm_supported_word6_x86_features;
936                 break;
937         }
938         put_cpu();
939 }
940
941 static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
942                                     struct kvm_cpuid2 *cpuid,
943                                     struct kvm_cpuid_entry2 __user *entries)
944 {
945         struct kvm_cpuid_entry2 *cpuid_entries;
946         int limit, nent = 0, r = -E2BIG;
947         u32 func;
948
949         if (cpuid->nent < 1)
950                 goto out;
951         r = -ENOMEM;
952         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
953         if (!cpuid_entries)
954                 goto out;
955
956         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
957         limit = cpuid_entries[0].eax;
958         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
959                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
960                                 &nent, cpuid->nent);
961         r = -E2BIG;
962         if (nent >= cpuid->nent)
963                 goto out_free;
964
965         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
966         limit = cpuid_entries[nent - 1].eax;
967         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
968                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
969                                &nent, cpuid->nent);
970         r = -EFAULT;
971         if (copy_to_user(entries, cpuid_entries,
972                         nent * sizeof(struct kvm_cpuid_entry2)))
973                 goto out_free;
974         cpuid->nent = nent;
975         r = 0;
976
977 out_free:
978         vfree(cpuid_entries);
979 out:
980         return r;
981 }
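/*
 * Userspace sketch (hypothetical; vm_fd is a KVM_CREATE_VM descriptor):
 * grow the buffer while the ioctl keeps failing with the E2BIG that the
 * code above returns when nent is too small:
 *
 *         int nent = 8;
 *         struct kvm_cpuid2 *cpuid;
 *
 *         for (;;) {
 *                 cpuid = malloc(sizeof(*cpuid) +
 *                                nent * sizeof(struct kvm_cpuid_entry2));
 *                 cpuid->nent = nent;
 *                 if (ioctl(vm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
 *                         break;
 *                 free(cpuid);
 *                 if (errno != E2BIG)
 *                         abort();
 *                 nent *= 2;
 *         }
 */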
982
983 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
984                                     struct kvm_lapic_state *s)
985 {
986         vcpu_load(vcpu);
987         memcpy(s->regs, vcpu->apic->regs, sizeof *s);
988         vcpu_put(vcpu);
989
990         return 0;
991 }
992
993 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
994                                     struct kvm_lapic_state *s)
995 {
996         vcpu_load(vcpu);
997         memcpy(vcpu->apic->regs, s->regs, sizeof *s);
998         kvm_apic_post_state_restore(vcpu);
999         vcpu_put(vcpu);
1000
1001         return 0;
1002 }
1003
1004 long kvm_arch_vcpu_ioctl(struct file *filp,
1005                          unsigned int ioctl, unsigned long arg)
1006 {
1007         struct kvm_vcpu *vcpu = filp->private_data;
1008         void __user *argp = (void __user *)arg;
1009         int r;
1010
1011         switch (ioctl) {
1012         case KVM_GET_LAPIC: {
1013                 struct kvm_lapic_state lapic;
1014
1015                 memset(&lapic, 0, sizeof lapic);
1016                 r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
1017                 if (r)
1018                         goto out;
1019                 r = -EFAULT;
1020                 if (copy_to_user(argp, &lapic, sizeof lapic))
1021                         goto out;
1022                 r = 0;
1023                 break;
1024         }
1025         case KVM_SET_LAPIC: {
1026                 struct kvm_lapic_state lapic;
1027
1028                 r = -EFAULT;
1029                 if (copy_from_user(&lapic, argp, sizeof lapic))
1030                         goto out;
1031                 r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
1032                 if (r)
1033                         goto out;
1034                 r = 0;
1035                 break;
1036         }
1037         case KVM_SET_CPUID: {
1038                 struct kvm_cpuid __user *cpuid_arg = argp;
1039                 struct kvm_cpuid cpuid;
1040
1041                 r = -EFAULT;
1042                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1043                         goto out;
1044                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1045                 if (r)
1046                         goto out;
1047                 break;
1048         }
1049         case KVM_SET_CPUID2: {
1050                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1051                 struct kvm_cpuid2 cpuid;
1052
1053                 r = -EFAULT;
1054                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1055                         goto out;
1056                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1057                                 cpuid_arg->entries);
1058                 if (r)
1059                         goto out;
1060                 break;
1061         }
1062         case KVM_GET_CPUID2: {
1063                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1064                 struct kvm_cpuid2 cpuid;
1065
1066                 r = -EFAULT;
1067                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1068                         goto out;
1069                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1070                                 cpuid_arg->entries);
1071                 if (r)
1072                         goto out;
1073                 r = -EFAULT;
1074                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1075                         goto out;
1076                 r = 0;
1077                 break;
1078         }
1079         case KVM_GET_MSRS:
1080                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1081                 break;
1082         case KVM_SET_MSRS:
1083                 r = msr_io(vcpu, argp, do_set_msr, 0);
1084                 break;
1085         default:
1086                 r = -EINVAL;
1087         }
1088 out:
1089         return r;
1090 }
1091
1092 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1093 {
1094         int ret;
1095
1096         if (addr > (unsigned int)(-3 * PAGE_SIZE))
1097                 return -1;
1098         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1099         return ret;
1100 }
1101
1102 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1103                                           u32 kvm_nr_mmu_pages)
1104 {
1105         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1106                 return -EINVAL;
1107
1108         mutex_lock(&kvm->lock);
1109
1110         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
1111         kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
1112
1113         mutex_unlock(&kvm->lock);
1114         return 0;
1115 }
1116
1117 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1118 {
1119         return kvm->n_alloc_mmu_pages;
1120 }
1121
1122 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1123 {
1124         int i;
1125         struct kvm_mem_alias *alias;
1126
1127         for (i = 0; i < kvm->naliases; ++i) {
1128                 alias = &kvm->aliases[i];
1129                 if (gfn >= alias->base_gfn
1130                     && gfn < alias->base_gfn + alias->npages)
1131                         return alias->target_gfn + gfn - alias->base_gfn;
1132         }
1133         return gfn;
1134 }
1135
1136 /*
1137  * Set a new alias region.  Aliases map a portion of physical memory into
1138  * another portion.  This is useful for memory windows, for example the PC
1139  * VGA region.
1140  */
1141 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1142                                          struct kvm_memory_alias *alias)
1143 {
1144         int r, n;
1145         struct kvm_mem_alias *p;
1146
1147         r = -EINVAL;
1148         /* General sanity checks */
1149         if (alias->memory_size & (PAGE_SIZE - 1))
1150                 goto out;
1151         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1152                 goto out;
1153         if (alias->slot >= KVM_ALIAS_SLOTS)
1154                 goto out;
1155         if (alias->guest_phys_addr + alias->memory_size
1156             < alias->guest_phys_addr)
1157                 goto out;
1158         if (alias->target_phys_addr + alias->memory_size
1159             < alias->target_phys_addr)
1160                 goto out;
1161
1162         mutex_lock(&kvm->lock);
1163
1164         p = &kvm->aliases[alias->slot];
1165         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1166         p->npages = alias->memory_size >> PAGE_SHIFT;
1167         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1168
1169         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
1170                 if (kvm->aliases[n - 1].npages)
1171                         break;
1172         kvm->naliases = n;
1173
1174         kvm_mmu_zap_all(kvm);
1175
1176         mutex_unlock(&kvm->lock);
1177
1178         return 0;
1179
1180 out:
1181         return r;
1182 }
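/*
 * Worked example (sketch): an alias slot with
 *         guest_phys_addr  = 0xa0000
 *         memory_size      = 0x10000
 *         target_phys_addr = 0xf0000000
 * gives base_gfn 0xa0, npages 0x10, target_gfn 0xf0000, so unalias_gfn()
 * maps gfn 0xa3 (guest physical 0xa3000) to 0xf0000 + (0xa3 - 0xa0) =
 * 0xf0003: accesses to the VGA window are served by the target region.
 */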
1183
1184 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1185 {
1186         int r;
1187
1188         r = 0;
1189         switch (chip->chip_id) {
1190         case KVM_IRQCHIP_PIC_MASTER:
1191                 memcpy(&chip->chip.pic,
1192                         &pic_irqchip(kvm)->pics[0],
1193                         sizeof(struct kvm_pic_state));
1194                 break;
1195         case KVM_IRQCHIP_PIC_SLAVE:
1196                 memcpy(&chip->chip.pic,
1197                         &pic_irqchip(kvm)->pics[1],
1198                         sizeof(struct kvm_pic_state));
1199                 break;
1200         case KVM_IRQCHIP_IOAPIC:
1201                 memcpy(&chip->chip.ioapic,
1202                         ioapic_irqchip(kvm),
1203                         sizeof(struct kvm_ioapic_state));
1204                 break;
1205         default:
1206                 r = -EINVAL;
1207                 break;
1208         }
1209         return r;
1210 }
1211
1212 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1213 {
1214         int r;
1215
1216         r = 0;
1217         switch (chip->chip_id) {
1218         case KVM_IRQCHIP_PIC_MASTER:
1219                 memcpy(&pic_irqchip(kvm)->pics[0],
1220                         &chip->chip.pic,
1221                         sizeof(struct kvm_pic_state));
1222                 break;
1223         case KVM_IRQCHIP_PIC_SLAVE:
1224                 memcpy(&pic_irqchip(kvm)->pics[1],
1225                         &chip->chip.pic,
1226                         sizeof(struct kvm_pic_state));
1227                 break;
1228         case KVM_IRQCHIP_IOAPIC:
1229                 memcpy(ioapic_irqchip(kvm),
1230                         &chip->chip.ioapic,
1231                         sizeof(struct kvm_ioapic_state));
1232                 break;
1233         default:
1234                 r = -EINVAL;
1235                 break;
1236         }
1237         kvm_pic_update_irq(pic_irqchip(kvm));
1238         return r;
1239 }
1240
1241 /*
1242  * Get (and clear) the dirty memory log for a memory slot.
1243  */
1244 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1245                                       struct kvm_dirty_log *log)
1246 {
1247         int r;
1248         int n;
1249         struct kvm_memory_slot *memslot;
1250         int is_dirty = 0;
1251
1252         mutex_lock(&kvm->lock);
1253
1254         r = kvm_get_dirty_log(kvm, log, &is_dirty);
1255         if (r)
1256                 goto out;
1257
1258         /* If nothing is dirty, don't bother messing with page tables. */
1259         if (is_dirty) {
1260                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1261                 kvm_flush_remote_tlbs(kvm);
1262                 memslot = &kvm->memslots[log->slot];
1263                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1264                 memset(memslot->dirty_bitmap, 0, n);
1265         }
1266         r = 0;
1267 out:
1268         mutex_unlock(&kvm->lock);
1269         return r;
1270 }
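/*
 * Bitmap size sketch: one bit per page, rounded up to whole longs.  On
 * a 64-bit host a 1000-page slot gives
 *         n = ALIGN(1000, 64) / 8 = 1024 / 8 = 128 bytes,
 * matching the size used when the dirty bitmap was allocated.
 */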
1271
1272 long kvm_arch_vm_ioctl(struct file *filp,
1273                        unsigned int ioctl, unsigned long arg)
1274 {
1275         struct kvm *kvm = filp->private_data;
1276         void __user *argp = (void __user *)arg;
1277         int r = -EINVAL;
1278
1279         switch (ioctl) {
1280         case KVM_SET_TSS_ADDR:
1281                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1282                 if (r < 0)
1283                         goto out;
1284                 break;
1285         case KVM_SET_MEMORY_REGION: {
1286                 struct kvm_memory_region kvm_mem;
1287                 struct kvm_userspace_memory_region kvm_userspace_mem;
1288
1289                 r = -EFAULT;
1290                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1291                         goto out;
1292                 kvm_userspace_mem.slot = kvm_mem.slot;
1293                 kvm_userspace_mem.flags = kvm_mem.flags;
1294                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1295                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1296                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1297                 if (r)
1298                         goto out;
1299                 break;
1300         }
1301         case KVM_SET_NR_MMU_PAGES:
1302                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1303                 if (r)
1304                         goto out;
1305                 break;
1306         case KVM_GET_NR_MMU_PAGES:
1307                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1308                 break;
1309         case KVM_SET_MEMORY_ALIAS: {
1310                 struct kvm_memory_alias alias;
1311
1312                 r = -EFAULT;
1313                 if (copy_from_user(&alias, argp, sizeof alias))
1314                         goto out;
1315                 r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
1316                 if (r)
1317                         goto out;
1318                 break;
1319         }
1320         case KVM_CREATE_IRQCHIP:
1321                 r = -ENOMEM;
1322                 kvm->vpic = kvm_create_pic(kvm);
1323                 if (kvm->vpic) {
1324                         r = kvm_ioapic_init(kvm);
1325                         if (r) {
1326                                 kfree(kvm->vpic);
1327                                 kvm->vpic = NULL;
1328                                 goto out;
1329                         }
1330                 } else
1331                         goto out;
1332                 break;
1333         case KVM_IRQ_LINE: {
1334                 struct kvm_irq_level irq_event;
1335
1336                 r = -EFAULT;
1337                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1338                         goto out;
1339                 if (irqchip_in_kernel(kvm)) {
1340                         mutex_lock(&kvm->lock);
1341                         if (irq_event.irq < 16)
1342                                 kvm_pic_set_irq(pic_irqchip(kvm),
1343                                         irq_event.irq,
1344                                         irq_event.level);
1345                         kvm_ioapic_set_irq(kvm->vioapic,
1346                                         irq_event.irq,
1347                                         irq_event.level);
1348                         mutex_unlock(&kvm->lock);
1349                         r = 0;
1350                 }
1351                 break;
1352         }
1353         case KVM_GET_IRQCHIP: {
1354                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1355                 struct kvm_irqchip chip;
1356
1357                 r = -EFAULT;
1358                 if (copy_from_user(&chip, argp, sizeof chip))
1359                         goto out;
1360                 r = -ENXIO;
1361                 if (!irqchip_in_kernel(kvm))
1362                         goto out;
1363                 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
1364                 if (r)
1365                         goto out;
1366                 r = -EFAULT;
1367                 if (copy_to_user(argp, &chip, sizeof chip))
1368                         goto out;
1369                 r = 0;
1370                 break;
1371         }
1372         case KVM_SET_IRQCHIP: {
1373                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1374                 struct kvm_irqchip chip;
1375
1376                 r = -EFAULT;
1377                 if (copy_from_user(&chip, argp, sizeof chip))
1378                         goto out;
1379                 r = -ENXIO;
1380                 if (!irqchip_in_kernel(kvm))
1381                         goto out;
1382                 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1383                 if (r)
1384                         goto out;
1385                 r = 0;
1386                 break;
1387         }
1388         case KVM_GET_SUPPORTED_CPUID: {
1389                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1390                 struct kvm_cpuid2 cpuid;
1391
1392                 r = -EFAULT;
1393                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1394                         goto out;
1395                 r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
1396                         cpuid_arg->entries);
1397                 if (r)
1398                         goto out;
1399
1400                 r = -EFAULT;
1401                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1402                         goto out;
1403                 r = 0;
1404                 break;
1405         }
1406         default:
1407                 ;
1408         }
1409 out:
1410         return r;
1411 }
1412
1413 static void kvm_init_msr_list(void)
1414 {
1415         u32 dummy[2];
1416         unsigned i, j;
1417
1418         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
1419                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
1420                         continue;
1421                 if (j < i)
1422                         msrs_to_save[j] = msrs_to_save[i];
1423                 j++;
1424         }
1425         num_msrs_to_save = j;
1426 }
1427
1428 /*
1429  * Only the APIC needs an MMIO device hook, so shortcut here.
1430  */
1431 static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1432                                                 gpa_t addr)
1433 {
1434         struct kvm_io_device *dev;
1435
1436         if (vcpu->apic) {
1437                 dev = &vcpu->apic->dev;
1438                 if (dev->in_range(dev, addr))
1439                         return dev;
1440         }
1441         return NULL;
1442 }
1443
1444
1445 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1446                                                 gpa_t addr)
1447 {
1448         struct kvm_io_device *dev;
1449
1450         dev = vcpu_find_pervcpu_dev(vcpu, addr);
1451         if (dev == NULL)
1452                 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1453         return dev;
1454 }
1455
1456 int emulator_read_std(unsigned long addr,
1457                              void *val,
1458                              unsigned int bytes,
1459                              struct kvm_vcpu *vcpu)
1460 {
1461         void *data = val;
1462
1463         while (bytes) {
1464                 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1465                 unsigned offset = addr & (PAGE_SIZE-1);
1466                 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
1467                 int ret;
1468
1469                 if (gpa == UNMAPPED_GVA)
1470                         return X86EMUL_PROPAGATE_FAULT;
1471                 ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
1472                 if (ret < 0)
1473                         return X86EMUL_UNHANDLEABLE;
1474
1475                 bytes -= tocopy;
1476                 data += tocopy;
1477                 addr += tocopy;
1478         }
1479
1480         return X86EMUL_CONTINUE;
1481 }
1482 EXPORT_SYMBOL_GPL(emulator_read_std);
1483
1484 static int emulator_read_emulated(unsigned long addr,
1485                                   void *val,
1486                                   unsigned int bytes,
1487                                   struct kvm_vcpu *vcpu)
1488 {
1489         struct kvm_io_device *mmio_dev;
1490         gpa_t                 gpa;
1491
1492         if (vcpu->mmio_read_completed) {
1493                 memcpy(val, vcpu->mmio_data, bytes);
1494                 vcpu->mmio_read_completed = 0;
1495                 return X86EMUL_CONTINUE;
1496         }
1497
1498         gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1499
1500         /* For APIC access vmexit */
1501         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1502                 goto mmio;
1503
1504         if (emulator_read_std(addr, val, bytes, vcpu)
1505                         == X86EMUL_CONTINUE)
1506                 return X86EMUL_CONTINUE;
1507         if (gpa == UNMAPPED_GVA)
1508                 return X86EMUL_PROPAGATE_FAULT;
1509
1510 mmio:
1511         /*
1512          * Is this MMIO handled locally?
1513          */
1514         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1515         if (mmio_dev) {
1516                 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1517                 return X86EMUL_CONTINUE;
1518         }
1519
1520         vcpu->mmio_needed = 1;
1521         vcpu->mmio_phys_addr = gpa;
1522         vcpu->mmio_size = bytes;
1523         vcpu->mmio_is_write = 0;
1524
1525         return X86EMUL_UNHANDLEABLE;
1526 }
1527
1528 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1529                                const void *val, int bytes)
1530 {
1531         int ret;
1532
1533         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
1534         if (ret < 0)
1535                 return 0;
1536         kvm_mmu_pte_write(vcpu, gpa, val, bytes);
1537         return 1;
1538 }
1539
1540 static int emulator_write_emulated_onepage(unsigned long addr,
1541                                            const void *val,
1542                                            unsigned int bytes,
1543                                            struct kvm_vcpu *vcpu)
1544 {
1545         struct kvm_io_device *mmio_dev;
1546         gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1547
1548         if (gpa == UNMAPPED_GVA) {
1549                 kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
1550                 return X86EMUL_PROPAGATE_FAULT;
1551         }
1552
1553         /* For APIC access vmexit */
1554         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1555                 goto mmio;
1556
1557         if (emulator_write_phys(vcpu, gpa, val, bytes))
1558                 return X86EMUL_CONTINUE;
1559
1560 mmio:
1561         /*
1562          * Is this MMIO handled locally?
1563          */
1564         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1565         if (mmio_dev) {
1566                 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
1567                 return X86EMUL_CONTINUE;
1568         }
1569
1570         vcpu->mmio_needed = 1;
1571         vcpu->mmio_phys_addr = gpa;
1572         vcpu->mmio_size = bytes;
1573         vcpu->mmio_is_write = 1;
1574         memcpy(vcpu->mmio_data, val, bytes);
1575
1576         return X86EMUL_CONTINUE;
1577 }
1578
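/*
 * Emulated memory write.  A write that straddles a page boundary is
 * split into two single-page writes, since the two virtual pages may
 * map to unrelated physical addresses.  "now = -addr & ~PAGE_MASK" is
 * the number of bytes left in the first page: e.g. for addr ending in
 * 0xffe and bytes = 4, now = 2, so two bytes go to the first page and
 * the remaining two to the next one.
 */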
1579 int emulator_write_emulated(unsigned long addr,
1580                                    const void *val,
1581                                    unsigned int bytes,
1582                                    struct kvm_vcpu *vcpu)
1583 {
1584         /* Crossing a page boundary? */
1585         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
1586                 int rc, now;
1587
1588                 now = -addr & ~PAGE_MASK;
1589                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
1590                 if (rc != X86EMUL_CONTINUE)
1591                         return rc;
1592                 addr += now;
1593                 val += now;
1594                 bytes -= now;
1595         }
1596         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
1597 }
1598 EXPORT_SYMBOL_GPL(emulator_write_emulated);
1599
1600 static int emulator_cmpxchg_emulated(unsigned long addr,
1601                                      const void *old,
1602                                      const void *new,
1603                                      unsigned int bytes,
1604                                      struct kvm_vcpu *vcpu)
1605 {
1606         static int reported;
1607
1608         if (!reported) {
1609                 reported = 1;
1610                 printk(KERN_WARNING "kvm: emulating exchange as write\n");
1611         }
1612         return emulator_write_emulated(addr, new, bytes, vcpu);
1613 }
1614
1615 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
1616 {
1617         return kvm_x86_ops->get_segment_base(vcpu, seg);
1618 }
1619
1620 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1621 {
1622         return X86EMUL_CONTINUE;
1623 }
1624
1625 int emulate_clts(struct kvm_vcpu *vcpu)
1626 {
1627         kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
1628         return X86EMUL_CONTINUE;
1629 }
1630
1631 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
1632 {
1633         struct kvm_vcpu *vcpu = ctxt->vcpu;
1634
1635         switch (dr) {
1636         case 0 ... 3:
1637                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
1638                 return X86EMUL_CONTINUE;
1639         default:
1640                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
1641                 return X86EMUL_UNHANDLEABLE;
1642         }
1643 }
1644
1645 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
1646 {
1647         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
1648         int exception;
1649
1650         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
1651         if (exception) {
1652                 /* FIXME: better handling */
1653                 return X86EMUL_UNHANDLEABLE;
1654         }
1655         return X86EMUL_CONTINUE;
1656 }
1657
1658 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1659 {
1660         static int reported;
1661         u8 opcodes[4];
1662         unsigned long rip = vcpu->rip;
1663         unsigned long rip_linear;
1664
1665         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
1666
1667         if (reported)
1668                 return;
1669
1670         emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
1671
1672         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
1673                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
1674         reported = 1;
1675 }
1676 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
1677
1678 struct x86_emulate_ops emulate_ops = {
1679         .read_std            = emulator_read_std,
1680         .read_emulated       = emulator_read_emulated,
1681         .write_emulated      = emulator_write_emulated,
1682         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
1683 };
1684
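/*
 * Top-level entry point for instruction emulation.  Unless the caller
 * has already decoded the instruction (no_decode), the emulation
 * context is set up from the current guest mode and segment bases and
 * the instruction is decoded, then executed.  Returns EMULATE_DONE on
 * success, EMULATE_DO_MMIO when userspace must complete an MMIO or
 * string PIO access, and EMULATE_FAIL if the instruction could not be
 * handled.
 */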
1685 int emulate_instruction(struct kvm_vcpu *vcpu,
1686                         struct kvm_run *run,
1687                         unsigned long cr2,
1688                         u16 error_code,
1689                         int no_decode)
1690 {
1691         int r;
1692
1693         vcpu->mmio_fault_cr2 = cr2;
1694         kvm_x86_ops->cache_regs(vcpu);
1695
1696         vcpu->mmio_is_write = 0;
1697         vcpu->pio.string = 0;
1698
1699         if (!no_decode) {
1700                 int cs_db, cs_l;
1701                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1702
1703                 vcpu->emulate_ctxt.vcpu = vcpu;
1704                 vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
1705                 vcpu->emulate_ctxt.cr2 = cr2;
1706                 vcpu->emulate_ctxt.mode =
1707                         (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
1708                         ? X86EMUL_MODE_REAL : cs_l
1709                         ? X86EMUL_MODE_PROT64 : cs_db
1710                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
1711
1712                 if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
1713                         vcpu->emulate_ctxt.cs_base = 0;
1714                         vcpu->emulate_ctxt.ds_base = 0;
1715                         vcpu->emulate_ctxt.es_base = 0;
1716                         vcpu->emulate_ctxt.ss_base = 0;
1717                 } else {
1718                         vcpu->emulate_ctxt.cs_base =
1719                                         get_segment_base(vcpu, VCPU_SREG_CS);
1720                         vcpu->emulate_ctxt.ds_base =
1721                                         get_segment_base(vcpu, VCPU_SREG_DS);
1722                         vcpu->emulate_ctxt.es_base =
1723                                         get_segment_base(vcpu, VCPU_SREG_ES);
1724                         vcpu->emulate_ctxt.ss_base =
1725                                         get_segment_base(vcpu, VCPU_SREG_SS);
1726                 }
1727
1728                 vcpu->emulate_ctxt.gs_base =
1729                                         get_segment_base(vcpu, VCPU_SREG_GS);
1730                 vcpu->emulate_ctxt.fs_base =
1731                                         get_segment_base(vcpu, VCPU_SREG_FS);
1732
1733                 r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
1734                 ++vcpu->stat.insn_emulation;
1735                 if (r)  {
1736                         ++vcpu->stat.insn_emulation_fail;
1737                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1738                                 return EMULATE_DONE;
1739                         return EMULATE_FAIL;
1740                 }
1741         }
1742
1743         r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
1744
1745         if (vcpu->pio.string)
1746                 return EMULATE_DO_MMIO;
1747
1748         if ((r || vcpu->mmio_is_write) && run) {
1749                 run->exit_reason = KVM_EXIT_MMIO;
1750                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
1751                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
1752                 run->mmio.len = vcpu->mmio_size;
1753                 run->mmio.is_write = vcpu->mmio_is_write;
1754         }
1755
1756         if (r) {
1757                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1758                         return EMULATE_DONE;
1759                 if (!vcpu->mmio_needed) {
1760                         kvm_report_emulation_failure(vcpu, "mmio");
1761                         return EMULATE_FAIL;
1762                 }
1763                 return EMULATE_DO_MMIO;
1764         }
1765
1766         kvm_x86_ops->decache_regs(vcpu);
1767         kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
1768
1769         if (vcpu->mmio_is_write) {
1770                 vcpu->mmio_needed = 0;
1771                 return EMULATE_DO_MMIO;
1772         }
1773
1774         return EMULATE_DONE;
1775 }
1776 EXPORT_SYMBOL_GPL(emulate_instruction);
1777
1778 static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
1779 {
1780         int i;
1781
1782         for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
1783                 if (vcpu->pio.guest_pages[i]) {
1784                         kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
1785                         vcpu->pio.guest_pages[i] = NULL;
1786                 }
1787 }
1788
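/*
 * Copy string PIO data between the per-vcpu pio_data bounce buffer
 * (shared with userspace) and the pinned guest pages: vmap() the one
 * or two pinned pages, copy in the direction given by pio.in, then
 * unmap and release the pages.
 */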
1789 static int pio_copy_data(struct kvm_vcpu *vcpu)
1790 {
1791         void *p = vcpu->pio_data;
1792         void *q;
1793         unsigned bytes;
1794         int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1795
1796         q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1797                  PAGE_KERNEL);
1798         if (!q) {
1799                 free_pio_guest_pages(vcpu);
1800                 return -ENOMEM;
1801         }
1802         q += vcpu->pio.guest_page_offset;
1803         bytes = vcpu->pio.size * vcpu->pio.cur_count;
1804         if (vcpu->pio.in)
1805                 memcpy(q, p, bytes);
1806         else
1807                 memcpy(p, q, bytes);
1808         q -= vcpu->pio.guest_page_offset;
1809         vunmap(q);
1810         free_pio_guest_pages(vcpu);
1811         return 0;
1812 }
1813
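/*
 * Finish a port I/O operation once the data is available.  For a
 * non-string "in", the result is copied into RAX.  For string I/O the
 * data is copied to or from guest memory, RSI or RDI is advanced by
 * the number of bytes transferred (negatively when the direction
 * flag, io->down, is set), and RCX is decremented by the element
 * count for rep-prefixed instructions.
 */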
1814 int complete_pio(struct kvm_vcpu *vcpu)
1815 {
1816         struct kvm_pio_request *io = &vcpu->pio;
1817         long delta;
1818         int r;
1819
1820         kvm_x86_ops->cache_regs(vcpu);
1821
1822         if (!io->string) {
1823                 if (io->in)
1824                         memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
1825                                io->size);
1826         } else {
1827                 if (io->in) {
1828                         r = pio_copy_data(vcpu);
1829                         if (r) {
1830                                 kvm_x86_ops->cache_regs(vcpu);
1831                                 return r;
1832                         }
1833                 }
1834
1835                 delta = 1;
1836                 if (io->rep) {
1837                         delta *= io->cur_count;
1838                         /*
1839                          * The size of the register should really depend on
1840                          * the current address size.
1841                          */
1842                         vcpu->regs[VCPU_REGS_RCX] -= delta;
1843                 }
1844                 if (io->down)
1845                         delta = -delta;
1846                 delta *= io->size;
1847                 if (io->in)
1848                         vcpu->regs[VCPU_REGS_RDI] += delta;
1849                 else
1850                         vcpu->regs[VCPU_REGS_RSI] += delta;
1851         }
1852
1853         kvm_x86_ops->decache_regs(vcpu);
1854
1855         io->count -= io->cur_count;
1856         io->cur_count = 0;
1857
1858         return 0;
1859 }
1860
1861 static void kernel_pio(struct kvm_io_device *pio_dev,
1862                        struct kvm_vcpu *vcpu,
1863                        void *pd)
1864 {
1865         /* TODO: String I/O for in-kernel devices */
1866
1867         mutex_lock(&vcpu->kvm->lock);
1868         if (vcpu->pio.in)
1869                 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1870                                   vcpu->pio.size,
1871                                   pd);
1872         else
1873                 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1874                                    vcpu->pio.size,
1875                                    pd);
1876         mutex_unlock(&vcpu->kvm->lock);
1877 }
1878
1879 static void pio_string_write(struct kvm_io_device *pio_dev,
1880                              struct kvm_vcpu *vcpu)
1881 {
1882         struct kvm_pio_request *io = &vcpu->pio;
1883         void *pd = vcpu->pio_data;
1884         int i;
1885
1886         mutex_lock(&vcpu->kvm->lock);
1887         for (i = 0; i < io->cur_count; i++) {
1888                 kvm_iodevice_write(pio_dev, io->port,
1889                                    io->size,
1890                                    pd);
1891                 pd += io->size;
1892         }
1893         mutex_unlock(&vcpu->kvm->lock);
1894 }
1895
1896 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1897                                                gpa_t addr)
1898 {
1899         return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
1900 }
1901
1902 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1903                   int size, unsigned port)
1904 {
1905         struct kvm_io_device *pio_dev;
1906
1907         vcpu->run->exit_reason = KVM_EXIT_IO;
1908         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1909         vcpu->run->io.size = vcpu->pio.size = size;
1910         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1911         vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
1912         vcpu->run->io.port = vcpu->pio.port = port;
1913         vcpu->pio.in = in;
1914         vcpu->pio.string = 0;
1915         vcpu->pio.down = 0;
1916         vcpu->pio.guest_page_offset = 0;
1917         vcpu->pio.rep = 0;
1918
1919         kvm_x86_ops->cache_regs(vcpu);
1920         memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1921         kvm_x86_ops->decache_regs(vcpu);
1922
1923         kvm_x86_ops->skip_emulated_instruction(vcpu);
1924
1925         pio_dev = vcpu_find_pio_dev(vcpu, port);
1926         if (pio_dev) {
1927                 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1928                 complete_pio(vcpu);
1929                 return 1;
1930         }
1931         return 0;
1932 }
1933 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
1934
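/*
 * Emulate a string port I/O instruction (ins/outs).  The guest buffer
 * is pinned (two pages when a single element straddles a page
 * boundary), the transfer is clipped to what fits in one page, and
 * writes are forwarded to an in-kernel device if one claims the port;
 * otherwise the request is handed to userspace via KVM_EXIT_IO.
 */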
1935 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1936                   int size, unsigned long count, int down,
1937                   gva_t address, int rep, unsigned port)
1938 {
1939         unsigned now, in_page;
1940         int i, ret = 0;
1941         int nr_pages = 1;
1942         struct page *page;
1943         struct kvm_io_device *pio_dev;
1944
1945         vcpu->run->exit_reason = KVM_EXIT_IO;
1946         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1947         vcpu->run->io.size = vcpu->pio.size = size;
1948         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1949         vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
1950         vcpu->run->io.port = vcpu->pio.port = port;
1951         vcpu->pio.in = in;
1952         vcpu->pio.string = 1;
1953         vcpu->pio.down = down;
1954         vcpu->pio.guest_page_offset = offset_in_page(address);
1955         vcpu->pio.rep = rep;
1956
1957         if (!count) {
1958                 kvm_x86_ops->skip_emulated_instruction(vcpu);
1959                 return 1;
1960         }
1961
1962         if (!down)
1963                 in_page = PAGE_SIZE - offset_in_page(address);
1964         else
1965                 in_page = offset_in_page(address) + size;
1966         now = min(count, (unsigned long)in_page / size);
1967         if (!now) {
1968                 /*
1969                  * String I/O straddles page boundary.  Pin two guest pages
1970                  * so that we satisfy atomicity constraints.  Do just one
1971                  * transaction to avoid complexity.
1972                  */
1973                 nr_pages = 2;
1974                 now = 1;
1975         }
1976         if (down) {
1977                 /*
1978                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
1979                  */
1980                 pr_unimpl(vcpu, "guest string pio down\n");
1981                 inject_gp(vcpu);
1982                 return 1;
1983         }
1984         vcpu->run->io.count = now;
1985         vcpu->pio.cur_count = now;
1986
1987         if (vcpu->pio.cur_count == vcpu->pio.count)
1988                 kvm_x86_ops->skip_emulated_instruction(vcpu);
1989
1990         for (i = 0; i < nr_pages; ++i) {
1991                 mutex_lock(&vcpu->kvm->lock);
1992                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
1993                 vcpu->pio.guest_pages[i] = page;
1994                 mutex_unlock(&vcpu->kvm->lock);
1995                 if (!page) {
1996                         inject_gp(vcpu);
1997                         free_pio_guest_pages(vcpu);
1998                         return 1;
1999                 }
2000         }
2001
2002         pio_dev = vcpu_find_pio_dev(vcpu, port);
2003         if (!vcpu->pio.in) {
2004                 /* string PIO write */
2005                 ret = pio_copy_data(vcpu);
2006                 if (ret >= 0 && pio_dev) {
2007                         pio_string_write(pio_dev, vcpu);
2008                         complete_pio(vcpu);
2009                         if (vcpu->pio.count == 0)
2010                                 ret = 1;
2011                 }
2012         } else if (pio_dev)
2013                 pr_unimpl(vcpu, "no string pio read support yet, "
2014                        "port %x size %d count %ld\n",
2015                         port, size, count);
2016
2017         return ret;
2018 }
2019 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2020
2021 int kvm_arch_init(void *opaque)
2022 {
2023         int r;
2024         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2025
2026         r = kvm_mmu_module_init();
2027         if (r)
2028                 goto out_fail;
2029
2030         kvm_init_msr_list();
2031
2032         if (kvm_x86_ops) {
2033                 printk(KERN_ERR "kvm: already loaded the other module\n");
2034                 r = -EEXIST;
2035                 goto out;
2036         }
2037
2038         if (!ops->cpu_has_kvm_support()) {
2039                 printk(KERN_ERR "kvm: no hardware support\n");
2040                 r = -EOPNOTSUPP;
2041                 goto out;
2042         }
2043         if (ops->disabled_by_bios()) {
2044                 printk(KERN_ERR "kvm: disabled by bios\n");
2045                 r = -EOPNOTSUPP;
2046                 goto out;
2047         }
2048
2049         kvm_x86_ops = ops;
2050         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2051         return 0;
2052
2053 out:
2054         kvm_mmu_module_exit();
2055 out_fail:
2056         return r;
2057 }
2058
2059 void kvm_arch_exit(void)
2060 {
2061         kvm_x86_ops = NULL;
2062         kvm_mmu_module_exit();
2063 }
2064
2065 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2066 {
2067         ++vcpu->stat.halt_exits;
2068         if (irqchip_in_kernel(vcpu->kvm)) {
2069                 vcpu->mp_state = VCPU_MP_STATE_HALTED;
2070                 kvm_vcpu_block(vcpu);
2071                 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
2072                         return -EINTR;
2073                 return 1;
2074         } else {
2075                 vcpu->run->exit_reason = KVM_EXIT_HLT;
2076                 return 0;
2077         }
2078 }
2079 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2080
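/*
 * Hypercall ABI: the call number is taken from RAX and up to four
 * arguments from RBX, RCX, RDX and RSI; the return value is placed
 * back in RAX.  Outside long mode the values are truncated to 32 bits.
 * No hypercalls are implemented yet, so every number returns
 * -KVM_ENOSYS.
 */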
2081 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2082 {
2083         unsigned long nr, a0, a1, a2, a3, ret;
2084
2085         kvm_x86_ops->cache_regs(vcpu);
2086
2087         nr = vcpu->regs[VCPU_REGS_RAX];
2088         a0 = vcpu->regs[VCPU_REGS_RBX];
2089         a1 = vcpu->regs[VCPU_REGS_RCX];
2090         a2 = vcpu->regs[VCPU_REGS_RDX];
2091         a3 = vcpu->regs[VCPU_REGS_RSI];
2092
2093         if (!is_long_mode(vcpu)) {
2094                 nr &= 0xFFFFFFFF;
2095                 a0 &= 0xFFFFFFFF;
2096                 a1 &= 0xFFFFFFFF;
2097                 a2 &= 0xFFFFFFFF;
2098                 a3 &= 0xFFFFFFFF;
2099         }
2100
2101         switch (nr) {
2102         default:
2103                 ret = -KVM_ENOSYS;
2104                 break;
2105         }
2106         vcpu->regs[VCPU_REGS_RAX] = ret;
2107         kvm_x86_ops->decache_regs(vcpu);
2108         return 0;
2109 }
2110 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2111
2112 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2113 {
2114         char instruction[3];
2115         int ret = 0;
2116
2117         mutex_lock(&vcpu->kvm->lock);
2118
2119         /*
2120          * Blow out the MMU so that no other VCPU has an active mapping,
2121          * ensuring that the updated hypercall appears atomically across
2122          * all VCPUs.
2123          */
2124         kvm_mmu_zap_all(vcpu->kvm);
2125
2126         kvm_x86_ops->cache_regs(vcpu);
2127         kvm_x86_ops->patch_hypercall(vcpu, instruction);
2128         if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
2129             != X86EMUL_CONTINUE)
2130                 ret = -EFAULT;
2131
2132         mutex_unlock(&vcpu->kvm->lock);
2133
2134         return ret;
2135 }
2136
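/* Replace the low 32 bits of a 64-bit control register value. */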
2137 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2138 {
2139         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2140 }
2141
2142 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2143 {
2144         struct descriptor_table dt = { limit, base };
2145
2146         kvm_x86_ops->set_gdt(vcpu, &dt);
2147 }
2148
2149 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2150 {
2151         struct descriptor_table dt = { limit, base };
2152
2153         kvm_x86_ops->set_idt(vcpu, &dt);
2154 }
2155
2156 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2157                    unsigned long *rflags)
2158 {
2159         lmsw(vcpu, msw);
2160         *rflags = kvm_x86_ops->get_rflags(vcpu);
2161 }
2162
2163 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2164 {
2165         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2166         switch (cr) {
2167         case 0:
2168                 return vcpu->cr0;
2169         case 2:
2170                 return vcpu->cr2;
2171         case 3:
2172                 return vcpu->cr3;
2173         case 4:
2174                 return vcpu->cr4;
2175         default:
2176                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2177                 return 0;
2178         }
2179 }
2180
2181 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2182                      unsigned long *rflags)
2183 {
2184         switch (cr) {
2185         case 0:
2186                 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
2187                 *rflags = kvm_x86_ops->get_rflags(vcpu);
2188                 break;
2189         case 2:
2190                 vcpu->cr2 = val;
2191                 break;
2192         case 3:
2193                 set_cr3(vcpu, val);
2194                 break;
2195         case 4:
2196                 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
2197                 break;
2198         default:
2199                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2200         }
2201 }
2202
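/*
 * Some CPUID leaves (e.g. leaf 2 on certain processors) are stateful:
 * successive reads of the same leaf return successive entries.  Such
 * leaves are stored as multiple entries with the same function number;
 * KVM_CPUID_FLAG_STATE_READ_NEXT marks the one to return next.  This
 * rotates the flag from entry i to the next entry with the same
 * function, wrapping back to entry i itself if it is the only one.
 */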
2203 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2204 {
2205         struct kvm_cpuid_entry2 *e = &vcpu->cpuid_entries[i];
2206         int j, nent = vcpu->cpuid_nent;
2207
2208         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2209         /* when no next entry is found, the current entry[i] is reselected */
2210         for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
2211                 struct kvm_cpuid_entry2 *ej = &vcpu->cpuid_entries[j];
2212                 if (ej->function == e->function) {
2213                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2214                         return j;
2215                 }
2216         }
2217         return 0; /* silence gcc, even though control never reaches here */
2218 }
2219
2220 /* find an entry with matching function, matching index (if needed), and that
2221  * should be read next (if it's stateful) */
2222 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2223         u32 function, u32 index)
2224 {
2225         if (e->function != function)
2226                 return 0;
2227         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2228                 return 0;
2229         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2230                 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2231                 return 0;
2232         return 1;
2233 }
2234
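/*
 * Emulate the CPUID instruction.  An entry matching both function and
 * (where significant) index is used when available; otherwise the
 * highest entry in the same range (basic vs. extended) is returned,
 * which approximates how real hardware responds to out-of-range leaves.
 */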
2235 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2236 {
2237         int i;
2238         u32 function, index;
2239         struct kvm_cpuid_entry2 *e, *best;
2240
2241         kvm_x86_ops->cache_regs(vcpu);
2242         function = vcpu->regs[VCPU_REGS_RAX];
2243         index = vcpu->regs[VCPU_REGS_RCX];
2244         vcpu->regs[VCPU_REGS_RAX] = 0;
2245         vcpu->regs[VCPU_REGS_RBX] = 0;
2246         vcpu->regs[VCPU_REGS_RCX] = 0;
2247         vcpu->regs[VCPU_REGS_RDX] = 0;
2248         best = NULL;
2249         for (i = 0; i < vcpu->cpuid_nent; ++i) {
2250                 e = &vcpu->cpuid_entries[i];
2251                 if (is_matching_cpuid_entry(e, function, index)) {
2252                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2253                                 move_to_next_stateful_cpuid_entry(vcpu, i);
2254                         best = e;
2255                         break;
2256                 }
2257                 /*
2258                  * Both basic or both extended?
2259                  */
2260                 if (((e->function ^ function) & 0x80000000) == 0)
2261                         if (!best || e->function > best->function)
2262                                 best = e;
2263         }
2264         if (best) {
2265                 vcpu->regs[VCPU_REGS_RAX] = best->eax;
2266                 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
2267                 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
2268                 vcpu->regs[VCPU_REGS_RDX] = best->edx;
2269         }
2270         kvm_x86_ops->decache_regs(vcpu);
2271         kvm_x86_ops->skip_emulated_instruction(vcpu);
2272 }
2273 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2274
2275 /*
2276  * Check if userspace requested an interrupt window, and that the
2277  * interrupt window is open.
2278  *
2279  * No need to exit to userspace if we already have an interrupt queued.
2280  */
2281 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2282                                           struct kvm_run *kvm_run)
2283 {
2284         return (!vcpu->irq_summary &&
2285                 kvm_run->request_interrupt_window &&
2286                 vcpu->interrupt_window_open &&
2287                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2288 }
2289
2290 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2291                               struct kvm_run *kvm_run)
2292 {
2293         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2294         kvm_run->cr8 = get_cr8(vcpu);
2295         kvm_run->apic_base = kvm_get_apic_base(vcpu);
2296         if (irqchip_in_kernel(vcpu->kvm))
2297                 kvm_run->ready_for_interrupt_injection = 1;
2298         else
2299                 kvm_run->ready_for_interrupt_injection =
2300                                         (vcpu->interrupt_window_open &&
2301                                          vcpu->irq_summary == 0);
2302 }
2303
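/*
 * The inner vcpu run loop: reload the shadow MMU, inject any pending
 * timer and guest interrupts, then enter the guest with interrupts
 * disabled and preemption off.  After the hardware exit has been
 * handled we loop straight back into the guest unless a signal is
 * pending, userspace asked for an interrupt window, or a reschedule
 * is needed.
 */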
2304 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2305 {
2306         int r;
2307
2308         if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
2309                 pr_debug("vcpu %d received sipi with vector # %x\n",
2310                        vcpu->vcpu_id, vcpu->sipi_vector);
2311                 kvm_lapic_reset(vcpu);
2312                 r = kvm_x86_ops->vcpu_reset(vcpu);
2313                 if (r)
2314                         return r;
2315                 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2316         }
2317
2318 preempted:
2319         if (vcpu->guest_debug.enabled)
2320                 kvm_x86_ops->guest_debug_pre(vcpu);
2321
2322 again:
2323         r = kvm_mmu_reload(vcpu);
2324         if (unlikely(r))
2325                 goto out;
2326
2327         kvm_inject_pending_timer_irqs(vcpu);
2328
2329         preempt_disable();
2330
2331         kvm_x86_ops->prepare_guest_switch(vcpu);
2332         kvm_load_guest_fpu(vcpu);
2333
2334         local_irq_disable();
2335
2336         if (signal_pending(current)) {
2337                 local_irq_enable();
2338                 preempt_enable();
2339                 r = -EINTR;
2340                 kvm_run->exit_reason = KVM_EXIT_INTR;
2341                 ++vcpu->stat.signal_exits;
2342                 goto out;
2343         }
2344
2345         if (irqchip_in_kernel(vcpu->kvm))
2346                 kvm_x86_ops->inject_pending_irq(vcpu);
2347         else if (!vcpu->mmio_read_completed)
2348                 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2349
2350         vcpu->guest_mode = 1;
2351         kvm_guest_enter();
2352
2353         if (vcpu->requests)
2354                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2355                         kvm_x86_ops->tlb_flush(vcpu);
2356
2357         kvm_x86_ops->run(vcpu, kvm_run);
2358
2359         vcpu->guest_mode = 0;
2360         local_irq_enable();
2361
2362         ++vcpu->stat.exits;
2363
2364         /*
2365          * We must have an instruction between local_irq_enable() and
2366          * kvm_guest_exit(), so the timer interrupt isn't delayed by
2367          * the interrupt shadow.  The stat.exits increment will do nicely.
2368          * But we need to prevent reordering, hence this barrier():
2369          */
2370         barrier();
2371
2372         kvm_guest_exit();
2373
2374         preempt_enable();
2375
2376         /*
2377          * Profile KVM exit RIPs:
2378          */
2379         if (unlikely(prof_on == KVM_PROFILING)) {
2380                 kvm_x86_ops->cache_regs(vcpu);
2381                 profile_hit(KVM_PROFILING, (void *)vcpu->rip);
2382         }
2383
2384         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2385
2386         if (r > 0) {
2387                 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2388                         r = -EINTR;
2389                         kvm_run->exit_reason = KVM_EXIT_INTR;
2390                         ++vcpu->stat.request_irq_exits;
2391                         goto out;
2392                 }
2393                 if (!need_resched())
2394                         goto again;
2395         }
2396
2397 out:
2398         if (r > 0) {
2399                 kvm_resched(vcpu);
2400                 goto preempted;
2401         }
2402
2403         post_kvm_run_save(vcpu, kvm_run);
2404
2405         return r;
2406 }
2407
2408 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2409 {
2410         int r;
2411         sigset_t sigsaved;
2412
2413         vcpu_load(vcpu);
2414
2415         if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2416                 kvm_vcpu_block(vcpu);
2417                 vcpu_put(vcpu);
2418                 return -EAGAIN;
2419         }
2420
2421         if (vcpu->sigset_active)
2422                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2423
2424         /* re-sync apic's tpr */
2425         if (!irqchip_in_kernel(vcpu->kvm))
2426                 set_cr8(vcpu, kvm_run->cr8);
2427
2428         if (vcpu->pio.cur_count) {
2429                 r = complete_pio(vcpu);
2430                 if (r)
2431                         goto out;
2432         }
2433 #ifdef CONFIG_HAS_IOMEM
2434         if (vcpu->mmio_needed) {
2435                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2436                 vcpu->mmio_read_completed = 1;
2437                 vcpu->mmio_needed = 0;
2438                 r = emulate_instruction(vcpu, kvm_run,
2439                                         vcpu->mmio_fault_cr2, 0, 1);
2440                 if (r == EMULATE_DO_MMIO) {
2441                         /*
2442                          * Read-modify-write.  Back to userspace.
2443                          */
2444                         r = 0;
2445                         goto out;
2446                 }
2447         }
2448 #endif
2449         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2450                 kvm_x86_ops->cache_regs(vcpu);
2451                 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2452                 kvm_x86_ops->decache_regs(vcpu);
2453         }
2454
2455         r = __vcpu_run(vcpu, kvm_run);
2456
2457 out:
2458         if (vcpu->sigset_active)
2459                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2460
2461         vcpu_put(vcpu);
2462         return r;
2463 }
2464
2465 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2466 {
2467         vcpu_load(vcpu);
2468
2469         kvm_x86_ops->cache_regs(vcpu);
2470
2471         regs->rax = vcpu->regs[VCPU_REGS_RAX];
2472         regs->rbx = vcpu->regs[VCPU_REGS_RBX];
2473         regs->rcx = vcpu->regs[VCPU_REGS_RCX];
2474         regs->rdx = vcpu->regs[VCPU_REGS_RDX];
2475         regs->rsi = vcpu->regs[VCPU_REGS_RSI];
2476         regs->rdi = vcpu->regs[VCPU_REGS_RDI];
2477         regs->rsp = vcpu->regs[VCPU_REGS_RSP];
2478         regs->rbp = vcpu->regs[VCPU_REGS_RBP];
2479 #ifdef CONFIG_X86_64
2480         regs->r8 = vcpu->regs[VCPU_REGS_R8];
2481         regs->r9 = vcpu->regs[VCPU_REGS_R9];
2482         regs->r10 = vcpu->regs[VCPU_REGS_R10];
2483         regs->r11 = vcpu->regs[VCPU_REGS_R11];
2484         regs->r12 = vcpu->regs[VCPU_REGS_R12];
2485         regs->r13 = vcpu->regs[VCPU_REGS_R13];
2486         regs->r14 = vcpu->regs[VCPU_REGS_R14];
2487         regs->r15 = vcpu->regs[VCPU_REGS_R15];
2488 #endif
2489
2490         regs->rip = vcpu->rip;
2491         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2492
2493         /*
2494          * Don't leak debug flags in case they were set for guest debugging
2495          */
2496         if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2497                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2498
2499         vcpu_put(vcpu);
2500
2501         return 0;
2502 }
2503
2504 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2505 {
2506         vcpu_load(vcpu);
2507
2508         vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2509         vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2510         vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2511         vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2512         vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2513         vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2514         vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2515         vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
2516 #ifdef CONFIG_X86_64
2517         vcpu->regs[VCPU_REGS_R8] = regs->r8;
2518         vcpu->regs[VCPU_REGS_R9] = regs->r9;
2519         vcpu->regs[VCPU_REGS_R10] = regs->r10;
2520         vcpu->regs[VCPU_REGS_R11] = regs->r11;
2521         vcpu->regs[VCPU_REGS_R12] = regs->r12;
2522         vcpu->regs[VCPU_REGS_R13] = regs->r13;
2523         vcpu->regs[VCPU_REGS_R14] = regs->r14;
2524         vcpu->regs[VCPU_REGS_R15] = regs->r15;
2525 #endif
2526
2527         vcpu->rip = regs->rip;
2528         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
2529
2530         kvm_x86_ops->decache_regs(vcpu);
2531
2532         vcpu_put(vcpu);
2533
2534         return 0;
2535 }
2536
2537 static void get_segment(struct kvm_vcpu *vcpu,
2538                         struct kvm_segment *var, int seg)
2539 {
2540         return kvm_x86_ops->get_segment(vcpu, var, seg);
2541 }
2542
2543 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2544 {
2545         struct kvm_segment cs;
2546
2547         get_segment(vcpu, &cs, VCPU_SREG_CS);
2548         *db = cs.db;
2549         *l = cs.l;
2550 }
2551 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2552
2553 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2554                                   struct kvm_sregs *sregs)
2555 {
2556         struct descriptor_table dt;
2557         int pending_vec;
2558
2559         vcpu_load(vcpu);
2560
2561         get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2562         get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2563         get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2564         get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2565         get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2566         get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2567
2568         get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2569         get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2570
2571         kvm_x86_ops->get_idt(vcpu, &dt);
2572         sregs->idt.limit = dt.limit;
2573         sregs->idt.base = dt.base;
2574         kvm_x86_ops->get_gdt(vcpu, &dt);
2575         sregs->gdt.limit = dt.limit;
2576         sregs->gdt.base = dt.base;
2577
2578         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2579         sregs->cr0 = vcpu->cr0;
2580         sregs->cr2 = vcpu->cr2;
2581         sregs->cr3 = vcpu->cr3;
2582         sregs->cr4 = vcpu->cr4;
2583         sregs->cr8 = get_cr8(vcpu);
2584         sregs->efer = vcpu->shadow_efer;
2585         sregs->apic_base = kvm_get_apic_base(vcpu);
2586
2587         if (irqchip_in_kernel(vcpu->kvm)) {
2588                 memset(sregs->interrupt_bitmap, 0,
2589                        sizeof sregs->interrupt_bitmap);
2590                 pending_vec = kvm_x86_ops->get_irq(vcpu);
2591                 if (pending_vec >= 0)
2592                         set_bit(pending_vec,
2593                                 (unsigned long *)sregs->interrupt_bitmap);
2594         } else
2595                 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2596                        sizeof sregs->interrupt_bitmap);
2597
2598         vcpu_put(vcpu);
2599
2600         return 0;
2601 }
2602
2603 static void set_segment(struct kvm_vcpu *vcpu,
2604                         struct kvm_segment *var, int seg)
2605 {
2606         return kvm_x86_ops->set_segment(vcpu, var, seg);
2607 }
2608
2609 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2610                                   struct kvm_sregs *sregs)
2611 {
2612         int mmu_reset_needed = 0;
2613         int i, pending_vec, max_bits;
2614         struct descriptor_table dt;
2615
2616         vcpu_load(vcpu);
2617
2618         dt.limit = sregs->idt.limit;
2619         dt.base = sregs->idt.base;
2620         kvm_x86_ops->set_idt(vcpu, &dt);
2621         dt.limit = sregs->gdt.limit;
2622         dt.base = sregs->gdt.base;
2623         kvm_x86_ops->set_gdt(vcpu, &dt);
2624
2625         vcpu->cr2 = sregs->cr2;
2626         mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2627         vcpu->cr3 = sregs->cr3;
2628
2629         set_cr8(vcpu, sregs->cr8);
2630
2631         mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
2632 #ifdef CONFIG_X86_64
2633         kvm_x86_ops->set_efer(vcpu, sregs->efer);
2634 #endif
2635         kvm_set_apic_base(vcpu, sregs->apic_base);
2636
2637         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2638
2639         mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
2640         vcpu->cr0 = sregs->cr0;
2641         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
2642
2643         mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2644         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2645         if (!is_long_mode(vcpu) && is_pae(vcpu))
2646                 load_pdptrs(vcpu, vcpu->cr3);
2647
2648         if (mmu_reset_needed)
2649                 kvm_mmu_reset_context(vcpu);
2650
2651         if (!irqchip_in_kernel(vcpu->kvm)) {
2652                 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2653                        sizeof vcpu->irq_pending);
2654                 vcpu->irq_summary = 0;
2655                 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2656                         if (vcpu->irq_pending[i])
2657                                 __set_bit(i, &vcpu->irq_summary);
2658         } else {
2659                 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2660                 pending_vec = find_first_bit(
2661                         (const unsigned long *)sregs->interrupt_bitmap,
2662                         max_bits);
2663                 /* Only pending external irq is handled here */
2664                 if (pending_vec < max_bits) {
2665                         kvm_x86_ops->set_irq(vcpu, pending_vec);
2666                         pr_debug("Set back pending irq %d\n",
2667                                  pending_vec);
2668                 }
2669         }
2670
2671         set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2672         set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2673         set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2674         set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2675         set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2676         set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2677
2678         set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2679         set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2680
2681         vcpu_put(vcpu);
2682
2683         return 0;
2684 }
2685
2686 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2687                                     struct kvm_debug_guest *dbg)
2688 {
2689         int r;
2690
2691         vcpu_load(vcpu);
2692
2693         r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
2694
2695         vcpu_put(vcpu);
2696
2697         return r;
2698 }
2699
2700 /*
2701  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
2702  * we have asm/x86/processor.h
2703  */
2704 struct fxsave {
2705         u16     cwd;
2706         u16     swd;
2707         u16     twd;
2708         u16     fop;
2709         u64     rip;
2710         u64     rdp;
2711         u32     mxcsr;
2712         u32     mxcsr_mask;
2713         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
2714 #ifdef CONFIG_X86_64
2715         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
2716 #else
2717         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
2718 #endif
2719 };
2720
2721 /*
2722  * Translate a guest virtual address to a guest physical address.
2723  */
2724 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2725                                     struct kvm_translation *tr)
2726 {
2727         unsigned long vaddr = tr->linear_address;
2728         gpa_t gpa;
2729
2730         vcpu_load(vcpu);
2731         mutex_lock(&vcpu->kvm->lock);
2732         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2733         tr->physical_address = gpa;
2734         tr->valid = gpa != UNMAPPED_GVA;
2735         tr->writeable = 1;
2736         tr->usermode = 0;
2737         mutex_unlock(&vcpu->kvm->lock);
2738         vcpu_put(vcpu);
2739
2740         return 0;
2741 }
2742
2743 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2744 {
2745         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2746
2747         vcpu_load(vcpu);
2748
2749         memcpy(fpu->fpr, fxsave->st_space, 128);
2750         fpu->fcw = fxsave->cwd;
2751         fpu->fsw = fxsave->swd;
2752         fpu->ftwx = fxsave->twd;
2753         fpu->last_opcode = fxsave->fop;
2754         fpu->last_ip = fxsave->rip;
2755         fpu->last_dp = fxsave->rdp;
2756         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2757
2758         vcpu_put(vcpu);
2759
2760         return 0;
2761 }
2762
2763 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2764 {
2765         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2766
2767         vcpu_load(vcpu);
2768
2769         memcpy(fxsave->st_space, fpu->fpr, 128);
2770         fxsave->cwd = fpu->fcw;
2771         fxsave->swd = fpu->fsw;
2772         fxsave->twd = fpu->ftwx;
2773         fxsave->fop = fpu->last_opcode;
2774         fxsave->rip = fpu->last_ip;
2775         fxsave->rdp = fpu->last_dp;
2776         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2777
2778         vcpu_put(vcpu);
2779
2780         return 0;
2781 }
2782
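/*
 * Build the initial guest FPU image: save the host FPU state, reset
 * the FPU and capture that pristine state as the guest image, then
 * restore the host state.  The guest image is further sanitised by
 * forcing the default MXCSR (0x1f80) and clearing the register area.
 */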
2783 void fx_init(struct kvm_vcpu *vcpu)
2784 {
2785         unsigned after_mxcsr_mask;
2786
2787         /* Initialize guest FPU by resetting ours and saving into guest's */
2788         preempt_disable();
2789         fx_save(&vcpu->host_fx_image);
2790         fpu_init();
2791         fx_save(&vcpu->guest_fx_image);
2792         fx_restore(&vcpu->host_fx_image);
2793         preempt_enable();
2794
2795         vcpu->cr0 |= X86_CR0_ET;
2796         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
2797         vcpu->guest_fx_image.mxcsr = 0x1f80;
2798         memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
2799                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
2800 }
2801 EXPORT_SYMBOL_GPL(fx_init);
2802
2803 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
2804 {
2805         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
2806                 return;
2807
2808         vcpu->guest_fpu_loaded = 1;
2809         fx_save(&vcpu->host_fx_image);
2810         fx_restore(&vcpu->guest_fx_image);
2811 }
2812 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
2813
2814 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
2815 {
2816         if (!vcpu->guest_fpu_loaded)
2817                 return;
2818
2819         vcpu->guest_fpu_loaded = 0;
2820         fx_save(&vcpu->guest_fx_image);
2821         fx_restore(&vcpu->host_fx_image);
2822         ++vcpu->stat.fpu_reload;
2823 }
2824 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
2825
2826 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
2827 {
2828         kvm_x86_ops->vcpu_free(vcpu);
2829 }
2830
2831 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2832                                                 unsigned int id)
2833 {
2834         return kvm_x86_ops->vcpu_create(kvm, id);
2835 }
2836
2837 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2838 {
2839         int r;
2840
2841         /* We do fxsave: this must be aligned. */
2842         BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2843
2844         vcpu_load(vcpu);
2845         r = kvm_arch_vcpu_reset(vcpu);
2846         if (r == 0)
2847                 r = kvm_mmu_setup(vcpu);
2848         vcpu_put(vcpu);
2849         if (r < 0)
2850                 goto free_vcpu;
2851
2852         return 0;
2853 free_vcpu:
2854         kvm_x86_ops->vcpu_free(vcpu);
2855         return r;
2856 }
2857
2858 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2859 {
2860         vcpu_load(vcpu);
2861         kvm_mmu_unload(vcpu);
2862         vcpu_put(vcpu);
2863
2864         kvm_x86_ops->vcpu_free(vcpu);
2865 }
2866
2867 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
2868 {
2869         return kvm_x86_ops->vcpu_reset(vcpu);
2870 }
2871
2872 void kvm_arch_hardware_enable(void *garbage)
2873 {
2874         kvm_x86_ops->hardware_enable(garbage);
2875 }
2876
2877 void kvm_arch_hardware_disable(void *garbage)
2878 {
2879         kvm_x86_ops->hardware_disable(garbage);
2880 }
2881
2882 int kvm_arch_hardware_setup(void)
2883 {
2884         return kvm_x86_ops->hardware_setup();
2885 }
2886
2887 void kvm_arch_hardware_unsetup(void)
2888 {
2889         kvm_x86_ops->hardware_unsetup();
2890 }
2891
2892 void kvm_arch_check_processor_compat(void *rtn)
2893 {
2894         kvm_x86_ops->check_processor_compatibility(rtn);
2895 }
2896
2897 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2898 {
2899         struct page *page;
2900         struct kvm *kvm;
2901         int r;
2902
2903         BUG_ON(vcpu->kvm == NULL);
2904         kvm = vcpu->kvm;
2905
2906         vcpu->mmu.root_hpa = INVALID_PAGE;
2907         if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
2908                 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2909         else
2910                 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
2911
2912         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2913         if (!page) {
2914                 r = -ENOMEM;
2915                 goto fail;
2916         }
2917         vcpu->pio_data = page_address(page);
2918
2919         r = kvm_mmu_create(vcpu);
2920         if (r < 0)
2921                 goto fail_free_pio_data;
2922
2923         if (irqchip_in_kernel(kvm)) {
2924                 r = kvm_create_lapic(vcpu);
2925                 if (r < 0)
2926                         goto fail_mmu_destroy;
2927         }
2928
2929         return 0;
2930
2931 fail_mmu_destroy:
2932         kvm_mmu_destroy(vcpu);
2933 fail_free_pio_data:
2934         free_page((unsigned long)vcpu->pio_data);
2935 fail:
2936         return r;
2937 }
2938
2939 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
2940 {
2941         kvm_free_lapic(vcpu);
2942         kvm_mmu_destroy(vcpu);
2943         free_page((unsigned long)vcpu->pio_data);
2944 }
2945
2946 struct  kvm *kvm_arch_create_vm(void)
2947 {
2948         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
2949
2950         if (!kvm)
2951                 return ERR_PTR(-ENOMEM);
2952
2953         INIT_LIST_HEAD(&kvm->active_mmu_pages);
2954
2955         return kvm;
2956 }
2957
2958 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
2959 {
2960         vcpu_load(vcpu);
2961         kvm_mmu_unload(vcpu);
2962         vcpu_put(vcpu);
2963 }
2964
2965 static void kvm_free_vcpus(struct kvm *kvm)
2966 {
2967         unsigned int i;
2968
2969         /*
2970          * Unpin any mmu pages first.
2971          */
2972         for (i = 0; i < KVM_MAX_VCPUS; ++i)
2973                 if (kvm->vcpus[i])
2974                         kvm_unload_vcpu_mmu(kvm->vcpus[i]);
2975         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2976                 if (kvm->vcpus[i]) {
2977                         kvm_arch_vcpu_free(kvm->vcpus[i]);
2978                         kvm->vcpus[i] = NULL;
2979                 }
2980         }
2981
2982 }
2983
2984 void kvm_arch_destroy_vm(struct kvm *kvm)
2985 {
2986         kfree(kvm->vpic);
2987         kfree(kvm->vioapic);
2988         kvm_free_vcpus(kvm);
2989         kvm_free_physmem(kvm);
2990         kfree(kvm);
2991 }
2992
2993 int kvm_arch_set_memory_region(struct kvm *kvm,
2994                                 struct kvm_userspace_memory_region *mem,
2995                                 struct kvm_memory_slot old,
2996                                 int user_alloc)
2997 {
2998         int npages = mem->memory_size >> PAGE_SHIFT;
2999         struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
3000
3001         /* To keep backward compatibility with older userspace,
3002          * x86 needs to handle the !user_alloc case.
3003          */
3004         if (!user_alloc) {
3005                 if (npages && !old.rmap) {
3006                         down_write(&current->mm->mmap_sem);
3007                         memslot->userspace_addr = do_mmap(NULL, 0,
3008                                                      npages * PAGE_SIZE,
3009                                                      PROT_READ | PROT_WRITE,
3010                                                      MAP_SHARED | MAP_ANONYMOUS,
3011                                                      0);
3012                         up_write(&current->mm->mmap_sem);
3013
3014                         if (IS_ERR((void *)memslot->userspace_addr))
3015                                 return PTR_ERR((void *)memslot->userspace_addr);
3016                 } else {
3017                         if (!old.user_alloc && old.rmap) {
3018                                 int ret;
3019
3020                                 down_write(&current->mm->mmap_sem);
3021                                 ret = do_munmap(current->mm, old.userspace_addr,
3022                                                 old.npages * PAGE_SIZE);
3023                                 up_write(&current->mm->mmap_sem);
3024                                 if (ret < 0)
3025                                         printk(KERN_WARNING
3026                                        "kvm_vm_ioctl_set_memory_region: "
3027                                        "failed to munmap memory\n");
3028                         }
3029                 }
3030         }
3031
3032         if (!kvm->n_requested_mmu_pages) {
3033                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
3034                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
3035         }
3036
3037         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
3038         kvm_flush_remote_tlbs(kvm);
3039
3040         return 0;
3041 }