/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}
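
/*
 * Illustrative pattern (a sketch, not a call site added to this file):
 * code that touches guest-visible vcpu state brackets the access with
 * the pair above, exactly as kvm_vcpu_ioctl_interrupt() does later on:
 *
 *      vcpu_load(vcpu);
 *      ... read or modify vcpu state ...
 *      vcpu_put(vcpu);
 */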

static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();

        if (IS_ERR(kvm))
                goto out;

        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_arch_destroy_vm(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_destroy_vm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else {
                        down_write(&current->mm->mmap_sem);
                        new.userspace_addr = do_mmap(NULL, 0,
                                                     npages * PAGE_SIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED | MAP_ANONYMOUS,
                                                     0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)new.userspace_addr))
                                goto out_free;
                }
        } else {
                if (!old.user_alloc && old.rmap) {
                        int ret;

                        down_write(&current->mm->mmap_sem);
                        ret = do_munmap(current->mm, old.userspace_addr,
                                        old.npages * PAGE_SIZE);
                        up_write(&current->mm->mmap_sem);
                        if (ret < 0)
                                printk(KERN_WARNING
                                       "kvm_vm_ioctl_set_memory_region: "
                                       "failed to munmap memory\n");
                }
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        if (!kvm->n_requested_mmu_pages) {
                unsigned int n_pages;

                if (npages) {
                        n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
                        kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
                                                 n_pages);
                } else {
                        unsigned int nr_mmu_pages;

                        n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
                        nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
                        nr_mmu_pages = max(nr_mmu_pages,
                                        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
                        kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
                }
        }

        *memslot = new;

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        mutex_lock(&kvm->lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        mutex_unlock(&kvm->lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
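
/*
 * Caller sketch (illustrative; the slot number, addresses, 64KB size and
 * the 'uaddr' user buffer are assumptions, not values from this file):
 * registering one slot of user-allocated memory looks like
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .flags           = 0,
 *              .guest_phys_addr = 0x100000,
 *              .memory_size     = 64 * 1024,
 *              .userspace_addr  = (unsigned long)uaddr,
 *      };
 *      r = kvm_set_memory_region(kvm, &mem, 1);
 *
 * kvm_set_memory_region() takes kvm->lock itself; callers that already
 * hold it use __kvm_set_memory_region() directly.
 */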

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        mutex_lock(&kvm->lock);

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (any) {
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                kvm_flush_remote_tlbs(kvm);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;

out:
        mutex_unlock(&kvm->lock);
        return r;
}
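
/*
 * Userspace consumption sketch (illustrative; 'vm_fd' and 'bitmap' are
 * assumptions): each call returns and then clears the dirty bits, so a
 * dirty-page tracker re-visits only pages whose bit was set since the
 * previous call:
 *
 *      struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *
 *      ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *      bit i set in bitmap means page (slot base_gfn + i) was written
 */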

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->naliases; ++i) {
                alias = &kvm->aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return bad_page;
        }

        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);

        if (npages != 1) {
                get_page(bad_page);
                return bad_page;
        }

        return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page;

        down_read(&current->mm->mmap_sem);
        page = __gfn_to_page(kvm, gfn);
        up_read(&current->mm->mmap_sem);

        return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page(struct page *page)
{
        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);
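
/*
 * gfn_to_page() always returns with an elevated refcount (even the
 * bad_page error path above takes one), so each lookup must be paired
 * with kvm_release_page(); kvm_clear_guest_page() below is the model:
 *
 *      page = gfn_to_page(kvm, gfn);
 *      if (is_error_page(page)) {
 *              kvm_release_page(page);
 *              return -EFAULT;
 *      }
 *      ... use the page ...
 *      kvm_release_page(page);
 */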

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}
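
/*
 * Worked example (4KB pages): copying len = 5000 bytes starting at page
 * offset 3000 yields a first segment of 4096 - 3000 = 1096 bytes, then a
 * second segment of the remaining 3904 bytes at offset 0; the next call
 * returns 0 and the copy loops below terminate.
 */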

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
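
/*
 * Caller sketch (illustrative; 'gpa' and the struct type are assumptions):
 * a guest-physical structure that may straddle a page boundary is read
 * with a single call, the per-page splitting handled by next_segment():
 *
 *      struct some_guest_data buf;
 *
 *      if (kvm_read_guest(kvm, gpa, &buf, sizeof(buf)))
 *              return -EFAULT;
 */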

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        void *page_virt;
        struct page *page;

        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return -EFAULT;
        }
        page_virt = kmap_atomic(page, KM_USER0);

        memset(page_virt + offset, 0, len);

        kunmap_atomic(page_virt, KM_USER0);
        kvm_release_page(page);
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->wq, &wait);

        /*
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!kvm_cpu_has_interrupt(vcpu)
               && !signal_pending(current)
               && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
               && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}

static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
                                    unsigned long address,
                                    int *type)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        unsigned long pgoff;
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (pgoff == 0)
                page = virt_to_page(vcpu->run);
        else if (pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->pio_data);
        else
                return NOPAGE_SIGBUS;
        get_page(page);
        if (type != NULL)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        fput(vcpu->kvm->filp);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd, r;
        struct inode *inode;
        struct file *file;

        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (r)
                return r;
        atomic_inc(&vcpu->kvm->filp->f_count);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs kvm_regs;

                memset(&kvm_regs, 0, sizeof kvm_regs);
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address,
                                  int *type)
{
        struct kvm *kvm = vma->vm_file->private_data;
        unsigned long pgoff;
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (!kvm_is_visible_gfn(kvm, pgoff))
                return NOPAGE_SIGBUS;
        /* current->mm->mmap_sem is already held so call lockless version */
        page = __gfn_to_page(kvm, pgoff);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return NOPAGE_SIGBUS;
        }
        if (type != NULL)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl   = kvm_vm_ioctl,
        .mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd, r;
        struct inode *inode;
        struct file *file;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
        if (r) {
                kvm_destroy_vm(kvm);
                return r;
        }

        kvm->filp = file;

        return fd;
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension((long)argp);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = 2 * PAGE_SIZE;
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}
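
/*
 * Userspace bring-up sketch (illustrative only; error handling omitted):
 * the /dev/kvm character device below is driven by this ioctl sequence,
 * where KVM_GET_VCPU_MMAP_SIZE reports the 2 * PAGE_SIZE returned above:
 *
 *      kvm_fd  = open("/dev/kvm", O_RDWR);
 *      ioctl(kvm_fd, KVM_GET_API_VERSION, 0);   (must match KVM_API_VERSION)
 *      vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *      vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *      size    = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *      run     = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     vcpu_fd, 0);
 *      ioctl(vcpu_fd, KVM_RUN, 0);
 */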

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
        kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr))
                        return pos;
        }

        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}
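
/*
 * Registration sketch (illustrative; the device embedding and callback
 * wiring are assumptions about the struct kvm_io_device contract, not
 * code from this file):
 *
 *      dev->in_range = my_in_range;     (claims a range of guest addresses)
 *      kvm_io_bus_register_dev(&kvm->mmio_bus, dev);
 *
 * Dispatch then goes through kvm_io_bus_find_dev() above; the bus holds
 * at most NR_IOBUS_DEVS devices and registration BUG()s past that.
 */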

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

static u64 vm_stat_get(void *_offset)
{
        unsigned offset = (long)_offset;
        u64 total = 0;
        struct kvm *kvm;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                total += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
        unsigned offset = (long)_offset;
        u64 total = 0;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                total += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        r = kvm_mmu_module_init();
        if (r)
                goto out4;

        kvm_init_debug();

        r = kvm_arch_init(opaque);
        if (r)
                goto out4;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
        }

        on_each_cpu(hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_2;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_3;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_4;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
        sysdev_unregister(&kvm_sysdev);
out_free_3:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
        kvm_arch_hardware_unsetup();
out:
        kvm_arch_exit();
        kvm_exit_debug();
        kvm_mmu_module_exit();
out4:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
        __free_page(bad_page);
        kvm_mmu_module_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);