KVM: Move assigned device code to own file
virt/kvm/kvm_main.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "iodev.h"
19
20 #include <linux/kvm_host.h>
21 #include <linux/kvm.h>
22 #include <linux/module.h>
23 #include <linux/errno.h>
24 #include <linux/percpu.h>
25 #include <linux/gfp.h>
26 #include <linux/mm.h>
27 #include <linux/miscdevice.h>
28 #include <linux/vmalloc.h>
29 #include <linux/reboot.h>
30 #include <linux/debugfs.h>
31 #include <linux/highmem.h>
32 #include <linux/file.h>
33 #include <linux/sysdev.h>
34 #include <linux/cpu.h>
35 #include <linux/sched.h>
36 #include <linux/cpumask.h>
37 #include <linux/smp.h>
38 #include <linux/anon_inodes.h>
39 #include <linux/profile.h>
40 #include <linux/kvm_para.h>
41 #include <linux/pagemap.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/bitops.h>
45 #include <linux/spinlock.h>
46
47 #include <asm/processor.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm/pgtable.h>
51
52 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
53 #include "coalesced_mmio.h"
54 #endif
55
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/kvm.h>
58
59 MODULE_AUTHOR("Qumranet");
60 MODULE_LICENSE("GPL");
61
62 /*
63  * Ordering of locks:
64  *
65  *              kvm->slots_lock --> kvm->lock --> kvm->irq_lock
66  */
67
68 DEFINE_SPINLOCK(kvm_lock);
69 LIST_HEAD(vm_list);
70
71 static cpumask_var_t cpus_hardware_enabled;
72
73 struct kmem_cache *kvm_vcpu_cache;
74 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
75
76 static __read_mostly struct preempt_ops kvm_preempt_ops;
77
78 struct dentry *kvm_debugfs_dir;
79
80 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
81                            unsigned long arg);
82
83 static bool kvm_rebooting;
84
85 static bool largepages_enabled = true;
86
87 inline int kvm_is_mmio_pfn(pfn_t pfn)
88 {
89         if (pfn_valid(pfn)) {
90                 struct page *page = compound_head(pfn_to_page(pfn));
91                 return PageReserved(page);
92         }
93
94         return true;
95 }
96
97 /*
98  * Switches to the specified vcpu, until a matching vcpu_put()
99  */
100 void vcpu_load(struct kvm_vcpu *vcpu)
101 {
102         int cpu;
103
104         mutex_lock(&vcpu->mutex);
105         cpu = get_cpu();
106         preempt_notifier_register(&vcpu->preempt_notifier);
107         kvm_arch_vcpu_load(vcpu, cpu);
108         put_cpu();
109 }
110
111 void vcpu_put(struct kvm_vcpu *vcpu)
112 {
113         preempt_disable();
114         kvm_arch_vcpu_put(vcpu);
115         preempt_notifier_unregister(&vcpu->preempt_notifier);
116         preempt_enable();
117         mutex_unlock(&vcpu->mutex);
118 }
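
/*
 * A minimal usage sketch (hypothetical helper, not part of this file):
 * arch ioctl handlers bracket any access to vcpu state with
 * vcpu_load()/vcpu_put(), e.g.:
 */
static int example_touch_vcpu_state(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);        /* takes vcpu->mutex, loads state on this cpu */
        /* ... read or modify vcpu registers/state here ... */
        vcpu_put(vcpu);         /* saves state, drops vcpu->mutex */
        return 0;
}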
119
120 static void ack_flush(void *_completed)
121 {
122 }
123
124 static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
125 {
126         int i, cpu, me;
127         cpumask_var_t cpus;
128         bool called = true;
129         struct kvm_vcpu *vcpu;
130
131         zalloc_cpumask_var(&cpus, GFP_ATOMIC);
132
133         spin_lock(&kvm->requests_lock);
134         me = smp_processor_id();
135         kvm_for_each_vcpu(i, vcpu, kvm) {
136                 if (test_and_set_bit(req, &vcpu->requests))
137                         continue;
138                 cpu = vcpu->cpu;
139                 if (cpus != NULL && cpu != -1 && cpu != me)
140                         cpumask_set_cpu(cpu, cpus);
141         }
142         if (unlikely(cpus == NULL))
143                 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
144         else if (!cpumask_empty(cpus))
145                 smp_call_function_many(cpus, ack_flush, NULL, 1);
146         else
147                 called = false;
148         spin_unlock(&kvm->requests_lock);
149         free_cpumask_var(cpus);
150         return called;
151 }
152
153 void kvm_flush_remote_tlbs(struct kvm *kvm)
154 {
155         if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
156                 ++kvm->stat.remote_tlb_flush;
157 }
158
159 void kvm_reload_remote_mmus(struct kvm *kvm)
160 {
161         make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
162 }
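
/*
 * Consumer side of the request bits, sketched (hypothetical helper,
 * assuming the pattern the arch vcpu run loops use): each vcpu clears
 * its bit before acting on it, which is why make_all_cpus_request()
 * skips vcpus that already have the bit set.
 */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
        if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) {
                /* arch code flushes this vcpu's TLB here */
        }
        if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
                /* arch code drops its shadow page tables here */
        }
}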
163
164 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
165 {
166         struct page *page;
167         int r;
168
169         mutex_init(&vcpu->mutex);
170         vcpu->cpu = -1;
171         vcpu->kvm = kvm;
172         vcpu->vcpu_id = id;
173         init_waitqueue_head(&vcpu->wq);
174
175         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
176         if (!page) {
177                 r = -ENOMEM;
178                 goto fail;
179         }
180         vcpu->run = page_address(page);
181
182         r = kvm_arch_vcpu_init(vcpu);
183         if (r < 0)
184                 goto fail_free_run;
185         return 0;
186
187 fail_free_run:
188         free_page((unsigned long)vcpu->run);
189 fail:
190         return r;
191 }
192 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
193
194 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
195 {
196         kvm_arch_vcpu_uninit(vcpu);
197         free_page((unsigned long)vcpu->run);
198 }
199 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
200
201 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
202 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
203 {
204         return container_of(mn, struct kvm, mmu_notifier);
205 }
206
207 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
208                                              struct mm_struct *mm,
209                                              unsigned long address)
210 {
211         struct kvm *kvm = mmu_notifier_to_kvm(mn);
212         int need_tlb_flush;
213
214         /*
215          * When ->invalidate_page runs, the linux pte has been zapped
216          * already but the page is still allocated until
217          * ->invalidate_page returns. So if we increase the sequence
218          * here the kvm page fault will notice if the spte can't be
219          * established because the page is going to be freed. If
220          * instead the kvm page fault establishes the spte before
221          * ->invalidate_page runs, kvm_unmap_hva will release it
222          * before returning.
223          *
224  * The sequence increase only needs to be seen at spin_unlock
225          * time, and not at spin_lock time.
226          *
227          * Increasing the sequence after the spin_unlock would be
228          * unsafe because the kvm page fault could then establish the
229          * pte after kvm_unmap_hva returned, without noticing the page
230          * is going to be freed.
231          */
232         spin_lock(&kvm->mmu_lock);
233         kvm->mmu_notifier_seq++;
234         need_tlb_flush = kvm_unmap_hva(kvm, address);
235         spin_unlock(&kvm->mmu_lock);
236
237         /* we have to flush the TLB before the pages can be freed */
238         if (need_tlb_flush)
239                 kvm_flush_remote_tlbs(kvm);
240
241 }
242
243 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
244                                         struct mm_struct *mm,
245                                         unsigned long address,
246                                         pte_t pte)
247 {
248         struct kvm *kvm = mmu_notifier_to_kvm(mn);
249
250         spin_lock(&kvm->mmu_lock);
251         kvm->mmu_notifier_seq++;
252         kvm_set_spte_hva(kvm, address, pte);
253         spin_unlock(&kvm->mmu_lock);
254 }
255
256 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
257                                                     struct mm_struct *mm,
258                                                     unsigned long start,
259                                                     unsigned long end)
260 {
261         struct kvm *kvm = mmu_notifier_to_kvm(mn);
262         int need_tlb_flush = 0;
263
264         spin_lock(&kvm->mmu_lock);
265         /*
266          * The count increase must become visible at unlock time as no
267          * spte can be established without taking the mmu_lock and
268          * count is also read inside the mmu_lock critical section.
269          */
270         kvm->mmu_notifier_count++;
271         for (; start < end; start += PAGE_SIZE)
272                 need_tlb_flush |= kvm_unmap_hva(kvm, start);
273         spin_unlock(&kvm->mmu_lock);
274
275         /* we have to flush the TLB before the pages can be freed */
276         if (need_tlb_flush)
277                 kvm_flush_remote_tlbs(kvm);
278 }
279
280 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
281                                                   struct mm_struct *mm,
282                                                   unsigned long start,
283                                                   unsigned long end)
284 {
285         struct kvm *kvm = mmu_notifier_to_kvm(mn);
286
287         spin_lock(&kvm->mmu_lock);
288         /*
289          * This sequence increase will notify the kvm page fault that
290          * the page that is going to be mapped in the spte could have
291          * been freed.
292          */
293         kvm->mmu_notifier_seq++;
294         /*
295          * The above sequence increase must be visible before the
296          * below count decrease but both values are read by the kvm
297          * page fault under mmu_lock spinlock so we don't need to add
298          * a smb_wmb() here in between the two.
299          */
300         kvm->mmu_notifier_count--;
301         spin_unlock(&kvm->mmu_lock);
302
303         BUG_ON(kvm->mmu_notifier_count < 0);
304 }
305
306 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
307                                               struct mm_struct *mm,
308                                               unsigned long address)
309 {
310         struct kvm *kvm = mmu_notifier_to_kvm(mn);
311         int young;
312
313         spin_lock(&kvm->mmu_lock);
314         young = kvm_age_hva(kvm, address);
315         spin_unlock(&kvm->mmu_lock);
316
317         if (young)
318                 kvm_flush_remote_tlbs(kvm);
319
320         return young;
321 }
322
323 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
324                                      struct mm_struct *mm)
325 {
326         struct kvm *kvm = mmu_notifier_to_kvm(mn);
327         kvm_arch_flush_shadow(kvm);
328 }
329
330 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
331         .invalidate_page        = kvm_mmu_notifier_invalidate_page,
332         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
333         .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
334         .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
335         .change_pte             = kvm_mmu_notifier_change_pte,
336         .release                = kvm_mmu_notifier_release,
337 };
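
/*
 * How a page fault path consumes mmu_notifier_seq/count, as a sketch
 * of the pattern the comments above describe (hypothetical helper; the
 * x86 code does this re-check via the mmu_notifier_retry() helper):
 */
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        unsigned long mmu_seq = vcpu->kvm->mmu_notifier_seq;
        pfn_t pfn;

        smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);       /* may sleep */

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq)) {
                /* an invalidate ran meanwhile; drop the pfn and retry */
                spin_unlock(&vcpu->kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                return -EAGAIN;
        }
        /* ... safe to establish the spte for pfn here ... */
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 0;
}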
338 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
339
340 static struct kvm *kvm_create_vm(void)
341 {
342         struct kvm *kvm = kvm_arch_create_vm();
343 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
344         struct page *page;
345 #endif
346
347         if (IS_ERR(kvm))
348                 goto out;
349 #ifdef CONFIG_HAVE_KVM_IRQCHIP
350         INIT_HLIST_HEAD(&kvm->mask_notifier_list);
351         INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
352 #endif
353
354 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
355         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
356         if (!page) {
357                 kfree(kvm);
358                 return ERR_PTR(-ENOMEM);
359         }
360         kvm->coalesced_mmio_ring =
361                         (struct kvm_coalesced_mmio_ring *)page_address(page);
362 #endif
363
364 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
365         {
366                 int err;
367                 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
368                 err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
369                 if (err) {
370 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
371                         put_page(page);
372 #endif
373                         kfree(kvm);
374                         return ERR_PTR(err);
375                 }
376         }
377 #endif
378
379         kvm->mm = current->mm;
380         atomic_inc(&kvm->mm->mm_count);
381         spin_lock_init(&kvm->mmu_lock);
382         spin_lock_init(&kvm->requests_lock);
383         kvm_io_bus_init(&kvm->pio_bus);
384         kvm_eventfd_init(kvm);
385         mutex_init(&kvm->lock);
386         mutex_init(&kvm->irq_lock);
387         kvm_io_bus_init(&kvm->mmio_bus);
388         init_rwsem(&kvm->slots_lock);
389         atomic_set(&kvm->users_count, 1);
390         spin_lock(&kvm_lock);
391         list_add(&kvm->vm_list, &vm_list);
392         spin_unlock(&kvm_lock);
393 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
394         kvm_coalesced_mmio_init(kvm);
395 #endif
396 out:
397         return kvm;
398 }
399
400 /*
401  * Free any memory in @free but not in @dont.
402  */
403 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
404                                   struct kvm_memory_slot *dont)
405 {
406         int i;
407
408         if (!dont || free->rmap != dont->rmap)
409                 vfree(free->rmap);
410
411         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
412                 vfree(free->dirty_bitmap);
413
414
415         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
416                 if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
417                         vfree(free->lpage_info[i]);
418                         free->lpage_info[i] = NULL;
419                 }
420         }
421
422         free->npages = 0;
423         free->dirty_bitmap = NULL;
424         free->rmap = NULL;
425 }
426
427 void kvm_free_physmem(struct kvm *kvm)
428 {
429         int i;
430
431         for (i = 0; i < kvm->nmemslots; ++i)
432                 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
433 }
434
435 static void kvm_destroy_vm(struct kvm *kvm)
436 {
437         struct mm_struct *mm = kvm->mm;
438
439         kvm_arch_sync_events(kvm);
440         spin_lock(&kvm_lock);
441         list_del(&kvm->vm_list);
442         spin_unlock(&kvm_lock);
443         kvm_free_irq_routing(kvm);
444         kvm_io_bus_destroy(&kvm->pio_bus);
445         kvm_io_bus_destroy(&kvm->mmio_bus);
446 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
447         if (kvm->coalesced_mmio_ring != NULL)
448                 free_page((unsigned long)kvm->coalesced_mmio_ring);
449 #endif
450 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
451         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
452 #else
453         kvm_arch_flush_shadow(kvm);
454 #endif
455         kvm_arch_destroy_vm(kvm);
456         mmdrop(mm);
457 }
458
459 void kvm_get_kvm(struct kvm *kvm)
460 {
461         atomic_inc(&kvm->users_count);
462 }
463 EXPORT_SYMBOL_GPL(kvm_get_kvm);
464
465 void kvm_put_kvm(struct kvm *kvm)
466 {
467         if (atomic_dec_and_test(&kvm->users_count))
468                 kvm_destroy_vm(kvm);
469 }
470 EXPORT_SYMBOL_GPL(kvm_put_kvm);
471
472
473 static int kvm_vm_release(struct inode *inode, struct file *filp)
474 {
475         struct kvm *kvm = filp->private_data;
476
477         kvm_irqfd_release(kvm);
478
479         kvm_put_kvm(kvm);
480         return 0;
481 }
482
483 /*
484  * Allocate some memory and give it an address in the guest physical address
485  * space.
486  *
487  * Discontiguous memory is allowed, mostly for framebuffers.
488  *
489  * Must be called holding kvm->slots_lock for write.
490  */
491 int __kvm_set_memory_region(struct kvm *kvm,
492                             struct kvm_userspace_memory_region *mem,
493                             int user_alloc)
494 {
495         int r;
496         gfn_t base_gfn;
497         unsigned long npages;
498         unsigned long i;
499         struct kvm_memory_slot *memslot;
500         struct kvm_memory_slot old, new;
501
502         r = -EINVAL;
503         /* General sanity checks */
504         if (mem->memory_size & (PAGE_SIZE - 1))
505                 goto out;
506         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
507                 goto out;
508         if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
509                 goto out;
510         if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
511                 goto out;
512         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
513                 goto out;
514
515         memslot = &kvm->memslots[mem->slot];
516         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
517         npages = mem->memory_size >> PAGE_SHIFT;
518
519         if (!npages)
520                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
521
522         new = old = *memslot;
523
524         new.base_gfn = base_gfn;
525         new.npages = npages;
526         new.flags = mem->flags;
527
528         /* Disallow changing a memory slot's size. */
529         r = -EINVAL;
530         if (npages && old.npages && npages != old.npages)
531                 goto out_free;
532
533         /* Check for overlaps */
534         r = -EEXIST;
535         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
536                 struct kvm_memory_slot *s = &kvm->memslots[i];
537
538                 if (s == memslot || !s->npages)
539                         continue;
540                 if (!((base_gfn + npages <= s->base_gfn) ||
541                       (base_gfn >= s->base_gfn + s->npages)))
542                         goto out_free;
543         }
544
545         /* Free page dirty bitmap if unneeded */
546         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
547                 new.dirty_bitmap = NULL;
548
549         r = -ENOMEM;
550
551         /* Allocate if a slot is being created */
552 #ifndef CONFIG_S390
553         if (npages && !new.rmap) {
554                 new.rmap = vmalloc(npages * sizeof(struct page *));
555
556                 if (!new.rmap)
557                         goto out_free;
558
559                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
560
561                 new.user_alloc = user_alloc;
562                 /*
563                  * hva_to_rmmap() serializes with the mmu_lock and to be
564                  * safe it has to ignore memslots with !user_alloc &&
565                  * !userspace_addr.
566                  */
567                 if (user_alloc)
568                         new.userspace_addr = mem->userspace_addr;
569                 else
570                         new.userspace_addr = 0;
571         }
572         if (!npages)
573                 goto skip_lpage;
574
575         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
576                 unsigned long ugfn;
577                 unsigned long j;
578                 int lpages;
579                 int level = i + 2;
580
581                 /* Avoid unused variable warning if no large pages */
582                 (void)level;
583
584                 if (new.lpage_info[i])
585                         continue;
586
587                 lpages = 1 + (base_gfn + npages - 1) /
588                              KVM_PAGES_PER_HPAGE(level);
589                 lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
590
591                 new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
592
593                 if (!new.lpage_info[i])
594                         goto out_free;
595
596                 memset(new.lpage_info[i], 0,
597                        lpages * sizeof(*new.lpage_info[i]));
598
599                 if (base_gfn % KVM_PAGES_PER_HPAGE(level))
600                         new.lpage_info[i][0].write_count = 1;
601                 if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
602                         new.lpage_info[i][lpages - 1].write_count = 1;
603                 ugfn = new.userspace_addr >> PAGE_SHIFT;
604                 /*
605                  * If the gfn and userspace address are not aligned wrt each
606                  * other, or if explicitly asked to, disable large page
607                  * support for this slot
608                  */
609                 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
610                     !largepages_enabled)
611                         for (j = 0; j < lpages; ++j)
612                                 new.lpage_info[i][j].write_count = 1;
613         }
614
615 skip_lpage:
616
617         /* Allocate page dirty bitmap if needed */
618         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
619                 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
620
621                 new.dirty_bitmap = vmalloc(dirty_bytes);
622                 if (!new.dirty_bitmap)
623                         goto out_free;
624                 memset(new.dirty_bitmap, 0, dirty_bytes);
625                 if (old.npages)
626                         kvm_arch_flush_shadow(kvm);
627         }
628 #else  /* CONFIG_S390 */
629         new.user_alloc = user_alloc;
630         if (user_alloc)
631                 new.userspace_addr = mem->userspace_addr;
632 #endif /* not defined CONFIG_S390 */
633
634         if (!npages)
635                 kvm_arch_flush_shadow(kvm);
636
637         spin_lock(&kvm->mmu_lock);
638         if (mem->slot >= kvm->nmemslots)
639                 kvm->nmemslots = mem->slot + 1;
640
641         *memslot = new;
642         spin_unlock(&kvm->mmu_lock);
643
644         r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
645         if (r) {
646                 spin_lock(&kvm->mmu_lock);
647                 *memslot = old;
648                 spin_unlock(&kvm->mmu_lock);
649                 goto out_free;
650         }
651
652         kvm_free_physmem_slot(&old, npages ? &new : NULL);
653         /* Slot deletion case: we have to update the current slot */
654         spin_lock(&kvm->mmu_lock);
655         if (!npages)
656                 *memslot = old;
657         spin_unlock(&kvm->mmu_lock);
658 #ifdef CONFIG_DMAR
659         /* map the pages in iommu page table */
660         r = kvm_iommu_map_pages(kvm, base_gfn, npages);
661         if (r)
662                 goto out;
663 #endif
664         return 0;
665
666 out_free:
667         kvm_free_physmem_slot(&new, &old);
668 out:
669         return r;
670
671 }
672 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
673
674 int kvm_set_memory_region(struct kvm *kvm,
675                           struct kvm_userspace_memory_region *mem,
676                           int user_alloc)
677 {
678         int r;
679
680         down_write(&kvm->slots_lock);
681         r = __kvm_set_memory_region(kvm, mem, user_alloc);
682         up_write(&kvm->slots_lock);
683         return r;
684 }
685 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
686
687 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
688                                    struct
689                                    kvm_userspace_memory_region *mem,
690                                    int user_alloc)
691 {
692         if (mem->slot >= KVM_MEMORY_SLOTS)
693                 return -EINVAL;
694         return kvm_set_memory_region(kvm, mem, user_alloc);
695 }
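
/*
 * Userspace view, sketched (illustrative only; needs <sys/ioctl.h> and
 * <linux/kvm.h>): the VM fd reaches __kvm_set_memory_region() through
 * the KVM_SET_USER_MEMORY_REGION ioctl handled in kvm_vm_ioctl() below.
 */
static int example_add_ram(int vm_fd, void *ram, unsigned long ram_size)
{
        struct kvm_userspace_memory_region mem = {
                .slot            = 0,
                .flags           = KVM_MEM_LOG_DIRTY_PAGES,
                .guest_phys_addr = 0,
                .memory_size     = ram_size,            /* page aligned */
                .userspace_addr  = (unsigned long)ram,  /* from mmap() */
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}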
696
697 int kvm_get_dirty_log(struct kvm *kvm,
698                         struct kvm_dirty_log *log, int *is_dirty)
699 {
700         struct kvm_memory_slot *memslot;
701         int r, i;
702         int n;
703         unsigned long any = 0;
704
705         r = -EINVAL;
706         if (log->slot >= KVM_MEMORY_SLOTS)
707                 goto out;
708
709         memslot = &kvm->memslots[log->slot];
710         r = -ENOENT;
711         if (!memslot->dirty_bitmap)
712                 goto out;
713
714         n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
715
716         for (i = 0; !any && i < n/sizeof(long); ++i)
717                 any = memslot->dirty_bitmap[i];
718
719         r = -EFAULT;
720         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
721                 goto out;
722
723         if (any)
724                 *is_dirty = 1;
725
726         r = 0;
727 out:
728         return r;
729 }
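
/*
 * Matching userspace call, sketched (illustrative only): the caller
 * supplies a bitmap with one bit per page in the slot, rounded up to
 * longs, exactly the ALIGN(npages, BITS_PER_LONG) / 8 size computed
 * above.
 */
static int example_fetch_dirty_log(int vm_fd, void *bitmap)
{
        struct kvm_dirty_log log = {
                .slot         = 0,
                .dirty_bitmap = bitmap,
        };

        return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}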
730
731 void kvm_disable_largepages(void)
732 {
733         largepages_enabled = false;
734 }
735 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
736
737 int is_error_page(struct page *page)
738 {
739         return page == bad_page;
740 }
741 EXPORT_SYMBOL_GPL(is_error_page);
742
743 int is_error_pfn(pfn_t pfn)
744 {
745         return pfn == bad_pfn;
746 }
747 EXPORT_SYMBOL_GPL(is_error_pfn);
748
749 static inline unsigned long bad_hva(void)
750 {
751         return PAGE_OFFSET;
752 }
753
754 int kvm_is_error_hva(unsigned long addr)
755 {
756         return addr == bad_hva();
757 }
758 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
759
760 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
761 {
762         int i;
763
764         for (i = 0; i < kvm->nmemslots; ++i) {
765                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
766
767                 if (gfn >= memslot->base_gfn
768                     && gfn < memslot->base_gfn + memslot->npages)
769                         return memslot;
770         }
771         return NULL;
772 }
773 EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
774
775 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
776 {
777         gfn = unalias_gfn(kvm, gfn);
778         return gfn_to_memslot_unaliased(kvm, gfn);
779 }
780
781 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
782 {
783         int i;
784
785         gfn = unalias_gfn(kvm, gfn);
786         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
787                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
788
789                 if (gfn >= memslot->base_gfn
790                     && gfn < memslot->base_gfn + memslot->npages)
791                         return 1;
792         }
793         return 0;
794 }
795 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
796
797 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
798 {
799         struct kvm_memory_slot *slot;
800
801         gfn = unalias_gfn(kvm, gfn);
802         slot = gfn_to_memslot_unaliased(kvm, gfn);
803         if (!slot)
804                 return bad_hva();
805         return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
806 }
807 EXPORT_SYMBOL_GPL(gfn_to_hva);
808
809 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
810 {
811         struct page *page[1];
812         unsigned long addr;
813         int npages;
814         pfn_t pfn;
815
816         might_sleep();
817
818         addr = gfn_to_hva(kvm, gfn);
819         if (kvm_is_error_hva(addr)) {
820                 get_page(bad_page);
821                 return page_to_pfn(bad_page);
822         }
823
824         npages = get_user_pages_fast(addr, 1, 1, page);
825
826         if (unlikely(npages != 1)) {
827                 struct vm_area_struct *vma;
828
829                 down_read(&current->mm->mmap_sem);
830                 vma = find_vma(current->mm, addr);
831
832                 if (vma == NULL || addr < vma->vm_start ||
833                     !(vma->vm_flags & VM_PFNMAP)) {
834                         up_read(&current->mm->mmap_sem);
835                         get_page(bad_page);
836                         return page_to_pfn(bad_page);
837                 }
838
839                 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
840                 up_read(&current->mm->mmap_sem);
841                 BUG_ON(!kvm_is_mmio_pfn(pfn));
842         } else
843                 pfn = page_to_pfn(page[0]);
844
845         return pfn;
846 }
847
848 EXPORT_SYMBOL_GPL(gfn_to_pfn);
849
850 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
851 {
852         pfn_t pfn;
853
854         pfn = gfn_to_pfn(kvm, gfn);
855         if (!kvm_is_mmio_pfn(pfn))
856                 return pfn_to_page(pfn);
857
858         WARN_ON(kvm_is_mmio_pfn(pfn));
859
860         get_page(bad_page);
861         return bad_page;
862 }
863
864 EXPORT_SYMBOL_GPL(gfn_to_page);
865
866 void kvm_release_page_clean(struct page *page)
867 {
868         kvm_release_pfn_clean(page_to_pfn(page));
869 }
870 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
871
872 void kvm_release_pfn_clean(pfn_t pfn)
873 {
874         if (!kvm_is_mmio_pfn(pfn))
875                 put_page(pfn_to_page(pfn));
876 }
877 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
878
879 void kvm_release_page_dirty(struct page *page)
880 {
881         kvm_release_pfn_dirty(page_to_pfn(page));
882 }
883 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
884
885 void kvm_release_pfn_dirty(pfn_t pfn)
886 {
887         kvm_set_pfn_dirty(pfn);
888         kvm_release_pfn_clean(pfn);
889 }
890 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
891
892 void kvm_set_page_dirty(struct page *page)
893 {
894         kvm_set_pfn_dirty(page_to_pfn(page));
895 }
896 EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
897
898 void kvm_set_pfn_dirty(pfn_t pfn)
899 {
900         if (!kvm_is_mmio_pfn(pfn)) {
901                 struct page *page = pfn_to_page(pfn);
902                 if (!PageReserved(page))
903                         SetPageDirty(page);
904         }
905 }
906 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
907
908 void kvm_set_pfn_accessed(pfn_t pfn)
909 {
910         if (!kvm_is_mmio_pfn(pfn))
911                 mark_page_accessed(pfn_to_page(pfn));
912 }
913 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
914
915 void kvm_get_pfn(pfn_t pfn)
916 {
917         if (!kvm_is_mmio_pfn(pfn))
918                 get_page(pfn_to_page(pfn));
919 }
920 EXPORT_SYMBOL_GPL(kvm_get_pfn);
921
922 static int next_segment(unsigned long len, int offset)
923 {
924         if (len > PAGE_SIZE - offset)
925                 return PAGE_SIZE - offset;
926         else
927                 return len;
928 }
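
/*
 * Worked example of the chunking next_segment() drives: with a 4096
 * byte page, a 5000 byte access at page offset 3000 is split into a
 * 1096 byte segment (to the end of the first page) and then a 3904
 * byte segment, with offset reset to 0 after the first iteration.
 */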
929
930 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
931                         int len)
932 {
933         int r;
934         unsigned long addr;
935
936         addr = gfn_to_hva(kvm, gfn);
937         if (kvm_is_error_hva(addr))
938                 return -EFAULT;
939         r = copy_from_user(data, (void __user *)addr + offset, len);
940         if (r)
941                 return -EFAULT;
942         return 0;
943 }
944 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
945
946 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
947 {
948         gfn_t gfn = gpa >> PAGE_SHIFT;
949         int seg;
950         int offset = offset_in_page(gpa);
951         int ret;
952
953         while ((seg = next_segment(len, offset)) != 0) {
954                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
955                 if (ret < 0)
956                         return ret;
957                 offset = 0;
958                 len -= seg;
959                 data += seg;
960                 ++gfn;
961         }
962         return 0;
963 }
964 EXPORT_SYMBOL_GPL(kvm_read_guest);
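
/*
 * Illustrative in-kernel use (hypothetical caller): a guest object may
 * straddle a page boundary, which is exactly what the segment loop
 * above copes with.
 */
static int example_read_guest_u64(struct kvm *kvm, gpa_t gpa, u64 *val)
{
        return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}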
965
966 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
967                           unsigned long len)
968 {
969         int r;
970         unsigned long addr;
971         gfn_t gfn = gpa >> PAGE_SHIFT;
972         int offset = offset_in_page(gpa);
973
974         addr = gfn_to_hva(kvm, gfn);
975         if (kvm_is_error_hva(addr))
976                 return -EFAULT;
977         pagefault_disable();
978         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
979         pagefault_enable();
980         if (r)
981                 return -EFAULT;
982         return 0;
983 }
984 EXPORT_SYMBOL(kvm_read_guest_atomic);
985
986 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
987                          int offset, int len)
988 {
989         int r;
990         unsigned long addr;
991
992         addr = gfn_to_hva(kvm, gfn);
993         if (kvm_is_error_hva(addr))
994                 return -EFAULT;
995         r = copy_to_user((void __user *)addr + offset, data, len);
996         if (r)
997                 return -EFAULT;
998         mark_page_dirty(kvm, gfn);
999         return 0;
1000 }
1001 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1002
1003 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1004                     unsigned long len)
1005 {
1006         gfn_t gfn = gpa >> PAGE_SHIFT;
1007         int seg;
1008         int offset = offset_in_page(gpa);
1009         int ret;
1010
1011         while ((seg = next_segment(len, offset)) != 0) {
1012                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1013                 if (ret < 0)
1014                         return ret;
1015                 offset = 0;
1016                 len -= seg;
1017                 data += seg;
1018                 ++gfn;
1019         }
1020         return 0;
1021 }
1022
1023 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1024 {
1025         return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
1026 }
1027 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1028
1029 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1030 {
1031         gfn_t gfn = gpa >> PAGE_SHIFT;
1032         int seg;
1033         int offset = offset_in_page(gpa);
1034         int ret;
1035
1036         while ((seg = next_segment(len, offset)) != 0) {
1037                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1038                 if (ret < 0)
1039                         return ret;
1040                 offset = 0;
1041                 len -= seg;
1042                 ++gfn;
1043         }
1044         return 0;
1045 }
1046 EXPORT_SYMBOL_GPL(kvm_clear_guest);
1047
1048 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1049 {
1050         struct kvm_memory_slot *memslot;
1051
1052         gfn = unalias_gfn(kvm, gfn);
1053         memslot = gfn_to_memslot_unaliased(kvm, gfn);
1054         if (memslot && memslot->dirty_bitmap) {
1055                 unsigned long rel_gfn = gfn - memslot->base_gfn;
1056
1057                 /* avoid RMW */
1058                 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
1059                         set_bit(rel_gfn, memslot->dirty_bitmap);
1060         }
1061 }
1062
1063 /*
1064  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1065  */
1066 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1067 {
1068         DEFINE_WAIT(wait);
1069
1070         for (;;) {
1071                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1072
1073                 if (kvm_arch_vcpu_runnable(vcpu)) {
1074                         set_bit(KVM_REQ_UNHALT, &vcpu->requests);
1075                         break;
1076                 }
1077                 if (kvm_cpu_has_pending_timer(vcpu))
1078                         break;
1079                 if (signal_pending(current))
1080                         break;
1081
1082                 schedule();
1083         }
1084
1085         finish_wait(&vcpu->wq, &wait);
1086 }
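
/*
 * The waker side, sketched from the pattern the arch interrupt
 * injection code uses (hypothetical helper): waking vcpu->wq takes the
 * halted vcpu back through the loop above, where it re-checks
 * runnability, pending timers and signals.
 */
static void example_wake_vcpu(struct kvm_vcpu *vcpu)
{
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
}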
1087
1088 void kvm_resched(struct kvm_vcpu *vcpu)
1089 {
1090         if (!need_resched())
1091                 return;
1092         cond_resched();
1093 }
1094 EXPORT_SYMBOL_GPL(kvm_resched);
1095
1096 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1097 {
1098         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1099         struct page *page;
1100
1101         if (vmf->pgoff == 0)
1102                 page = virt_to_page(vcpu->run);
1103 #ifdef CONFIG_X86
1104         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1105                 page = virt_to_page(vcpu->arch.pio_data);
1106 #endif
1107 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1108         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1109                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1110 #endif
1111         else
1112                 return VM_FAULT_SIGBUS;
1113         get_page(page);
1114         vmf->page = page;
1115         return 0;
1116 }
1117
1118 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1119         .fault = kvm_vcpu_fault,
1120 };
1121
1122 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1123 {
1124         vma->vm_ops = &kvm_vcpu_vm_ops;
1125         return 0;
1126 }
1127
1128 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1129 {
1130         struct kvm_vcpu *vcpu = filp->private_data;
1131
1132         kvm_put_kvm(vcpu->kvm);
1133         return 0;
1134 }
1135
1136 static struct file_operations kvm_vcpu_fops = {
1137         .release        = kvm_vcpu_release,
1138         .unlocked_ioctl = kvm_vcpu_ioctl,
1139         .compat_ioctl   = kvm_vcpu_ioctl,
1140         .mmap           = kvm_vcpu_mmap,
1141 };
1142
1143 /*
1144  * Allocates an inode for the vcpu.
1145  */
1146 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1147 {
1148         return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
1149 }
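
/*
 * Userspace side, sketched (illustrative only; needs <sys/mman.h>,
 * <sys/ioctl.h> and <linux/kvm.h>): the fd returned here is mmap()ed
 * to reach the kvm_run area allocated in kvm_vcpu_init(); see
 * kvm_vcpu_fault() below for the pages backing that mapping. The
 * caller must still compare the result against MAP_FAILED.
 */
static struct kvm_run *example_create_vcpu(int kvm_fd, int vm_fd,
                                           int *vcpu_fd)
{
        long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

        *vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
        if (size < 0 || *vcpu_fd < 0)
                return NULL;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    *vcpu_fd, 0);
}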
1150
1151 /*
1152  * Creates some virtual cpus.  Good luck creating more than one.
1153  */
1154 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1155 {
1156         int r;
1157         struct kvm_vcpu *vcpu, *v;
1158
1159         vcpu = kvm_arch_vcpu_create(kvm, id);
1160         if (IS_ERR(vcpu))
1161                 return PTR_ERR(vcpu);
1162
1163         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1164
1165         r = kvm_arch_vcpu_setup(vcpu);
1166         if (r)
1167                 return r;
1168
1169         mutex_lock(&kvm->lock);
1170         if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1171                 r = -EINVAL;
1172                 goto vcpu_destroy;
1173         }
1174
1175         kvm_for_each_vcpu(r, v, kvm)
1176                 if (v->vcpu_id == id) {
1177                         r = -EEXIST;
1178                         goto vcpu_destroy;
1179                 }
1180
1181         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1182
1183         /* Now it's all set up, let userspace reach it */
1184         kvm_get_kvm(kvm);
1185         r = create_vcpu_fd(vcpu);
1186         if (r < 0) {
1187                 kvm_put_kvm(kvm);
1188                 goto vcpu_destroy;
1189         }
1190
1191         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1192         smp_wmb();
1193         atomic_inc(&kvm->online_vcpus);
1194
1195 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1196         if (kvm->bsp_vcpu_id == id)
1197                 kvm->bsp_vcpu = vcpu;
1198 #endif
1199         mutex_unlock(&kvm->lock);
1200         return r;
1201
1202 vcpu_destroy:
1203         mutex_unlock(&kvm->lock);
1204         kvm_arch_vcpu_destroy(vcpu);
1205         return r;
1206 }
1207
1208 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1209 {
1210         if (sigset) {
1211                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1212                 vcpu->sigset_active = 1;
1213                 vcpu->sigset = *sigset;
1214         } else
1215                 vcpu->sigset_active = 0;
1216         return 0;
1217 }
1218
1219 static long kvm_vcpu_ioctl(struct file *filp,
1220                            unsigned int ioctl, unsigned long arg)
1221 {
1222         struct kvm_vcpu *vcpu = filp->private_data;
1223         void __user *argp = (void __user *)arg;
1224         int r;
1225         struct kvm_fpu *fpu = NULL;
1226         struct kvm_sregs *kvm_sregs = NULL;
1227
1228         if (vcpu->kvm->mm != current->mm)
1229                 return -EIO;
1230         switch (ioctl) {
1231         case KVM_RUN:
1232                 r = -EINVAL;
1233                 if (arg)
1234                         goto out;
1235                 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1236                 break;
1237         case KVM_GET_REGS: {
1238                 struct kvm_regs *kvm_regs;
1239
1240                 r = -ENOMEM;
1241                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1242                 if (!kvm_regs)
1243                         goto out;
1244                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1245                 if (r)
1246                         goto out_free1;
1247                 r = -EFAULT;
1248                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1249                         goto out_free1;
1250                 r = 0;
1251 out_free1:
1252                 kfree(kvm_regs);
1253                 break;
1254         }
1255         case KVM_SET_REGS: {
1256                 struct kvm_regs *kvm_regs;
1257
1258                 r = -ENOMEM;
1259                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1260                 if (!kvm_regs)
1261                         goto out;
1262                 r = -EFAULT;
1263                 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1264                         goto out_free2;
1265                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1266                 if (r)
1267                         goto out_free2;
1268                 r = 0;
1269 out_free2:
1270                 kfree(kvm_regs);
1271                 break;
1272         }
1273         case KVM_GET_SREGS: {
1274                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1275                 r = -ENOMEM;
1276                 if (!kvm_sregs)
1277                         goto out;
1278                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1279                 if (r)
1280                         goto out;
1281                 r = -EFAULT;
1282                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1283                         goto out;
1284                 r = 0;
1285                 break;
1286         }
1287         case KVM_SET_SREGS: {
1288                 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1289                 r = -ENOMEM;
1290                 if (!kvm_sregs)
1291                         goto out;
1292                 r = -EFAULT;
1293                 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1294                         goto out;
1295                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1296                 if (r)
1297                         goto out;
1298                 r = 0;
1299                 break;
1300         }
1301         case KVM_GET_MP_STATE: {
1302                 struct kvm_mp_state mp_state;
1303
1304                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1305                 if (r)
1306                         goto out;
1307                 r = -EFAULT;
1308                 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1309                         goto out;
1310                 r = 0;
1311                 break;
1312         }
1313         case KVM_SET_MP_STATE: {
1314                 struct kvm_mp_state mp_state;
1315
1316                 r = -EFAULT;
1317                 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1318                         goto out;
1319                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1320                 if (r)
1321                         goto out;
1322                 r = 0;
1323                 break;
1324         }
1325         case KVM_TRANSLATE: {
1326                 struct kvm_translation tr;
1327
1328                 r = -EFAULT;
1329                 if (copy_from_user(&tr, argp, sizeof tr))
1330                         goto out;
1331                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
1332                 if (r)
1333                         goto out;
1334                 r = -EFAULT;
1335                 if (copy_to_user(argp, &tr, sizeof tr))
1336                         goto out;
1337                 r = 0;
1338                 break;
1339         }
1340         case KVM_SET_GUEST_DEBUG: {
1341                 struct kvm_guest_debug dbg;
1342
1343                 r = -EFAULT;
1344                 if (copy_from_user(&dbg, argp, sizeof dbg))
1345                         goto out;
1346                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
1347                 if (r)
1348                         goto out;
1349                 r = 0;
1350                 break;
1351         }
1352         case KVM_SET_SIGNAL_MASK: {
1353                 struct kvm_signal_mask __user *sigmask_arg = argp;
1354                 struct kvm_signal_mask kvm_sigmask;
1355                 sigset_t sigset, *p;
1356
1357                 p = NULL;
1358                 if (argp) {
1359                         r = -EFAULT;
1360                         if (copy_from_user(&kvm_sigmask, argp,
1361                                            sizeof kvm_sigmask))
1362                                 goto out;
1363                         r = -EINVAL;
1364                         if (kvm_sigmask.len != sizeof sigset)
1365                                 goto out;
1366                         r = -EFAULT;
1367                         if (copy_from_user(&sigset, sigmask_arg->sigset,
1368                                            sizeof sigset))
1369                                 goto out;
1370                         p = &sigset;
1371                 }
1372                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1373                 break;
1374         }
1375         case KVM_GET_FPU: {
1376                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1377                 r = -ENOMEM;
1378                 if (!fpu)
1379                         goto out;
1380                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1381                 if (r)
1382                         goto out;
1383                 r = -EFAULT;
1384                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1385                         goto out;
1386                 r = 0;
1387                 break;
1388         }
1389         case KVM_SET_FPU: {
1390                 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1391                 r = -ENOMEM;
1392                 if (!fpu)
1393                         goto out;
1394                 r = -EFAULT;
1395                 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1396                         goto out;
1397                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1398                 if (r)
1399                         goto out;
1400                 r = 0;
1401                 break;
1402         }
1403         default:
1404                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1405         }
1406 out:
1407         kfree(fpu);
1408         kfree(kvm_sregs);
1409         return r;
1410 }
1411
1412 static long kvm_vm_ioctl(struct file *filp,
1413                            unsigned int ioctl, unsigned long arg)
1414 {
1415         struct kvm *kvm = filp->private_data;
1416         void __user *argp = (void __user *)arg;
1417         int r;
1418
1419         if (kvm->mm != current->mm)
1420                 return -EIO;
1421         switch (ioctl) {
1422         case KVM_CREATE_VCPU:
1423                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1424                 if (r < 0)
1425                         goto out;
1426                 break;
1427         case KVM_SET_USER_MEMORY_REGION: {
1428                 struct kvm_userspace_memory_region kvm_userspace_mem;
1429
1430                 r = -EFAULT;
1431                 if (copy_from_user(&kvm_userspace_mem, argp,
1432                                                 sizeof kvm_userspace_mem))
1433                         goto out;
1434
1435                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
1436                 if (r)
1437                         goto out;
1438                 break;
1439         }
1440         case KVM_GET_DIRTY_LOG: {
1441                 struct kvm_dirty_log log;
1442
1443                 r = -EFAULT;
1444                 if (copy_from_user(&log, argp, sizeof log))
1445                         goto out;
1446                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1447                 if (r)
1448                         goto out;
1449                 break;
1450         }
1451 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1452         case KVM_REGISTER_COALESCED_MMIO: {
1453                 struct kvm_coalesced_mmio_zone zone;
1454                 r = -EFAULT;
1455                 if (copy_from_user(&zone, argp, sizeof zone))
1456                         goto out;
1457                 r = -ENXIO;
1458                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
1459                 if (r)
1460                         goto out;
1461                 r = 0;
1462                 break;
1463         }
1464         case KVM_UNREGISTER_COALESCED_MMIO: {
1465                 struct kvm_coalesced_mmio_zone zone;
1466                 r = -EFAULT;
1467                 if (copy_from_user(&zone, argp, sizeof zone))
1468                         goto out;
1469                 r = -ENXIO;
1470                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
1471                 if (r)
1472                         goto out;
1473                 r = 0;
1474                 break;
1475         }
1476 #endif
1477         case KVM_IRQFD: {
1478                 struct kvm_irqfd data;
1479
1480                 r = -EFAULT;
1481                 if (copy_from_user(&data, argp, sizeof data))
1482                         goto out;
1483                 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
1484                 break;
1485         }
1486         case KVM_IOEVENTFD: {
1487                 struct kvm_ioeventfd data;
1488
1489                 r = -EFAULT;
1490                 if (copy_from_user(&data, argp, sizeof data))
1491                         goto out;
1492                 r = kvm_ioeventfd(kvm, &data);
1493                 break;
1494         }
1495 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1496         case KVM_SET_BOOT_CPU_ID:
1497                 r = 0;
1498                 mutex_lock(&kvm->lock);
1499                 if (atomic_read(&kvm->online_vcpus) != 0)
1500                         r = -EBUSY;
1501                 else
1502                         kvm->bsp_vcpu_id = arg;
1503                 mutex_unlock(&kvm->lock);
1504                 break;
1505 #endif
1506         default:
1507                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1508                 if (r == -ENOTTY)
1509                         r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
1510         }
1511 out:
1512         return r;
1513 }
1514
1515 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1516 {
1517         struct page *page[1];
1518         unsigned long addr;
1519         int npages;
1520         gfn_t gfn = vmf->pgoff;
1521         struct kvm *kvm = vma->vm_file->private_data;
1522
1523         addr = gfn_to_hva(kvm, gfn);
1524         if (kvm_is_error_hva(addr))
1525                 return VM_FAULT_SIGBUS;
1526
1527         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1528                                 NULL);
1529         if (unlikely(npages != 1))
1530                 return VM_FAULT_SIGBUS;
1531
1532         vmf->page = page[0];
1533         return 0;
1534 }
1535
1536 static const struct vm_operations_struct kvm_vm_vm_ops = {
1537         .fault = kvm_vm_fault,
1538 };
1539
1540 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1541 {
1542         vma->vm_ops = &kvm_vm_vm_ops;
1543         return 0;
1544 }
1545
1546 static struct file_operations kvm_vm_fops = {
1547         .release        = kvm_vm_release,
1548         .unlocked_ioctl = kvm_vm_ioctl,
1549         .compat_ioctl   = kvm_vm_ioctl,
1550         .mmap           = kvm_vm_mmap,
1551 };
1552
1553 static int kvm_dev_ioctl_create_vm(void)
1554 {
1555         int fd;
1556         struct kvm *kvm;
1557
1558         kvm = kvm_create_vm();
1559         if (IS_ERR(kvm))
1560                 return PTR_ERR(kvm);
1561         fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
1562         if (fd < 0)
1563                 kvm_put_kvm(kvm);
1564
1565         return fd;
1566 }
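
/*
 * The first steps from userspace, sketched (illustrative only; needs
 * <fcntl.h>, <sys/ioctl.h> and <linux/kvm.h>): open the misc device
 * registered below as kvm_dev, sanity-check the ABI, and ask for a VM
 * fd, which lands in kvm_dev_ioctl_create_vm() above.
 */
static int example_open_vm(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
                return -1;
        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
                return -1;      /* ABI mismatch */
        return ioctl(kvm_fd, KVM_CREATE_VM, 0);
}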
1567
1568 static long kvm_dev_ioctl_check_extension_generic(long arg)
1569 {
1570         switch (arg) {
1571         case KVM_CAP_USER_MEMORY:
1572         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
1573         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
1574 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1575         case KVM_CAP_SET_BOOT_CPU_ID:
1576 #endif
1577                 return 1;
1578 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1579         case KVM_CAP_IRQ_ROUTING:
1580                 return KVM_MAX_IRQ_ROUTES;
1581 #endif
1582         default:
1583                 break;
1584         }
1585         return kvm_dev_ioctl_check_extension(arg);
1586 }
1587
1588 static long kvm_dev_ioctl(struct file *filp,
1589                           unsigned int ioctl, unsigned long arg)
1590 {
1591         long r = -EINVAL;
1592
1593         switch (ioctl) {
1594         case KVM_GET_API_VERSION:
1595                 r = -EINVAL;
1596                 if (arg)
1597                         goto out;
1598                 r = KVM_API_VERSION;
1599                 break;
1600         case KVM_CREATE_VM:
1601                 r = -EINVAL;
1602                 if (arg)
1603                         goto out;
1604                 r = kvm_dev_ioctl_create_vm();
1605                 break;
1606         case KVM_CHECK_EXTENSION:
1607                 r = kvm_dev_ioctl_check_extension_generic(arg);
1608                 break;
1609         case KVM_GET_VCPU_MMAP_SIZE:
1610                 r = -EINVAL;
1611                 if (arg)
1612                         goto out;
1613                 r = PAGE_SIZE;     /* struct kvm_run */
1614 #ifdef CONFIG_X86
1615                 r += PAGE_SIZE;    /* pio data page */
1616 #endif
1617 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1618                 r += PAGE_SIZE;    /* coalesced mmio ring page */
1619 #endif
1620                 break;
1621         case KVM_TRACE_ENABLE:
1622         case KVM_TRACE_PAUSE:
1623         case KVM_TRACE_DISABLE:
1624                 r = -EOPNOTSUPP;
1625                 break;
1626         default:
1627                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
1628         }
1629 out:
1630         return r;
1631 }
1632
1633 static struct file_operations kvm_chardev_ops = {
1634         .unlocked_ioctl = kvm_dev_ioctl,
1635         .compat_ioctl   = kvm_dev_ioctl,
1636 };
1637
1638 static struct miscdevice kvm_dev = {
1639         KVM_MINOR,
1640         "kvm",
1641         &kvm_chardev_ops,
1642 };
1643
1644 static void hardware_enable(void *junk)
1645 {
1646         int cpu = raw_smp_processor_id();
1647
1648         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
1649                 return;
1650         cpumask_set_cpu(cpu, cpus_hardware_enabled);
1651         kvm_arch_hardware_enable(NULL);
1652 }
1653
1654 static void hardware_disable(void *junk)
1655 {
1656         int cpu = raw_smp_processor_id();
1657
1658         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
1659                 return;
1660         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
1661         kvm_arch_hardware_disable(NULL);
1662 }
1663
1664 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1665                            void *v)
1666 {
1667         int cpu = (long)v;
1668
1669         val &= ~CPU_TASKS_FROZEN;
1670         switch (val) {
1671         case CPU_DYING:
1672                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
1673                        cpu);
1674                 hardware_disable(NULL);
1675                 break;
1676         case CPU_UP_CANCELED:
1677                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
1678                        cpu);
1679                 smp_call_function_single(cpu, hardware_disable, NULL, 1);
1680                 break;
1681         case CPU_ONLINE:
1682                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
1683                        cpu);
1684                 smp_call_function_single(cpu, hardware_enable, NULL, 1);
1685                 break;
1686         }
1687         return NOTIFY_OK;
1688 }
1689
1690
asmlinkage void kvm_handle_fault_on_reboot(void)
{
        if (kvm_rebooting)
                /* spin while reset goes on */
                while (true)
                        cpu_relax();
        /* Fault while not rebooting.  We want the trace. */
        BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        /*
         * Some (well, at least mine) BIOSes hang on reboot if
         * in vmx root mode.
         *
         * Intel TXT also requires VMX to be off on all CPUs when the
         * system shuts down.
         */
        printk(KERN_INFO "kvm: exiting hardware virtualization\n");
        kvm_rebooting = true;
        on_each_cpu(hardware_disable, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

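/*
 * An I/O bus is a fixed-size array of up to NR_IOBUS_DEVS devices.
 * Registration and unregistration take kvm->slots_lock for writing;
 * callers of kvm_io_bus_read()/kvm_io_bus_write() are expected to hold
 * slots_lock, as the comments below note.
 */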
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
                     int len, const void *val)
{
        int i;

        for (i = 0; i < bus->dev_count; i++)
                if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
                        return 0;
        return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
        int i;

        for (i = 0; i < bus->dev_count; i++)
                if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
                        return 0;
        return -EOPNOTSUPP;
}

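/*
 * Locked registration helper: takes slots_lock for writing, then
 * delegates to __kvm_io_bus_register_dev() below.
 */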
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
                            struct kvm_io_device *dev)
{
        int ret;

        down_write(&kvm->slots_lock);
        ret = __kvm_io_bus_register_dev(bus, dev);
        up_write(&kvm->slots_lock);

        return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                              struct kvm_io_device *dev)
{
        if (bus->dev_count >= NR_IOBUS_DEVS)
                return -ENOSPC;

        bus->devs[bus->dev_count++] = dev;

        return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
                               struct kvm_io_bus *bus,
                               struct kvm_io_device *dev)
{
        down_write(&kvm->slots_lock);
        __kvm_io_bus_unregister_dev(bus, dev);
        up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
                                 struct kvm_io_device *dev)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                if (bus->devs[i] == dev) {
                        /* Swap in the last entry; ordering is not kept. */
                        bus->devs[i] = bus->devs[--bus->dev_count];
                        break;
                }
        }
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

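/*
 * debugfs accessor: sum a u32 counter located at 'offset' inside each
 * struct kvm on vm_list, under kvm_lock.
 */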
static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

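/*
 * Same as vm_stat_get(), but the counter lives in each vcpu, so the
 * sum runs over every vcpu of every VM.
 */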
static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u32 *)((void *)vcpu + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

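/*
 * Create /sys/kernel/debug/kvm and one read-only file per entry in the
 * arch-provided debugfs_entries[] table; p->kind selects between the
 * per-VM and per-vcpu accessors above.
 */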
static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

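/*
 * Preempt notifiers let a vcpu keep its guest state loaded across
 * preemption: kvm_sched_out() saves state when the vcpu thread is
 * scheduled out, and kvm_sched_in() reloads it on whichever CPU the
 * thread lands on next.
 */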
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

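/*
 * Module-wide initialization, called from each arch module's init
 * routine.  Order matters: arch init, bad_page allocation, hardware
 * setup and per-CPU compatibility checks, hardware enable plus the
 * hotplug/reboot/sysdev hooks, the vcpu cache, and finally the misc
 * device that exposes /dev/kvm.  The error labels below unwind in
 * exactly the reverse order.
 */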
int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_pfn = page_to_pfn(bad_page);

        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0a;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 1);
                if (r < 0)
                        goto out_free_1;
        }

        on_each_cpu(hardware_enable, NULL, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_init_debug();

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
        on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

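/*
 * Module-wide teardown: undo everything kvm_init() set up, in reverse
 * order, after waiting for in-flight tracepoint probes to finish.
 */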
void kvm_exit(void)
{
        tracepoint_synchronize_unregister();
        kvm_exit_debug();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);