Merge branch 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 22 May 2010 00:16:21 +0000 (17:16 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 22 May 2010 00:16:21 +0000 (17:16 -0700)
* 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (269 commits)
  KVM: x86: Add missing locking to arch specific vcpu ioctls
  KVM: PPC: Add missing vcpu_load()/vcpu_put() in vcpu ioctls
  KVM: MMU: Segregate shadow pages with different cr0.wp
  KVM: x86: Check LMA bit before set_efer
  KVM: Don't allow lmsw to clear cr0.pe
  KVM: Add cpuid.txt file
  KVM: x86: Tell the guest we'll warn it about tsc stability
  x86, paravirt: don't compute pvclock adjustments if we trust the tsc
  x86: KVM guest: Try using new kvm clock msrs
  KVM: x86: export paravirtual cpuid flags in KVM_GET_SUPPORTED_CPUID
  KVM: x86: add new KVMCLOCK cpuid feature
  KVM: x86: change msr numbers for kvmclock
  x86, paravirt: Add a global synchronization point for pvclock
  x86, paravirt: Enable pvclock flags in vcpu_time_info structure
  KVM: x86: Inject #GP with the right rip on efer writes
  KVM: SVM: Don't allow nested guest to VMMCALL into host
  KVM: x86: Fix exception reinjection forced to true
  KVM: Fix wallclock version writing race
  KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
  KVM: VMX: enable VMXON check with SMX enabled (Intel TXT)
  ...

arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/asm-offsets.c
arch/x86/kernel/tboot.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
virt/kvm/iommu.c

@@@ -23,7 -23,7 +23,7 @@@
  #include <asm/page.h>
  #include <asm/exception-64e.h>
  #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- #include <asm/kvm_book3s_64_asm.h>
+ #include <asm/kvm_book3s_asm.h>
  #endif
  
  register struct paca_struct *local_paca asm("r13");
@@@ -82,7 -82,6 +82,7 @@@ struct paca_struct 
        s16 hw_cpu_id;                  /* Physical processor number */
        u8 cpu_start;                   /* At startup, processor spins until */
                                        /* this becomes non-zero. */
 +      u8 kexec_state;         /* set when kexec down has irqs off */
  #ifdef CONFIG_PPC_STD_MMU_64
        struct slb_shadow *slb_shadow_ptr;
  
        u64 startpurr;                  /* PURR/TB value snapshot */
        u64 startspurr;                 /* SPURR value snapshot */
  
- #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-       struct  {
-               u64     esid;
-               u64     vsid;
-       } kvm_slb[64];                  /* guest SLB */
+ #ifdef CONFIG_KVM_BOOK3S_HANDLER
        /* We use this to store guest state in */
        struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
-       u8 kvm_slb_max;                 /* highest used guest slb entry */
-       u8 kvm_in_guest;                /* are we inside the guest? */
  #endif
  };
  
  #define HID1_ABE      (1<<10)         /* 7450 Address Broadcast Enable */
  #define HID1_PS               (1<<16)         /* 750FX PLL selection */
  #define SPRN_HID2     0x3F8           /* Hardware Implementation Register 2 */
+ #define SPRN_HID2_GEKKO       0x398           /* Gekko HID2 Register */
  #define SPRN_IABR     0x3F2   /* Instruction Address Breakpoint Register */
  #define SPRN_IABR2    0x3FA           /* 83xx */
  #define SPRN_IBCR     0x135           /* 83xx Insn Breakpoint Control Reg */
  #define SPRN_HID4     0x3F4           /* 970 HID4 */
+ #define SPRN_HID4_GEKKO       0x3F3           /* Gekko HID4 */
  #define SPRN_HID5     0x3F6           /* 970 HID5 */
  #define SPRN_HID6     0x3F9   /* BE HID 6 */
  #define   HID6_LB     (0x0F<<12) /* Concurrent Large Page Modes */
  #define SPRN_VRSAVE   0x100   /* Vector Register Save Register */
  #define SPRN_XER      0x001   /* Fixed Point Exception Register */
  
+ #define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */
+ #define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */
+ #define SPRN_PMC1_GEKKO  0x3B9 /* Gekko Performance Monitor Control 1 */
+ #define SPRN_PMC2_GEKKO  0x3BA /* Gekko Performance Monitor Control 2 */
+ #define SPRN_PMC3_GEKKO  0x3BD /* Gekko Performance Monitor Control 3 */
+ #define SPRN_PMC4_GEKKO  0x3BE /* Gekko Performance Monitor Control 4 */
+ #define SPRN_WPAR_GEKKO  0x399 /* Gekko Write Pipe Address Register */
  #define SPRN_SCOMC    0x114   /* SCOM Access Control */
  #define SPRN_SCOMD    0x115   /* SCOM Access DATA */
  
  #define PVR_403GC     0x00200200
  #define PVR_403GCX    0x00201400
  #define PVR_405GP     0x40110000
 +#define PVR_476               0x11a52000
  #define PVR_STB03XXX  0x40310000
  #define PVR_NP405H    0x41410000
  #define PVR_NP405L    0x41610000
  #define PVR_8245      0x80811014
  #define PVR_8260      PVR_8240
  
 +/* 476 Simulator seems to currently have the PVR of the 602... */
 +#define PVR_476_ISS   0x00052000
 +
  /* 64-bit processors */
  /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
  #define PV_NORTHSTAR  0x0033
@@@ -50,6 -50,9 +50,9 @@@
  #endif
  #ifdef CONFIG_KVM
  #include <linux/kvm_host.h>
+ #ifndef CONFIG_BOOKE
+ #include <asm/kvm_book3s.h>
+ #endif
  #endif
  
  #ifdef CONFIG_PPC32
@@@ -105,6 -108,9 +108,9 @@@ int main(void
        DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
  #endif /* CONFIG_SPE */
  #endif /* CONFIG_PPC64 */
+ #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+       DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
+ #endif
  
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
        DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 -      DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
        DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
  #ifdef CONFIG_PPC_MM_SLICES
        DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
  #endif /* CONFIG_PPC_STD_MMU_64 */
        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
        DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
 +      DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
        DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
        DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
        DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
        DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
        DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
  #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-       DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
-       DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
-       DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
-       DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
-       DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
-       DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
-       DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
-       DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
-       DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
-       DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
-       DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
-       DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
-       DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
-       DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
-       DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
-       DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
-       DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
-       DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
-       DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
-       DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
-       DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
-       DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
-                                           shadow_vcpu.vmhandler));
-       DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
-                                          shadow_vcpu.scratch0));
-       DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
-                                          shadow_vcpu.scratch1));
+       DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
+       DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
+       DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
  #endif
  #endif /* CONFIG_PPC64 */
  
        /* Interrupt register frame */
        DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
        DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
- #ifdef CONFIG_PPC64
        DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+ #ifdef CONFIG_PPC64
        /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
        DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
        DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-       DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
-       DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
-       DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
        DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
        DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
        DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
        DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
  
-       DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
-       DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
-       DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
-       /* book3s_64 */
- #ifdef CONFIG_PPC64
-       DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+       /* book3s */
+ #ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
-       DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
        DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
        DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
-       DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
        DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
        DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
        DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
        DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
        DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+       DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+                          offsetof(struct kvmppc_vcpu_book3s, vcpu));
+       DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
+       DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
+       DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
+       DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
+       DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
+       DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
+       DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
+       DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
+       DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
+       DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
+       DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
+       DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
+       DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
+       DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
+       DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
+       DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
+       DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
+       DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
+       DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
+       DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
+       DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
+       DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                        vmhandler));
+       DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                       scratch0));
+       DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                       scratch1));
+       DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                       in_guest));
+       DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                          fault_dsisr));
+       DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                        fault_dar));
+       DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                        last_inst));
+       DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                          shadow_srr1));
+ #ifdef CONFIG_PPC_BOOK3S_32
+       DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+ #endif
  #else
        DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
        DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
- #endif /* CONFIG_PPC64 */
+       DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+       DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+       DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+       DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+       DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+       DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+ #endif /* CONFIG_PPC_BOOK3S */
  #endif
  #ifdef CONFIG_44x
        DEFINE(PGD_T_LOG2, PGD_T_LOG2);
        DEFINE(PTE_T_LOG2, PTE_T_LOG2);
  #endif
 +#ifdef CONFIG_FSL_BOOKE
 +      DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
 +      DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
 +      DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
 +      DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
 +      DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
 +      DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 +#endif
  
  #ifdef CONFIG_KVM_EXIT_TIMING
        DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --combined arch/x86/kernel/tboot.c
@@@ -46,6 -46,7 +46,7 @@@
  
  /* Global pointer to shared data; NULL means no measured launch. */
  struct tboot *tboot __read_mostly;
+ EXPORT_SYMBOL(tboot);
  
  /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
  #define AP_WAIT_TIMEOUT               1
@@@ -175,9 -176,6 +176,9 @@@ static void add_mac_region(phys_addr_t 
        struct tboot_mac_region *mr;
        phys_addr_t end = start + size;
  
 +      if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS)
 +              panic("tboot: Too many MAC regions\n");
 +
        if (start && size) {
                mr = &tboot->mac_regions[tboot->num_mac_regions++];
                mr->start = round_down(start, PAGE_SIZE);
  
  static int tboot_setup_sleep(void)
  {
 +      int i;
 +
        tboot->num_mac_regions = 0;
  
 -      /* S3 resume code */
 -      add_mac_region(acpi_wakeup_address, WAKEUP_SIZE);
 +      for (i = 0; i < e820.nr_map; i++) {
 +              if ((e820.map[i].type != E820_RAM)
 +               && (e820.map[i].type != E820_RESERVED_KERN))
 +                      continue;
  
 -#ifdef CONFIG_X86_TRAMPOLINE
 -      /* AP trampoline code */
 -      add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE);
 -#endif
 -
 -      /* kernel code + data + bss */
 -      add_mac_region(virt_to_phys(_text), _end - _text);
 +              add_mac_region(e820.map[i].addr, e820.map[i].size);
 +      }
  
        tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
  
diff --combined arch/x86/kvm/svm.c
@@@ -44,10 -44,11 +44,11 @@@ MODULE_LICENSE("GPL")
  #define SEG_TYPE_LDT 2
  #define SEG_TYPE_BUSY_TSS16 3
  
- #define SVM_FEATURE_NPT  (1 << 0)
- #define SVM_FEATURE_LBRV (1 << 1)
- #define SVM_FEATURE_SVML (1 << 2)
- #define SVM_FEATURE_PAUSE_FILTER (1 << 10)
+ #define SVM_FEATURE_NPT            (1 <<  0)
+ #define SVM_FEATURE_LBRV           (1 <<  1)
+ #define SVM_FEATURE_SVML           (1 <<  2)
+ #define SVM_FEATURE_NRIP           (1 <<  3)
+ #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
  
  #define NESTED_EXIT_HOST      0       /* Exit handled on host level */
  #define NESTED_EXIT_DONE      1       /* Exit caused nested vmexit  */
@@@ -70,6 -71,7 +71,7 @@@ struct kvm_vcpu
  struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
+       u64 vm_cr_msr;
        u64 vmcb;
  
        /* These are the merged vectors */
@@@ -77,6 -79,7 +79,7 @@@
  
        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;
+       u64 vmcb_iopm;
  
        /* A VMEXIT is required but not yet emulated */
        bool exit_required;
@@@ -91,6 -94,9 +94,9 @@@
  
  };
  
+ #define MSRPM_OFFSETS 16
+ static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
  struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        struct nested_state nested;
  
        bool nmi_singlestep;
+       unsigned int3_injected;
+       unsigned long int3_rip;
+ };
+ #define MSR_INVALID                   0xffffffffU
+ static struct svm_direct_access_msrs {
+       u32 index;   /* Index of the MSR */
+       bool always; /* True if intercept is always on */
+ } direct_access_msrs[] = {
+       { .index = MSR_K6_STAR,                         .always = true  },
+       { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
+ #ifdef CONFIG_X86_64
+       { .index = MSR_GS_BASE,                         .always = true  },
+       { .index = MSR_FS_BASE,                         .always = true  },
+       { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
+       { .index = MSR_LSTAR,                           .always = true  },
+       { .index = MSR_CSTAR,                           .always = true  },
+       { .index = MSR_SYSCALL_MASK,                    .always = true  },
+ #endif
+       { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
+       { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
+       { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
+       { .index = MSR_IA32_LASTINTTOIP,                .always = false },
+       { .index = MSR_INVALID,                         .always = false },
  };
  
  /* enable NPT for AMD64 and X86 with PAE */
  #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
  static bool npt_enabled = true;
  #else
- static bool npt_enabled = false;
+ static bool npt_enabled;
  #endif
  static int npt = 1;
  
@@@ -129,6 -161,7 +161,7 @@@ static void svm_flush_tlb(struct kvm_vc
  static void svm_complete_interrupts(struct vcpu_svm *svm);
  
  static int nested_svm_exit_handled(struct vcpu_svm *svm);
+ static int nested_svm_intercept(struct vcpu_svm *svm);
  static int nested_svm_vmexit(struct vcpu_svm *svm);
  static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
@@@ -163,8 -196,8 +196,8 @@@ static unsigned long iopm_base
  struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
-       unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-       unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+       unsigned base1:8, type:5, dpl:2, p:1;
+       unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
  } __attribute__((packed));
@@@ -194,6 -227,27 +227,27 @@@ static u32 msrpm_ranges[] = {0, 0xc0000
  #define MSRS_RANGE_SIZE 2048
  #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
  
+ static u32 svm_msrpm_offset(u32 msr)
+ {
+       u32 offset;
+       int i;
+       for (i = 0; i < NUM_MSR_MAPS; i++) {
+               if (msr < msrpm_ranges[i] ||
+                   msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
+                       continue;
+               offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
+               offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
+               /* Now we have the u8 offset - but need the u32 offset */
+               return offset / 4;
+       }
+       /* MSR not in any range */
+       return MSR_INVALID;
+ }
  #define MAX_INST_SIZE 15
  
  static inline u32 svm_has(u32 feat)
@@@ -213,7 -267,7 +267,7 @@@ static inline void stgi(void
  
  static inline void invlpga(unsigned long addr, u32 asid)
  {
-       asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
+       asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
  }
  
  static inline void force_new_asid(struct kvm_vcpu *vcpu)
@@@ -235,23 -289,6 +289,6 @@@ static void svm_set_efer(struct kvm_vcp
        vcpu->arch.efer = efer;
  }
  
- static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-                               bool has_error_code, u32 error_code)
- {
-       struct vcpu_svm *svm = to_svm(vcpu);
-       /* If we are within a nested VM we'd better #VMEXIT and let the
-          guest handle the exception */
-       if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
-               return;
-       svm->vmcb->control.event_inj = nr
-               | SVM_EVTINJ_VALID
-               | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
-               | SVM_EVTINJ_TYPE_EXEPT;
-       svm->vmcb->control.event_inj_err = error_code;
- }
  static int is_external_interrupt(u32 info)
  {
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@@ -264,7 -301,7 +301,7 @@@ static u32 svm_get_interrupt_shadow(str
        u32 ret = 0;
  
        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-               ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
+               ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret & mask;
  }
  
@@@ -283,6 -320,9 +320,9 @@@ static void skip_emulated_instruction(s
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
+       if (svm->vmcb->control.next_rip != 0)
+               svm->next_rip = svm->vmcb->control.next_rip;
        if (!svm->next_rip) {
                if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
        svm_set_interrupt_shadow(vcpu, 0);
  }
  
+ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+                               bool has_error_code, u32 error_code,
+                               bool reinject)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
+       /*
+        * If we are within a nested VM we'd better #VMEXIT and let the guest
+        * handle the exception
+        */
+       if (!reinject &&
+           nested_svm_check_exception(svm, nr, has_error_code, error_code))
+               return;
+       if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
+               unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
+               /*
+                * For guest debugging where we have to reinject #BP if some
+                * INT3 is guest-owned:
+                * Emulate nRIP by moving RIP forward. Will fail if injection
+                * raises a fault that is not intercepted. Still better than
+                * failing in all cases.
+                */
+               skip_emulated_instruction(&svm->vcpu);
+               rip = kvm_rip_read(&svm->vcpu);
+               svm->int3_rip = rip + svm->vmcb->save.cs.base;
+               svm->int3_injected = rip - old_rip;
+       }
+       svm->vmcb->control.event_inj = nr
+               | SVM_EVTINJ_VALID
+               | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+               | SVM_EVTINJ_TYPE_EXEPT;
+       svm->vmcb->control.event_inj_err = error_code;
+ }
  static int has_svm(void)
  {
        const char *msg;
@@@ -319,7 -396,7 +396,7 @@@ static int svm_hardware_enable(void *ga
  
        struct svm_cpu_data *sd;
        uint64_t efer;
-       struct descriptor_table gdt_descr;
+       struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();
  
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
  
-       kvm_get_gdt(&gdt_descr);
-       gdt = (struct desc_struct *)gdt_descr.base;
+       native_store_gdt(&gdt_descr);
+       gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
  
        wrmsrl(MSR_EFER, efer | EFER_SVME);
@@@ -391,42 -468,98 +468,98 @@@ err_1
  
  }
  
+ static bool valid_msr_intercept(u32 index)
+ {
+       int i;
+       for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
+               if (direct_access_msrs[i].index == index)
+                       return true;
+       return false;
+ }
  static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
  {
+       u8 bit_read, bit_write;
+       unsigned long tmp;
+       u32 offset;
+       /*
+        * If this warning triggers extend the direct_access_msrs list at the
+        * beginning of the file
+        */
+       WARN_ON(!valid_msr_intercept(msr));
+       offset    = svm_msrpm_offset(msr);
+       bit_read  = 2 * (msr & 0x0f);
+       bit_write = 2 * (msr & 0x0f) + 1;
+       tmp       = msrpm[offset];
+       BUG_ON(offset == MSR_INVALID);
+       read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
+       write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+       msrpm[offset] = tmp;
+ }
+ static void svm_vcpu_init_msrpm(u32 *msrpm)
+ {
        int i;
  
-       for (i = 0; i < NUM_MSR_MAPS; i++) {
-               if (msr >= msrpm_ranges[i] &&
-                   msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
-                       u32 msr_offset = (i * MSRS_IN_RANGE + msr -
-                                         msrpm_ranges[i]) * 2;
-                       u32 *base = msrpm + (msr_offset / 32);
-                       u32 msr_shift = msr_offset % 32;
-                       u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
-                       *base = (*base & ~(0x3 << msr_shift)) |
-                               (mask << msr_shift);
+       memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+       for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
+               if (!direct_access_msrs[i].always)
+                       continue;
+               set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
+       }
+ }
+ static void add_msr_offset(u32 offset)
+ {
+       int i;
+       for (i = 0; i < MSRPM_OFFSETS; ++i) {
+               /* Offset already in list? */
+               if (msrpm_offsets[i] == offset)
                        return;
-               }
+               /* Slot used by another offset? */
+               if (msrpm_offsets[i] != MSR_INVALID)
+                       continue;
+               /* Add offset to list */
+               msrpm_offsets[i] = offset;
+               return;
        }
+       /*
+        * If this BUG triggers the msrpm_offsets table has an overflow. Just
+        * increase MSRPM_OFFSETS in this case.
+        */
        BUG();
  }
  
- static void svm_vcpu_init_msrpm(u32 *msrpm)
+ static void init_msrpm_offsets(void)
  {
-       memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+       int i;
  
- #ifdef CONFIG_X86_64
-       set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
-       set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
-       set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
-       set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
-       set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
-       set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
- #endif
-       set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
-       set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+       memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
+       for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
+               u32 offset;
+               offset = svm_msrpm_offset(direct_access_msrs[i].index);
+               BUG_ON(offset == MSR_INVALID);
+               add_msr_offset(offset);
+       }
  }
  
  static void svm_enable_lbrv(struct vcpu_svm *svm)
@@@ -467,6 -600,8 +600,8 @@@ static __init int svm_hardware_setup(vo
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
  
+       init_msrpm_offsets();
        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);
  
@@@ -523,7 -658,7 +658,7 @@@ static void init_seg(struct vmcb_seg *s
  {
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
-               SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
+                     SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
  }
@@@ -543,16 -678,16 +678,16 @@@ static void init_vmcb(struct vcpu_svm *
  
        svm->vcpu.fpu_active = 1;
  
-       control->intercept_cr_read =    INTERCEPT_CR0_MASK |
+       control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK;
  
-       control->intercept_cr_write =   INTERCEPT_CR0_MASK |
+       control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK |
                                        INTERCEPT_CR8_MASK;
  
-       control->intercept_dr_read =    INTERCEPT_DR0_MASK |
+       control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR6_MASK |
                                        INTERCEPT_DR7_MASK;
  
-       control->intercept_dr_write =   INTERCEPT_DR0_MASK |
+       control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        (1 << MC_VECTOR);
  
  
-       control->intercept =    (1ULL << INTERCEPT_INTR) |
+       control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
                                (1ULL << INTERCEPT_SELECTIVE_CR0) |
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
  
-       /* This is the guest-visible cr0 value.
+       /*
+        * This is the guest-visible cr0 value.
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         */
        svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@@ -729,6 -865,7 +865,7 @@@ static struct kvm_vcpu *svm_create_vcpu
        svm_vcpu_init_msrpm(svm->msrpm);
  
        svm->nested.msrpm = page_address(nested_msrpm_pages);
+       svm_vcpu_init_msrpm(svm->nested.msrpm);
  
        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
@@@ -882,7 -1019,8 +1019,8 @@@ static void svm_get_segment(struct kvm_
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
  
-       /* AMD's VMCB does not have an explicit unusable field, so emulate it
+       /*
+        * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
        var->unusable = !var->present || (var->type == 0);
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
-               /* On AMD CPUs sometimes the DB bit in the segment
+               /*
+                * On AMD CPUs sometimes the DB bit in the segment
                 * descriptor is left as 1, although the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross vendor migrating.
@@@ -936,36 -1075,36 +1075,36 @@@ static int svm_get_cpl(struct kvm_vcpu 
        return save->cpl;
  }
  
- static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       dt->limit = svm->vmcb->save.idtr.limit;
-       dt->base = svm->vmcb->save.idtr.base;
+       dt->size = svm->vmcb->save.idtr.limit;
+       dt->address = svm->vmcb->save.idtr.base;
  }
  
- static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       svm->vmcb->save.idtr.limit = dt->limit;
-       svm->vmcb->save.idtr.base = dt->base ;
+       svm->vmcb->save.idtr.limit = dt->size;
+       svm->vmcb->save.idtr.base = dt->address ;
  }
  
- static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       dt->limit = svm->vmcb->save.gdtr.limit;
-       dt->base = svm->vmcb->save.gdtr.base;
+       dt->size = svm->vmcb->save.gdtr.limit;
+       dt->address = svm->vmcb->save.gdtr.base;
  }
  
- static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       svm->vmcb->save.gdtr.limit = dt->limit;
-       svm->vmcb->save.gdtr.base = dt->base ;
+       svm->vmcb->save.gdtr.limit = dt->size;
+       svm->vmcb->save.gdtr.base = dt->address ;
  }
  
  static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@@ -978,6 -1117,7 +1117,7 @@@ static void svm_decache_cr4_guest_bits(
  
  static void update_cr0_intercept(struct vcpu_svm *svm)
  {
+       struct vmcb *vmcb = svm->vmcb;
        ulong gcr0 = svm->vcpu.arch.cr0;
        u64 *hcr0 = &svm->vmcb->save.cr0;
  
  
  
        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-               svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-               svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+               vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+               vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+               if (is_nested(svm)) {
+                       struct vmcb *hsave = svm->nested.hsave;
+                       hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
+                       hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+                       vmcb->control.intercept_cr_read  |= svm->nested.intercept_cr_read;
+                       vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
+               }
        } else {
                svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+               if (is_nested(svm)) {
+                       struct vmcb *hsave = svm->nested.hsave;
+                       hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+                       hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+               }
        }
  }
  
@@@ -1001,6 -1155,27 +1155,27 @@@ static void svm_set_cr0(struct kvm_vcp
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
+       if (is_nested(svm)) {
+               /*
+                * We are here because we run in nested mode, the host kvm
+                * intercepts cr0 writes but the l1 hypervisor does not.
+                * But the L1 hypervisor may intercept selective cr0 writes.
+                * This needs to be checked here.
+                */
+               unsigned long old, new;
+               /* Remove bits that would trigger a real cr0 write intercept */
+               old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
+               new = cr0 & SVM_CR0_SELECTIVE_MASK;
+               if (old == new) {
+                       /* cr0 write with ts and mp unchanged */
+                       svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+                       if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+                               return;
+               }
+       }
  #ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
@@@ -1134,70 -1309,11 +1309,11 @@@ static void new_asid(struct vcpu_svm *s
        svm->vmcb->control.asid = sd->next_asid++;
  }
  
- static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
+ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       switch (dr) {
-       case 0 ... 3:
-               *dest = vcpu->arch.db[dr];
-               break;
-       case 4:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-                       return EMULATE_FAIL; /* will re-inject UD */
-               /* fall through */
-       case 6:
-               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                       *dest = vcpu->arch.dr6;
-               else
-                       *dest = svm->vmcb->save.dr6;
-               break;
-       case 5:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-                       return EMULATE_FAIL; /* will re-inject UD */
-               /* fall through */
-       case 7:
-               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                       *dest = vcpu->arch.dr7;
-               else
-                       *dest = svm->vmcb->save.dr7;
-               break;
-       }
-       return EMULATE_DONE;
- }
- static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
- {
-       struct vcpu_svm *svm = to_svm(vcpu);
-       switch (dr) {
-       case 0 ... 3:
-               vcpu->arch.db[dr] = value;
-               if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-                       vcpu->arch.eff_db[dr] = value;
-               break;
-       case 4:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-                       return EMULATE_FAIL; /* will re-inject UD */
-               /* fall through */
-       case 6:
-               vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
-               break;
-       case 5:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-                       return EMULATE_FAIL; /* will re-inject UD */
-               /* fall through */
-       case 7:
-               vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
-               if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-                       svm->vmcb->save.dr7 = vcpu->arch.dr7;
-                       vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
-               }
-               break;
-       }
-       return EMULATE_DONE;
+       svm->vmcb->save.dr7 = value;
  }
  
  static int pf_interception(struct vcpu_svm *svm)
@@@ -1234,7 -1350,7 +1350,7 @@@ static int db_interception(struct vcpu_
        }
  
        if (svm->vcpu.guest_debug &
-           (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+           (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@@ -1268,7 -1384,22 +1384,22 @@@ static int ud_interception(struct vcpu_
  static void svm_fpu_activate(struct kvm_vcpu *vcpu)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
-       svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+       u32 excp;
+       if (is_nested(svm)) {
+               u32 h_excp, n_excp;
+               h_excp  = svm->nested.hsave->control.intercept_exceptions;
+               n_excp  = svm->nested.intercept_exceptions;
+               h_excp &= ~(1 << NM_VECTOR);
+               excp    = h_excp | n_excp;
+       } else {
+               excp  = svm->vmcb->control.intercept_exceptions;
+               excp &= ~(1 << NM_VECTOR);
+       }
+       svm->vmcb->control.intercept_exceptions = excp;
        svm->vcpu.fpu_active = 1;
        update_cr0_intercept(svm);
  }
@@@ -1309,29 -1440,23 +1440,23 @@@ static int shutdown_interception(struc
  
  static int io_interception(struct vcpu_svm *svm)
  {
+       struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, in, string;
        unsigned port;
  
        ++svm->vcpu.stat.io_exits;
-       svm->next_rip = svm->vmcb->control.exit_info_2;
        string = (io_info & SVM_IOIO_STR_MASK) != 0;
-       if (string) {
-               if (emulate_instruction(&svm->vcpu,
-                                       0, 0, 0) == EMULATE_DO_MMIO)
-                       return 0;
-               return 1;
-       }
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
+       if (string || in)
+               return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
+       svm->next_rip = svm->vmcb->control.exit_info_2;
        skip_emulated_instruction(&svm->vcpu);
-       return kvm_emulate_pio(&svm->vcpu, in, size, port);
+       return kvm_fast_pio_out(vcpu, size, port);
  }
  
  static int nmi_interception(struct vcpu_svm *svm)
@@@ -1384,6 -1509,8 +1509,8 @@@ static int nested_svm_check_permissions
  static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
  {
+       int vmexit;
        if (!is_nested(svm))
                return 0;
  
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
  
-       return nested_svm_exit_handled(svm);
+       vmexit = nested_svm_intercept(svm);
+       if (vmexit == NESTED_EXIT_DONE)
+               svm->nested.exit_required = true;
+       return vmexit;
  }
  
- static inline int nested_svm_intr(struct vcpu_svm *svm)
+ /* This function returns true if it is safe to enable the irq window */
+ static inline bool nested_svm_intr(struct vcpu_svm *svm)
  {
        if (!is_nested(svm))
-               return 0;
+               return true;
  
        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
-               return 0;
+               return true;
  
        if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
-               return 0;
+               return false;
  
-       svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+       svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
+       svm->vmcb->control.exit_info_1 = 0;
+       svm->vmcb->control.exit_info_2 = 0;
  
        if (svm->nested.intercept & 1ULL) {
                /*
                 */
                svm->nested.exit_required = true;
                trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
-               return 1;
+               return false;
        }
  
-       return 0;
+       return true;
+ }
+ /* This function returns true if it is safe to enable the nmi window */
+ static inline bool nested_svm_nmi(struct vcpu_svm *svm)
+ {
+       if (!is_nested(svm))
+               return true;
+       if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
+               return true;
+       svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+       svm->nested.exit_required = true;
+       return false;
  }
  
- static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
  {
        struct page *page;
  
+       might_sleep();
        page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;
  
-       return kmap_atomic(page, idx);
+       *_page = page;
+       return kmap(page);
  
  error:
        kvm_release_page_clean(page);
        return NULL;
  }
  
- static void nested_svm_unmap(void *addr, enum km_type idx)
+ static void nested_svm_unmap(struct page *page)
  {
-       struct page *page;
+       kunmap(page);
+       kvm_release_page_dirty(page);
+ }
  
-       if (!addr)
-               return;
+ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
+ {
+       unsigned port;
+       u8 val, bit;
+       u64 gpa;
  
-       page = kmap_atomic_to_page(addr);
+       if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
+               return NESTED_EXIT_HOST;
  
-       kunmap_atomic(addr, idx);
-       kvm_release_page_dirty(page);
+       port = svm->vmcb->control.exit_info_1 >> 16;
+       gpa  = svm->nested.vmcb_iopm + (port / 8);
+       bit  = port % 8;
+       val  = 0;
+       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
+               val &= (1 << bit);
+       return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  }
  
- static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
+ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
  {
-       u32 param = svm->vmcb->control.exit_info_1 & 1;
-       u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-       bool ret = false;
-       u32 t0, t1;
-       u8 *msrpm;
+       u32 offset, msr, value;
+       int write, mask;
  
        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
-               return false;
+               return NESTED_EXIT_HOST;
  
-       msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+       msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+       offset = svm_msrpm_offset(msr);
+       write  = svm->vmcb->control.exit_info_1 & 1;
+       mask   = 1 << ((2 * (msr & 0xf)) + write);
  
-       if (!msrpm)
-               goto out;
+       if (offset == MSR_INVALID)
+               return NESTED_EXIT_DONE;
  
-       switch (msr) {
-       case 0 ... 0x1fff:
-               t0 = (msr * 2) % 8;
-               t1 = msr / 8;
-               break;
-       case 0xc0000000 ... 0xc0001fff:
-               t0 = (8192 + msr - 0xc0000000) * 2;
-               t1 = (t0 / 8);
-               t0 %= 8;
-               break;
-       case 0xc0010000 ... 0xc0011fff:
-               t0 = (16384 + msr - 0xc0010000) * 2;
-               t1 = (t0 / 8);
-               t0 %= 8;
-               break;
-       default:
-               ret = true;
-               goto out;
-       }
+       /* Offset is in 32 bit units but need in 8 bit units */
+       offset *= 4;
  
-       ret = msrpm[t1] & ((1 << param) << t0);
- out:
-       nested_svm_unmap(msrpm, KM_USER0);
+       if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+               return NESTED_EXIT_DONE;
  
-       return ret;
+       return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  }
  
  static int nested_svm_exit_special(struct vcpu_svm *svm)
        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
+       case SVM_EXIT_EXCP_BASE + MC_VECTOR:
                return NESTED_EXIT_HOST;
-               /* For now we are always handling NPFs when using them */
        case SVM_EXIT_NPF:
+               /* For now we are always handling NPFs when using them */
                if (npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
-       /* When we're shadowing, trap PFs */
        case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+               /* When we're shadowing, trap PFs */
                if (!npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
+       case SVM_EXIT_EXCP_BASE + NM_VECTOR:
+               nm_interception(svm);
+               break;
        default:
                break;
        }
  /*
   * If this function returns true, this #vmexit was already handled
   */
- static int nested_svm_exit_handled(struct vcpu_svm *svm)
+ static int nested_svm_intercept(struct vcpu_svm *svm)
  {
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
+       case SVM_EXIT_IOIO:
+               vmexit = nested_svm_intercept_ioio(svm);
+               break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
                u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
                if (svm->nested.intercept_cr_read & cr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
+       case SVM_EXIT_ERR: {
+               vmexit = NESTED_EXIT_DONE;
+               break;
+       }
        default: {
                u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
                if (svm->nested.intercept & exit_bits)
        }
        }
  
-       if (vmexit == NESTED_EXIT_DONE) {
+       return vmexit;
+ }
+ static int nested_svm_exit_handled(struct vcpu_svm *svm)
+ {
+       int vmexit;
+       vmexit = nested_svm_intercept(svm);
+       if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);
-       }
  
        return vmexit;
  }
@@@ -1615,6 -1781,7 +1781,7 @@@ static int nested_svm_vmexit(struct vcp
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
  
        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                       vmcb->control.exit_info_1,
                                       vmcb->control.exit_int_info,
                                       vmcb->control.exit_int_info_err);
  
-       nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
        if (!nested_vmcb)
                return 1;
  
+       /* Exit nested SVM mode */
+       svm->nested.vmcb = 0;
        /* Give the current vmcb to the guest */
        disable_gif(svm);
  
        nested_vmcb->save.ds     = vmcb->save.ds;
        nested_vmcb->save.gdtr   = vmcb->save.gdtr;
        nested_vmcb->save.idtr   = vmcb->save.idtr;
-       if (npt_enabled)
-               nested_vmcb->save.cr3    = vmcb->save.cr3;
+       nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
+       nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
        nested_vmcb->save.cr2    = vmcb->save.cr2;
+       nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = vmcb->save.rflags;
        nested_vmcb->save.rip    = vmcb->save.rip;
        nested_vmcb->save.rsp    = vmcb->save.rsp;
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;
  
-       /* Exit nested SVM mode */
-       svm->nested.vmcb = 0;
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
  
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);
  
  static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
  {
-       u32 *nested_msrpm;
+       /*
+        * This function merges the msr permission bitmaps of kvm and the
+        * nested vmcb. It is optimized in that it only merges the parts where
+        * the kvm msr permission bitmap may contain zero bits
+        */
        int i;
  
-       nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
-       if (!nested_msrpm)
-               return false;
+       if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+               return true;
  
-       for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
-               svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+       for (i = 0; i < MSRPM_OFFSETS; i++) {
+               u32 value, p;
+               u64 offset;
  
-       svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+               if (msrpm_offsets[i] == 0xffffffff)
+                       break;
+               p      = msrpm_offsets[i];
+               offset = svm->nested.vmcb_msrpm + (p * 4);
+               if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+                       return false;
+               svm->nested.msrpm[p] = svm->msrpm[p] | value;
+       }
  
-       nested_svm_unmap(nested_msrpm, KM_USER0);
+       svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
  
        return true;
  }
@@@ -1744,26 -1926,34 +1926,34 @@@ static bool nested_svm_vmrun(struct vcp
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
+       u64 vmcb_gpa;
  
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       vmcb_gpa = svm->vmcb->save.rax;
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return false;
  
-       /* nested_vmcb is our indicator if nested SVM is activated */
-       svm->nested.vmcb = svm->vmcb->save.rax;
-       trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb,
+       trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
                               nested_vmcb->save.rip,
                               nested_vmcb->control.int_ctl,
                               nested_vmcb->control.event_inj,
                               nested_vmcb->control.nested_ctl);
  
+       trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
+                                   nested_vmcb->control.intercept_cr_write,
+                                   nested_vmcb->control.intercept_exceptions,
+                                   nested_vmcb->control.intercept);
        /* Clear internal status */
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);
  
-       /* Save the old vmcb, so we don't need to pick what we save, but
-          can restore everything when a VMEXIT occurs */
+       /*
+        * Save the old vmcb, so we don't need to pick what we save, but can
+        * restore everything when a VMEXIT occurs
+        */
        hsave->save.es     = vmcb->save.es;
        hsave->save.cs     = vmcb->save.cs;
        hsave->save.ss     = vmcb->save.ss;
        if (npt_enabled) {
                svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
                svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
-       } else {
+       } else
                kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
-               kvm_mmu_reset_context(&svm->vcpu);
-       }
+       /* Guest paging mode is active - reset mmu */
+       kvm_mmu_reset_context(&svm->vcpu);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
        kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
        kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
        kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
        /* In case we don't even reach vcpu_run, the fields are not updated */
        svm->vmcb->save.rax = nested_vmcb->save.rax;
        svm->vmcb->save.rsp = nested_vmcb->save.rsp;
        svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;
  
-       /* We don't want a nested guest to be more powerful than the guest,
-          so all intercepts are ORed */
-       svm->vmcb->control.intercept_cr_read |=
-               nested_vmcb->control.intercept_cr_read;
-       svm->vmcb->control.intercept_cr_write |=
-               nested_vmcb->control.intercept_cr_write;
-       svm->vmcb->control.intercept_dr_read |=
-               nested_vmcb->control.intercept_dr_read;
-       svm->vmcb->control.intercept_dr_write |=
-               nested_vmcb->control.intercept_dr_write;
-       svm->vmcb->control.intercept_exceptions |=
-               nested_vmcb->control.intercept_exceptions;
-       svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
-       svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+       svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
+       svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
  
        /* cache intercepts */
        svm->nested.intercept_cr_read    = nested_vmcb->control.intercept_cr_read;
        else
                svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
  
+       if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+               /* We only want the cr8 intercept bits of the guest */
+               svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
+               svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+       }
+       /* We don't want to see VMMCALLs from a nested guest */
+       svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
+       /*
+        * We don't want a nested guest to be more powerful than the guest, so
+        * all intercepts are ORed
+        */
+       svm->vmcb->control.intercept_cr_read |=
+               nested_vmcb->control.intercept_cr_read;
+       svm->vmcb->control.intercept_cr_write |=
+               nested_vmcb->control.intercept_cr_write;
+       svm->vmcb->control.intercept_dr_read |=
+               nested_vmcb->control.intercept_dr_read;
+       svm->vmcb->control.intercept_dr_write |=
+               nested_vmcb->control.intercept_dr_write;
+       svm->vmcb->control.intercept_exceptions |=
+               nested_vmcb->control.intercept_exceptions;
+       svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+       svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
        svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
  
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
+       /* nested_vmcb is our indicator if nested SVM is activated */
+       svm->nested.vmcb = vmcb_gpa;
  
        enable_gif(svm);
  
@@@ -1883,6 -2092,7 +2092,7 @@@ static void nested_svm_vmloadsave(struc
  static int vmload_interception(struct vcpu_svm *svm)
  {
        struct vmcb *nested_vmcb;
+       struct page *page;
  
        if (nested_svm_check_permissions(svm))
                return 1;
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
  
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return 1;
  
        nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
  
        return 1;
  }
  static int vmsave_interception(struct vcpu_svm *svm)
  {
        struct vmcb *nested_vmcb;
+       struct page *page;
  
        if (nested_svm_check_permissions(svm))
                return 1;
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
  
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return 1;
  
        nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
  
        return 1;
  }
@@@ -2018,6 -2229,8 +2229,8 @@@ static int task_switch_interception(str
                svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
        uint32_t idt_v =
                svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
+       bool has_error_code = false;
+       u32 error_code = 0;
  
        tss_selector = (u16)svm->vmcb->control.exit_info_1;
  
                        svm->vcpu.arch.nmi_injected = false;
                        break;
                case SVM_EXITINTINFO_TYPE_EXEPT:
+                       if (svm->vmcb->control.exit_info_2 &
+                           (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
+                               has_error_code = true;
+                               error_code =
+                                       (u32)svm->vmcb->control.exit_info_2;
+                       }
                        kvm_clear_exception_queue(&svm->vcpu);
                        break;
                case SVM_EXITINTINFO_TYPE_INTR:
             (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
                skip_emulated_instruction(&svm->vcpu);
  
-       return kvm_task_switch(&svm->vcpu, tss_selector, reason);
+       if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
+                               has_error_code, error_code) == EMULATE_FAIL) {
+               svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+               svm->vcpu.run->internal.ndata = 0;
+               return 0;
+       }
+       return 1;
  }
  
  static int cpuid_interception(struct vcpu_svm *svm)
  static int iret_interception(struct vcpu_svm *svm)
  {
        ++svm->vcpu.stat.nmi_window_exits;
 -      svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
 +      svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
        svm->vcpu.arch.hflags |= HF_IRET_MASK;
        return 1;
  }
@@@ -2145,9 -2371,11 +2371,11 @@@ static int svm_get_msr(struct kvm_vcpu 
        case MSR_IA32_SYSENTER_ESP:
                *data = svm->sysenter_esp;
                break;
-       /* Nobody will change the following 5 values in the VMCB so
-          we can safely return them on rdmsr. They will always be 0
-          until LBRV is implemented. */
+       /*
+        * Nobody will change the following 5 values in the VMCB so we can
+        * safely return them on rdmsr. They will always be 0 until LBRV is
+        * implemented.
+        */
        case MSR_IA32_DEBUGCTLMSR:
                *data = svm->vmcb->save.dbgctl;
                break;
                *data = svm->nested.hsave_msr;
                break;
        case MSR_VM_CR:
-               *data = 0;
+               *data = svm->nested.vm_cr_msr;
                break;
        case MSR_IA32_UCODE_REV:
                *data = 0x01000065;
@@@ -2197,6 -2425,31 +2425,31 @@@ static int rdmsr_interception(struct vc
        return 1;
  }
  
+ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int svm_dis, chg_mask;
+       if (data & ~SVM_VM_CR_VALID_MASK)
+               return 1;
+       chg_mask = SVM_VM_CR_VALID_MASK;
+       if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
+               chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
+       svm->nested.vm_cr_msr &= ~chg_mask;
+       svm->nested.vm_cr_msr |= (data & chg_mask);
+       svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
+       /* check for svm_disable while efer.svme is set */
+       if (svm_dis && (vcpu->arch.efer & EFER_SVME))
+               return 1;
+       return 0;
+ }
+
  static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
                svm->nested.hsave_msr = data;
                break;
        case MSR_VM_CR:
+               return svm_set_vm_cr(vcpu, data);
        case MSR_VM_IGNNE:
                pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
@@@ -2326,16 -2580,16 +2580,16 @@@ static int pause_interception(struct vc
  }
  
  static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-       [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
+       [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
+       [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
+       [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
+       [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
        [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
-       [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
+       [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
+       [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
+       [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
+       [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
+       [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
        [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
        [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
-       [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
-       [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
-       [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
-       [SVM_EXIT_INTR]                         = intr_interception,
+       [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
+       [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
+       [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
+       [SVM_EXIT_INTR]                         = intr_interception,
        [SVM_EXIT_NMI]                          = nmi_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
        [SVM_EXIT_VINTR]                        = interrupt_window_interception,
-       /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_IRET]                         = iret_interception,
        [SVM_EXIT_INVD]                         = emulate_on_interception,
        [SVM_EXIT_HLT]                          = halt_interception,
        [SVM_EXIT_INVLPG]                       = invlpg_interception,
        [SVM_EXIT_INVLPGA]                      = invlpga_interception,
-       [SVM_EXIT_IOIO]                         = io_interception,
+       [SVM_EXIT_IOIO]                         = io_interception,
        [SVM_EXIT_MSR]                          = msr_interception,
        [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
        [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
@@@ -2393,7 -2646,12 +2646,12 @@@ static int handle_exit(struct kvm_vcpu 
        struct kvm_run *kvm_run = vcpu->run;
        u32 exit_code = svm->vmcb->control.exit_code;
  
-       trace_kvm_exit(exit_code, svm->vmcb->save.rip);
+       trace_kvm_exit(exit_code, vcpu);
+       if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+               vcpu->arch.cr0 = svm->vmcb->save.cr0;
+       if (npt_enabled)
+               vcpu->arch.cr3 = svm->vmcb->save.cr3;
  
        if (unlikely(svm->nested.exit_required)) {
                nested_svm_vmexit(svm);
  
        svm_complete_interrupts(svm);
  
-       if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
-               vcpu->arch.cr0 = svm->vmcb->save.cr0;
-       if (npt_enabled)
-               vcpu->arch.cr3 = svm->vmcb->save.cr3;
        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
@@@ -2479,7 -2732,7 +2732,7 @@@ static void svm_inject_nmi(struct kvm_v
  
        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
        vcpu->arch.hflags |= HF_NMI_MASK;
 -      svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
 +      svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
        ++vcpu->stat.nmi_injections;
  }
  
@@@ -2511,6 -2764,9 +2764,9 @@@ static void update_cr8_intercept(struc
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
+       if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+               return;
        if (irr == -1)
                return;
  
@@@ -2522,8 -2778,12 +2778,12 @@@ static int svm_nmi_allowed(struct kvm_v
  {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
-       return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
+       int ret;
+       ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+             !(svm->vcpu.arch.hflags & HF_NMI_MASK);
+       ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+       return ret;
  }
  
  static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@@ -2539,10 -2799,10 +2799,10 @@@ static void svm_set_nmi_mask(struct kvm
  
        if (masked) {
                svm->vcpu.arch.hflags |= HF_NMI_MASK;
 -              svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
 +              svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
        } else {
                svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
 -              svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
 +              svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
        }
  }
  
@@@ -2568,13 -2828,13 +2828,13 @@@ static void enable_irq_window(struct kv
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       nested_svm_intr(svm);
-       /* In case GIF=0 we can't rely on the CPU to tell us when
-        * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
-        * The next time we get that intercept, this function will be
-        * called again though and we'll get the vintr intercept. */
-       if (gif_set(svm)) {
+       /*
+        * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+        * 1, because that's a separate STGI/VMRUN intercept.  The next time we
+        * get that intercept, this function will be called again though and
+        * we'll get the vintr intercept.
+        */
+       if (gif_set(svm) && nested_svm_intr(svm)) {
                svm_set_vintr(svm);
                svm_inject_irq(svm, 0x0);
        }
@@@ -2588,9 -2848,10 +2848,10 @@@ static void enable_nmi_window(struct kv
            == HF_NMI_MASK)
                return; /* IRET will cause a vm exit */
  
-       /* Something prevents NMI from been injected. Single step over
-          possible problem (IRET or exception injection or interrupt
-          shadow) */
+       /*
+        * Something prevents NMI from being injected. Single step over possible
+        * problem (IRET or exception injection or interrupt shadow)
+        */
        svm->nmi_singlestep = true;
        svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
        update_db_intercept(vcpu);
@@@ -2614,6 -2875,9 +2875,9 @@@ static inline void sync_cr8_to_lapic(st
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
+       if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+               return;
        if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
                int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
                kvm_set_cr8(vcpu, cr8);
@@@ -2625,6 -2889,9 +2889,9 @@@ static inline void sync_lapic_to_cr8(st
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr8;
  
+       if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+               return;
        cr8 = kvm_get_cr8(vcpu);
        svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
@@@ -2635,6 -2902,9 +2902,9 @@@ static void svm_complete_interrupts(str
        u8 vector;
        int type;
        u32 exitintinfo = svm->vmcb->control.exit_int_info;
+       unsigned int3_injected = svm->int3_injected;
+       svm->int3_injected = 0;
  
        if (svm->vcpu.arch.hflags & HF_IRET_MASK)
                svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
                svm->vcpu.arch.nmi_injected = true;
                break;
        case SVM_EXITINTINFO_TYPE_EXEPT:
-               /* In case of software exception do not reinject an exception
-                  vector, but re-execute and instruction instead */
-               if (is_nested(svm))
-                       break;
-               if (kvm_exception_is_soft(vector))
+               /*
+                * In case of software exceptions, do not reinject the vector,
+                * but re-execute the instruction instead. Rewind RIP first
+                * if we emulated INT3 before.
+                */
+               if (kvm_exception_is_soft(vector)) {
+                       if (vector == BP_VECTOR && int3_injected &&
+                           kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
+                               kvm_rip_write(&svm->vcpu,
+                                             kvm_rip_read(&svm->vcpu) -
+                                             int3_injected);
                        break;
+               }
                if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
                        u32 err = svm->vmcb->control.exit_int_info_err;
-                       kvm_queue_exception_e(&svm->vcpu, vector, err);
+                       kvm_requeue_exception_e(&svm->vcpu, vector, err);
  
                } else
-                       kvm_queue_exception(&svm->vcpu, vector);
+                       kvm_requeue_exception(&svm->vcpu, vector);
                break;
        case SVM_EXITINTINFO_TYPE_INTR:
                kvm_queue_interrupt(&svm->vcpu, vector, false);
@@@ -2688,6 -2965,10 +2965,10 @@@ static void svm_vcpu_run(struct kvm_vcp
        u16 gs_selector;
        u16 ldt_selector;
  
+       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
        /*
         * A vmexit emulation is required before the vcpu can be executed
         * again.
        if (unlikely(svm->nested.exit_required))
                return;
  
-       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
        pre_svm_run(svm);
  
        sync_lapic_to_cr8(vcpu);
@@@ -2879,25 -3156,39 +3156,39 @@@ static void svm_cpuid_update(struct kvm
  {
  }
  
+ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+ {
+       switch (func) {
+       case 0x8000000A:
+               entry->eax = 1; /* SVM revision 1 */
+               entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
+                                  ASID emulation to nested SVM */
+               entry->ecx = 0; /* Reserved */
+               entry->edx = 0; /* Do not support any additional features */
+               break;
+       }
+ }
+
  static const struct trace_print_flags svm_exit_reasons_str[] = {
-       { SVM_EXIT_READ_CR0,                    "read_cr0" },
-       { SVM_EXIT_READ_CR3,                    "read_cr3" },
-       { SVM_EXIT_READ_CR4,                    "read_cr4" },
-       { SVM_EXIT_READ_CR8,                    "read_cr8" },
-       { SVM_EXIT_WRITE_CR0,                   "write_cr0" },
-       { SVM_EXIT_WRITE_CR3,                   "write_cr3" },
-       { SVM_EXIT_WRITE_CR4,                   "write_cr4" },
-       { SVM_EXIT_WRITE_CR8,                   "write_cr8" },
-       { SVM_EXIT_READ_DR0,                    "read_dr0" },
-       { SVM_EXIT_READ_DR1,                    "read_dr1" },
-       { SVM_EXIT_READ_DR2,                    "read_dr2" },
-       { SVM_EXIT_READ_DR3,                    "read_dr3" },
-       { SVM_EXIT_WRITE_DR0,                   "write_dr0" },
-       { SVM_EXIT_WRITE_DR1,                   "write_dr1" },
-       { SVM_EXIT_WRITE_DR2,                   "write_dr2" },
-       { SVM_EXIT_WRITE_DR3,                   "write_dr3" },
-       { SVM_EXIT_WRITE_DR5,                   "write_dr5" },
-       { SVM_EXIT_WRITE_DR7,                   "write_dr7" },
+       { SVM_EXIT_READ_CR0,                    "read_cr0" },
+       { SVM_EXIT_READ_CR3,                    "read_cr3" },
+       { SVM_EXIT_READ_CR4,                    "read_cr4" },
+       { SVM_EXIT_READ_CR8,                    "read_cr8" },
+       { SVM_EXIT_WRITE_CR0,                   "write_cr0" },
+       { SVM_EXIT_WRITE_CR3,                   "write_cr3" },
+       { SVM_EXIT_WRITE_CR4,                   "write_cr4" },
+       { SVM_EXIT_WRITE_CR8,                   "write_cr8" },
+       { SVM_EXIT_READ_DR0,                    "read_dr0" },
+       { SVM_EXIT_READ_DR1,                    "read_dr1" },
+       { SVM_EXIT_READ_DR2,                    "read_dr2" },
+       { SVM_EXIT_READ_DR3,                    "read_dr3" },
+       { SVM_EXIT_WRITE_DR0,                   "write_dr0" },
+       { SVM_EXIT_WRITE_DR1,                   "write_dr1" },
+       { SVM_EXIT_WRITE_DR2,                   "write_dr2" },
+       { SVM_EXIT_WRITE_DR3,                   "write_dr3" },
+       { SVM_EXIT_WRITE_DR5,                   "write_dr5" },
+       { SVM_EXIT_WRITE_DR7,                   "write_dr7" },
        { SVM_EXIT_EXCP_BASE + DB_VECTOR,       "DB excp" },
        { SVM_EXIT_EXCP_BASE + BP_VECTOR,       "BP excp" },
        { SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" },
@@@ -2946,8 -3237,10 +3237,10 @@@ static void svm_fpu_deactivate(struct k
  {
        struct vcpu_svm *svm = to_svm(vcpu);
  
-       update_cr0_intercept(svm);
        svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
+       if (is_nested(svm))
+               svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+       update_cr0_intercept(svm);
  }
  
  static struct kvm_x86_ops svm_x86_ops = {
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
-       .get_dr = svm_get_dr,
-       .set_dr = svm_set_dr,
+       .set_dr7 = svm_set_dr7,
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
        .cpuid_update = svm_cpuid_update,
  
        .rdtscp_supported = svm_rdtscp_supported,
+       .set_supported_cpuid = svm_set_supported_cpuid,
  };
  
  static int __init svm_init(void)
  {
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
-                             THIS_MODULE);
+                       __alignof__(struct vcpu_svm), THIS_MODULE);
  }
  
  static void __exit svm_exit(void)
diff --combined arch/x86/kvm/vmx.c
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/moduleparam.h>
  #include <linux/ftrace_event.h>
  #include <linux/slab.h>
+ #include <linux/tboot.h>
  #include "kvm_cache_regs.h"
  #include "x86.h"
  
@@@ -98,6 -99,8 +99,8 @@@ module_param(ple_gap, int, S_IRUGO)
  static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
  module_param(ple_window, int, S_IRUGO);
  
+ #define NR_AUTOLOAD_MSRS 1
+
  struct vmcs {
        u32 revision_id;
        u32 abort;
@@@ -125,6 -128,11 +128,11 @@@ struct vcpu_vmx 
        u64                   msr_guest_kernel_gs_base;
  #endif
        struct vmcs          *vmcs;
+       struct msr_autoload {
+               unsigned nr;
+               struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+               struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+       } msr_autoload;
        struct {
                int           loaded;
                u16           fs_sel, gs_sel, ldt_sel;
@@@ -234,56 -242,56 +242,56 @@@ static const u32 vmx_msr_index[] = 
  };
  #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
  
- static inline int is_page_fault(u32 intr_info)
+ static inline bool is_page_fault(u32 intr_info)
  {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
  }
  
- static inline int is_no_device(u32 intr_info)
+ static inline bool is_no_device(u32 intr_info)
  {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
  }
  
- static inline int is_invalid_opcode(u32 intr_info)
+ static inline bool is_invalid_opcode(u32 intr_info)
  {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
  }
  
- static inline int is_external_interrupt(u32 intr_info)
+ static inline bool is_external_interrupt(u32 intr_info)
  {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
  }
  
- static inline int is_machine_check(u32 intr_info)
+ static inline bool is_machine_check(u32 intr_info)
  {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
  }
  
- static inline int cpu_has_vmx_msr_bitmap(void)
+ static inline bool cpu_has_vmx_msr_bitmap(void)
  {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
  }
  
- static inline int cpu_has_vmx_tpr_shadow(void)
+ static inline bool cpu_has_vmx_tpr_shadow(void)
  {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
  }
  
- static inline int vm_need_tpr_shadow(struct kvm *kvm)
+ static inline bool vm_need_tpr_shadow(struct kvm *kvm)
  {
        return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
  }
  
- static inline int cpu_has_secondary_exec_ctrls(void)
+ static inline bool cpu_has_secondary_exec_ctrls(void)
  {
        return vmcs_config.cpu_based_exec_ctrl &
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@@ -303,80 -311,80 +311,80 @@@ static inline bool cpu_has_vmx_flexprio
  
  static inline bool cpu_has_vmx_ept_execute_only(void)
  {
-       return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+       return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
  }
  
  static inline bool cpu_has_vmx_eptp_uncacheable(void)
  {
-       return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+       return vmx_capability.ept & VMX_EPTP_UC_BIT;
  }
  
  static inline bool cpu_has_vmx_eptp_writeback(void)
  {
-       return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+       return vmx_capability.ept & VMX_EPTP_WB_BIT;
  }
  
  static inline bool cpu_has_vmx_ept_2m_page(void)
  {
-       return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+       return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
  }
  
  static inline bool cpu_has_vmx_ept_1g_page(void)
  {
-       return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+       return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
  }
  
- static inline int cpu_has_vmx_invept_individual_addr(void)
+ static inline bool cpu_has_vmx_invept_individual_addr(void)
  {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
  }
  
- static inline int cpu_has_vmx_invept_context(void)
+ static inline bool cpu_has_vmx_invept_context(void)
  {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
  }
  
- static inline int cpu_has_vmx_invept_global(void)
+ static inline bool cpu_has_vmx_invept_global(void)
  {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
  }
  
- static inline int cpu_has_vmx_ept(void)
+ static inline bool cpu_has_vmx_ept(void)
  {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_ENABLE_EPT;
  }
  
- static inline int cpu_has_vmx_unrestricted_guest(void)
+ static inline bool cpu_has_vmx_unrestricted_guest(void)
  {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_UNRESTRICTED_GUEST;
  }
  
- static inline int cpu_has_vmx_ple(void)
+ static inline bool cpu_has_vmx_ple(void)
  {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_PAUSE_LOOP_EXITING;
  }
  
- static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+ static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
  {
        return flexpriority_enabled && irqchip_in_kernel(kvm);
  }
  
- static inline int cpu_has_vmx_vpid(void)
+ static inline bool cpu_has_vmx_vpid(void)
  {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_ENABLE_VPID;
  }
  
- static inline int cpu_has_vmx_rdtscp(void)
+ static inline bool cpu_has_vmx_rdtscp(void)
  {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_RDTSCP;
  }
  
- static inline int cpu_has_virtual_nmis(void)
+ static inline bool cpu_has_virtual_nmis(void)
  {
        return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
  }
@@@ -595,16 -603,56 +603,56 @@@ static void update_exception_bitmap(str
        vmcs_write32(EXCEPTION_BITMAP, eb);
  }
  
+ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+ {
+       unsigned i;
+       struct msr_autoload *m = &vmx->msr_autoload;
+       for (i = 0; i < m->nr; ++i)
+               if (m->guest[i].index == msr)
+                       break;
+       if (i == m->nr)
+               return;
+       --m->nr;
+       m->guest[i] = m->guest[m->nr];
+       m->host[i] = m->host[m->nr];
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+ }
+
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+                                 u64 guest_val, u64 host_val)
+ {
+       unsigned i;
+       struct msr_autoload *m = &vmx->msr_autoload;
+       for (i = 0; i < m->nr; ++i)
+               if (m->guest[i].index == msr)
+                       break;
+       if (i == m->nr) {
+               ++m->nr;
+               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+       }
+       m->guest[i].index = msr;
+       m->guest[i].value = guest_val;
+       m->host[i].index = msr;
+       m->host[i].value = host_val;
+ }
+
  static void reload_tss(void)
  {
        /*
         * VT restores TR but not its size.  Useless.
         */
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *descs;
  
-       kvm_get_gdt(&gdt);
-       descs = (void *)gdt.base;
+       native_store_gdt(&gdt);
+       descs = (void *)gdt.address;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
  }
@@@ -631,9 -679,57 +679,57 @@@ static bool update_transition_efer(stru
        guest_efer |= host_efer & ignore_bits;
        vmx->guest_msrs[efer_offset].data = guest_efer;
        vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+       clear_atomic_switch_msr(vmx, MSR_EFER);
+       /* On ept, can't emulate nx, and must switch nx atomically */
+       if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+               guest_efer = vmx->vcpu.arch.efer;
+               if (!(guest_efer & EFER_LMA))
+                       guest_efer &= ~EFER_LME;
+               add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+               return false;
+       }
        return true;
  }
  
+ static unsigned long segment_base(u16 selector)
+ {
+       struct desc_ptr gdt;
+       struct desc_struct *d;
+       unsigned long table_base;
+       unsigned long v;
+       if (!(selector & ~3))
+               return 0;
+       native_store_gdt(&gdt);
+       table_base = gdt.address;
+       if (selector & 4) {           /* from ldt */
+               u16 ldt_selector = kvm_read_ldt();
+               if (!(ldt_selector & ~3))
+                       return 0;
+               table_base = segment_base(ldt_selector);
+       }
+       d = (struct desc_struct *)(table_base + (selector & ~7));
+       v = get_desc_base(d);
+ #ifdef CONFIG_X86_64
+        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
+ #endif
+       return v;
+ }
+
+ static inline unsigned long kvm_read_tr_base(void)
+ {
+       u16 tr;
+       asm("str %0" : "=g"(tr));
+       return segment_base(tr);
+ }
+
  static void vmx_save_host_state(struct kvm_vcpu *vcpu)
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@@ -758,7 -854,7 +854,7 @@@ static void vmx_vcpu_load(struct kvm_vc
        }
  
        if (vcpu->cpu != cpu) {
-               struct descriptor_table dt;
+               struct desc_ptr dt;
                unsigned long sysenter_esp;
  
                vcpu->cpu = cpu;
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
-               kvm_get_gdt(&dt);
-               vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+               native_store_gdt(&dt);
+               vmcs_writel(HOST_GDTR_BASE, dt.address);   /* 22.2.4 */
  
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@@ -846,9 -942,9 +942,9 @@@ static u32 vmx_get_interrupt_shadow(str
        int ret = 0;
  
        if (interruptibility & GUEST_INTR_STATE_STI)
-               ret |= X86_SHADOW_INT_STI;
+               ret |= KVM_X86_SHADOW_INT_STI;
        if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-               ret |= X86_SHADOW_INT_MOV_SS;
+               ret |= KVM_X86_SHADOW_INT_MOV_SS;
  
        return ret & mask;
  }
@@@ -860,9 -956,9 +956,9 @@@ static void vmx_set_interrupt_shadow(st
  
        interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
  
-       if (mask & X86_SHADOW_INT_MOV_SS)
+       if (mask & KVM_X86_SHADOW_INT_MOV_SS)
                interruptibility |= GUEST_INTR_STATE_MOV_SS;
-       if (mask & X86_SHADOW_INT_STI)
+       else if (mask & KVM_X86_SHADOW_INT_STI)
                interruptibility |= GUEST_INTR_STATE_STI;
  
        if ((interruptibility != interruptibility_old))
@@@ -882,7 -978,8 +978,8 @@@ static void skip_emulated_instruction(s
  }
  
  static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-                               bool has_error_code, u32 error_code)
+                               bool has_error_code, u32 error_code,
+                               bool reinject)
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info = nr | INTR_INFO_VALID_MASK;
@@@ -1176,9 -1273,16 +1273,16 @@@ static __init int vmx_disabled_by_bios(
        u64 msr;
  
        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
-       return (msr & (FEATURE_CONTROL_LOCKED |
-                      FEATURE_CONTROL_VMXON_ENABLED))
-           == FEATURE_CONTROL_LOCKED;
+       if (msr & FEATURE_CONTROL_LOCKED) {
+               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+                       && tboot_enabled())
+                       return 1;
+               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+                       && !tboot_enabled())
+                       return 1;
+       }
+       return 0;
        /* locked but not enabled */
  }
  
@@@ -1186,21 -1290,23 +1290,23 @@@ static int hardware_enable(void *garbag
  {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
-       u64 old;
+       u64 old, test_bits;
  
        if (read_cr4() & X86_CR4_VMXE)
                return -EBUSY;
  
        INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-       if ((old & (FEATURE_CONTROL_LOCKED |
-                   FEATURE_CONTROL_VMXON_ENABLED))
-           != (FEATURE_CONTROL_LOCKED |
-               FEATURE_CONTROL_VMXON_ENABLED))
+       test_bits = FEATURE_CONTROL_LOCKED;
+       test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+       if (tboot_enabled())
+               test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+       if ((old & test_bits) != test_bits) {
                /* enable and lock */
-               wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
-                      FEATURE_CONTROL_LOCKED |
-                      FEATURE_CONTROL_VMXON_ENABLED);
+               wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+       }
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX
                      : : "a"(&phys_addr), "m"(phys_addr)
@@@ -1521,7 -1627,7 +1627,7 @@@ static gva_t rmode_tss_base(struct kvm 
                struct kvm_memslots *slots;
                gfn_t base_gfn;
  
-               slots = rcu_dereference(kvm->memslots);
+               slots = kvm_memslots(kvm);
                base_gfn = kvm->memslots->memslots[0].base_gfn +
                                 kvm->memslots->memslots[0].npages - 3;
                return base_gfn << PAGE_SHIFT;
@@@ -1649,6 -1755,7 +1755,7 @@@ static void exit_lmode(struct kvm_vcpu 
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_IA32E_MODE);
+       vmx_set_efer(vcpu, vcpu->arch.efer);
  }
  
  #endif
@@@ -1934,28 -2041,28 +2041,28 @@@ static void vmx_get_cs_db_l_bits(struc
        *l = (ar >> 13) & 1;
  }
  
- static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
-       dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_IDTR_BASE);
+       dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_IDTR_BASE);
  }
  
- static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
-       vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_IDTR_BASE, dt->base);
+       vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_IDTR_BASE, dt->address);
  }
  
- static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
-       dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_GDTR_BASE);
+       dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_GDTR_BASE);
  }
  
- static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+ static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  {
-       vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_GDTR_BASE, dt->base);
+       vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_GDTR_BASE, dt->address);
  }
  
  static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@@ -2296,6 -2403,16 +2403,16 @@@ static void allocate_vpid(struct vcpu_v
        spin_unlock(&vmx_vpid_lock);
  }
  
+ static void free_vpid(struct vcpu_vmx *vmx)
+ {
+       if (!enable_vpid)
+               return;
+       spin_lock(&vmx_vpid_lock);
+       if (vmx->vpid != 0)
+               __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+       spin_unlock(&vmx_vpid_lock);
+ }
+
  static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
  {
        int f = sizeof(unsigned long);
@@@ -2334,7 -2451,7 +2451,7 @@@ static int vmx_vcpu_setup(struct vcpu_v
        u32 junk;
        u64 host_pat, tsc_this, tsc_base;
        unsigned long a;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
        int i;
        unsigned long kvm_vmx_return;
        u32 exec_control;
  
        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
  
-       kvm_get_idt(&dt);
-       vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+       native_store_idt(&dt);
+       vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
  
        asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
  
        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@@ -2703,7 -2822,8 +2822,7 @@@ static int vmx_nmi_allowed(struct kvm_v
                return 0;
  
        return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 -                      (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
 -                              GUEST_INTR_STATE_NMI));
 +                      (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
  }
  
  static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
@@@ -2947,22 -3067,20 +3066,20 @@@ static int handle_io(struct kvm_vcpu *v
        int size, in, string;
        unsigned port;
  
-       ++vcpu->stat.io_exits;
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        string = (exit_qualification & 16) != 0;
+       in = (exit_qualification & 8) != 0;
  
-       if (string) {
-               if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
-                       return 0;
-               return 1;
-       }
+       ++vcpu->stat.io_exits;
  
-       size = (exit_qualification & 7) + 1;
-       in = (exit_qualification & 8) != 0;
-       port = exit_qualification >> 16;
+       if (string || in)
+               return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
  
+       port = exit_qualification >> 16;
+       size = (exit_qualification & 7) + 1;
        skip_emulated_instruction(vcpu);
-       return kvm_emulate_pio(vcpu, in, size, port);
+       return kvm_fast_pio_out(vcpu, size, port);
  }
  
  static void
@@@ -3053,19 -3171,9 +3170,9 @@@ static int handle_cr(struct kvm_vcpu *v
        return 0;
  }
  
  static int handle_dr(struct kvm_vcpu *vcpu)
  {
        unsigned long exit_qualification;
-       unsigned long val;
        int dr, reg;
  
        /* Do not handle if the CPL > 0, will trigger GP on re-entry */
        dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
        reg = DEBUG_REG_ACCESS_REG(exit_qualification);
        if (exit_qualification & TYPE_MOV_FROM_DR) {
-               switch (dr) {
-               case 0 ... 3:
-                       val = vcpu->arch.db[dr];
-                       break;
-               case 4:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               case 6:
-                       val = vcpu->arch.dr6;
-                       break;
-               case 5:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               default: /* 7 */
-                       val = vcpu->arch.dr7;
-                       break;
-               }
-               kvm_register_write(vcpu, reg, val);
-       } else {
-               val = vcpu->arch.regs[reg];
-               switch (dr) {
-               case 0 ... 3:
-                       vcpu->arch.db[dr] = val;
-                       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-                               vcpu->arch.eff_db[dr] = val;
-                       break;
-               case 4:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               case 6:
-                       if (val & 0xffffffff00000000ULL) {
-                               kvm_inject_gp(vcpu, 0);
-                               return 1;
-                       }
-                       vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
-                       break;
-               case 5:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               default: /* 7 */
-                       if (val & 0xffffffff00000000ULL) {
-                               kvm_inject_gp(vcpu, 0);
-                               return 1;
-                       }
-                       vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-                       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-                               vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-                               vcpu->arch.switch_db_regs =
-                                       (val & DR7_BP_EN_MASK);
-                       }
-                       break;
-               }
-       }
+               unsigned long val;
+               if (!kvm_get_dr(vcpu, dr, &val))
+                       kvm_register_write(vcpu, reg, val);
+       } else
+               kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
        skip_emulated_instruction(vcpu);
        return 1;
  }
  
+ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+ {
+       vmcs_writel(GUEST_DR7, val);
+ }
+
  static int handle_cpuid(struct kvm_vcpu *vcpu)
  {
        kvm_emulate_cpuid(vcpu);
@@@ -3292,6 -3353,8 +3352,8 @@@ static int handle_task_switch(struct kv
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
+       bool has_error_code = false;
+       u32 error_code = 0;
        u16 tss_selector;
        int reason, type, idt_v;
  
                        kvm_clear_interrupt_queue(vcpu);
                        break;
                case INTR_TYPE_HARD_EXCEPTION:
+                       if (vmx->idt_vectoring_info &
+                           VECTORING_INFO_DELIVER_CODE_MASK) {
+                               has_error_code = true;
+                               error_code =
+                                       vmcs_read32(IDT_VECTORING_ERROR_CODE);
+                       }
+                       /* fall through */
                case INTR_TYPE_SOFT_EXCEPTION:
                        kvm_clear_exception_queue(vcpu);
                        break;
                       type != INTR_TYPE_NMI_INTR))
                skip_emulated_instruction(vcpu);
  
-       if (!kvm_task_switch(vcpu, tss_selector, reason))
+       if (kvm_task_switch(vcpu, tss_selector, reason,
+                               has_error_code, error_code) == EMULATE_FAIL) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+               vcpu->run->internal.ndata = 0;
                return 0;
+       }
  
        /* clear all local breakpoint enable flags */
        vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
@@@ -3574,7 -3649,7 +3648,7 @@@ static int vmx_handle_exit(struct kvm_v
        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
  
-       trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
+       trace_kvm_exit(exit_reason, vcpu);
  
        /* If guest state is invalid, start emulating */
        if (vmx->emulation_required && emulate_invalid_guest_state)
@@@ -3923,10 -3998,7 +3997,7 @@@ static void vmx_free_vcpu(struct kvm_vc
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
  
-       spin_lock(&vmx_vpid_lock);
-       if (vmx->vpid != 0)
-               __clear_bit(vmx->vpid, vmx_vpid_bitmap);
-       spin_unlock(&vmx_vpid_lock);
+       free_vpid(vmx);
        vmx_free_vmcs(vcpu);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
@@@ -3988,6 -4060,7 +4059,7 @@@ free_msrs
  uninit_vcpu:
        kvm_vcpu_uninit(&vmx->vcpu);
  free_vcpu:
+       free_vpid(vmx);
        kmem_cache_free(kvm_vcpu_cache, vmx);
        return ERR_PTR(err);
  }
@@@ -4118,6 -4191,10 +4190,10 @@@ static void vmx_cpuid_update(struct kvm
        }
  }
  
+ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+ {
+ }
+
  static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
+       .set_dr7 = vmx_set_dr7,
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
        .cpuid_update = vmx_cpuid_update,
  
        .rdtscp_supported = vmx_rdtscp_supported,
+       .set_supported_cpuid = vmx_set_supported_cpuid,
  };
  
  static int __init vmx_init(void)
  
        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
  
-       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+                    __alignof__(struct vcpu_vmx), THIS_MODULE);
        if (r)
                goto out3;
  
diff --combined arch/x86/kvm/x86.c
@@@ -42,7 -42,7 +42,7 @@@
  #include <linux/slab.h>
  #include <linux/perf_event.h>
  #include <trace/events/kvm.h>
- #undef TRACE_INCLUDE_FILE
  #define CREATE_TRACE_POINTS
  #include "trace.h"
  
@@@ -224,34 -224,6 +224,6 @@@ static void drop_user_return_notifiers(
                kvm_on_user_return(&smsr->urn);
  }
  
- unsigned long segment_base(u16 selector)
- {
-       struct descriptor_table gdt;
-       struct desc_struct *d;
-       unsigned long table_base;
-       unsigned long v;
-       if (selector == 0)
-               return 0;
-       kvm_get_gdt(&gdt);
-       table_base = gdt.base;
-       if (selector & 4) {           /* from ldt */
-               u16 ldt_selector = kvm_read_ldt();
-               table_base = segment_base(ldt_selector);
-       }
-       d = (struct desc_struct *)(table_base + (selector & ~7));
-       v = get_desc_base(d);
- #ifdef CONFIG_X86_64
-       if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
-               v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
- #endif
-       return v;
- }
- EXPORT_SYMBOL_GPL(segment_base);
  u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
  {
        if (irqchip_in_kernel(vcpu->kvm))
@@@ -293,7 -265,8 +265,8 @@@ static int exception_class(int vector
  }
  
  static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
-               unsigned nr, bool has_error, u32 error_code)
+               unsigned nr, bool has_error, u32 error_code,
+               bool reinject)
  {
        u32 prev_nr;
        int class1, class2;
                vcpu->arch.exception.has_error_code = has_error;
                vcpu->arch.exception.nr = nr;
                vcpu->arch.exception.error_code = error_code;
+               vcpu->arch.exception.reinject = reinject;
                return;
        }
  
  
  void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
  {
-       kvm_multiple_exception(vcpu, nr, false, 0);
+       kvm_multiple_exception(vcpu, nr, false, 0, false);
  }
  EXPORT_SYMBOL_GPL(kvm_queue_exception);
  
+ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+ {
+       kvm_multiple_exception(vcpu, nr, false, 0, true);
+ }
+ EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+
  void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
  {
@@@ -352,10 -332,16 +332,16 @@@ EXPORT_SYMBOL_GPL(kvm_inject_nmi)
  
  void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
  {
-       kvm_multiple_exception(vcpu, nr, true, error_code);
+       kvm_multiple_exception(vcpu, nr, true, error_code, false);
  }
  EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
  
+ void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+ {
+       kvm_multiple_exception(vcpu, nr, true, error_code, true);
+ }
+ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
+
  /*
   * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
   * a #GP and return false.
@@@ -476,7 -462,6 +462,6 @@@ void kvm_set_cr0(struct kvm_vcpu *vcpu
        }
  
        kvm_x86_ops->set_cr0(vcpu, cr0);
-       vcpu->arch.cr0 = cr0;
  
        kvm_mmu_reset_context(vcpu);
        return;
@@@ -485,7 -470,7 +470,7 @@@ EXPORT_SYMBOL_GPL(kvm_set_cr0)
  
  void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
  {
-       kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
+       kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
  }
  EXPORT_SYMBOL_GPL(kvm_lmsw);
  
@@@ -517,7 -502,6 +502,6 @@@ void kvm_set_cr4(struct kvm_vcpu *vcpu
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
-       vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
  }
  EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@@ -592,6 -576,80 +576,80 @@@ unsigned long kvm_get_cr8(struct kvm_vc
  }
  EXPORT_SYMBOL_GPL(kvm_get_cr8);
  
+ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+ {
+       switch (dr) {
+       case 0 ... 3:
+               vcpu->arch.db[dr] = val;
+               if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+                       vcpu->arch.eff_db[dr] = val;
+               break;
+       case 4:
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+               /* fall through */
+       case 6:
+               if (val & 0xffffffff00000000ULL) {
+                       kvm_inject_gp(vcpu, 0);
+                       return 1;
+               }
+               vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+               break;
+       case 5:
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+               /* fall through */
+       default: /* 7 */
+               if (val & 0xffffffff00000000ULL) {
+                       kvm_inject_gp(vcpu, 0);
+                       return 1;
+               }
+               vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
+               if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+                       kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
+                       vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
+               }
+               break;
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_dr);
+
+ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+ {
+       switch (dr) {
+       case 0 ... 3:
+               *val = vcpu->arch.db[dr];
+               break;
+       case 4:
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+               /* fall through */
+       case 6:
+               *val = vcpu->arch.dr6;
+               break;
+       case 5:
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+               /* fall through */
+       default: /* 7 */
+               *val = vcpu->arch.dr7;
+               break;
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_get_dr);
  static inline u32 bit(int bitno)
  {
        return 1 << (bitno & 31);
   * kvm-specific. Those are put in the beginning of the list.
   */
  
- #define KVM_SAVE_MSRS_BEGIN   5
+ #define KVM_SAVE_MSRS_BEGIN   7
  static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+       MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_APIC_ASSIST_PAGE,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
@@@ -625,48 -684,42 +684,42 @@@ static u32 emulated_msrs[] = 
        MSR_IA32_MISC_ENABLE,
  };
  
- static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
  {
-       if (efer & efer_reserved_bits) {
-               kvm_inject_gp(vcpu, 0);
-               return;
-       }
+       if (efer & efer_reserved_bits)
+               return 1;
  
        if (is_paging(vcpu)
-           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-               kvm_inject_gp(vcpu, 0);
-               return;
-       }
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+               return 1;
  
        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;
  
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-               if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-                       kvm_inject_gp(vcpu, 0);
-                       return;
-               }
+               if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+                       return 1;
        }
  
        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;
  
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-               if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-                       kvm_inject_gp(vcpu, 0);
-                       return;
-               }
+               if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+                       return 1;
        }
  
        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;
  
+       kvm_x86_ops->set_efer(vcpu, efer);
        vcpu->arch.efer = efer;
  
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
+       return 0;
  }
  
  void kvm_enable_efer_bits(u64 mask)
@@@ -696,14 -749,22 +749,22 @@@ static int do_set_msr(struct kvm_vcpu *
  
  static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
  {
-       static int version;
+       int version;
+       int r;
        struct pvclock_wall_clock wc;
        struct timespec boot;
  
        if (!wall_clock)
                return;
  
-       version++;
+       r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
+       if (r)
+               return;
+       if (version & 1)
+               ++version;  /* first time write, random junk */
+       ++version;
  
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
  
@@@ -796,6 -857,8 +857,8 @@@ static void kvm_write_guest_time(struc
        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
  
+       vcpu->hv_clock.flags = 0;
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
@@@ -1087,10 -1150,10 +1150,10 @@@ int kvm_set_msr_common(struct kvm_vcpu 
  {
        switch (msr) {
        case MSR_EFER:
-               set_efer(vcpu, data);
-               break;
+               return set_efer(vcpu, data);
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
+               data &= ~(u64)0x100;    /* ignore ignne emulation enable */
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                data);
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
+       case MSR_KVM_WALL_CLOCK_NEW:
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
+       case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
@@@ -1408,9 -1473,11 +1473,11 @@@ int kvm_get_msr_common(struct kvm_vcpu 
                data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
+       case MSR_KVM_WALL_CLOCK_NEW:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
+       case MSR_KVM_SYSTEM_TIME_NEW:
                data = vcpu->arch.time;
                break;
        case MSR_IA32_P5_MC_ADDR:
@@@ -1549,6 -1616,7 +1616,7 @@@ int kvm_dev_ioctl_check_extension(long 
        case KVM_CAP_HYPERV_VAPIC:
        case KVM_CAP_HYPERV_SPIN:
        case KVM_CAP_PCI_SEGMENT:
+       case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
                r = 1;
                break;
@@@ -1713,7 -1781,6 +1781,7 @@@ static int kvm_vcpu_ioctl_set_cpuid(str
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
 +      vcpu_load(vcpu);
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
        r = 0;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
 +      vcpu_put(vcpu);
  
  out_free:
        vfree(cpuid_entries);
@@@ -1752,11 -1818,9 +1820,11 @@@ static int kvm_vcpu_ioctl_set_cpuid2(st
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
 +      vcpu_load(vcpu);
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
 +      vcpu_put(vcpu);
        return 0;
  
  out:
@@@ -1769,6 -1833,7 +1837,7 @@@ static int kvm_vcpu_ioctl_get_cpuid2(st
  {
        int r;
  
+       vcpu_load(vcpu);
        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
  
  out:
        cpuid->nent = vcpu->arch.cpuid_nent;
+       vcpu_put(vcpu);
        return r;
  }
  
@@@ -1910,6 -1976,24 +1980,24 @@@ static void do_cpuid_ent(struct kvm_cpu
                }
                break;
        }
+       case KVM_CPUID_SIGNATURE: {
+               char signature[12] = "KVMKVMKVM\0\0";
+               u32 *sigptr = (u32 *)signature;
+               entry->eax = 0;
+               entry->ebx = sigptr[0];
+               entry->ecx = sigptr[1];
+               entry->edx = sigptr[2];
+               break;
+       }
+       case KVM_CPUID_FEATURES:
+               entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
+                            (1 << KVM_FEATURE_NOP_IO_DELAY) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE2) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+               entry->ebx = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
                entry->ecx &= kvm_supported_word6_x86_features;
                break;
        }
+       kvm_x86_ops->set_supported_cpuid(function, entry);
        put_cpu();
  }
  
@@@ -1953,6 -2040,23 +2044,23 @@@ static int kvm_dev_ioctl_get_supported_
        for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
+       r = -E2BIG;
+       if (nent >= cpuid->nent)
+               goto out_free;
+       do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
+                    cpuid->nent);
+       r = -E2BIG;
+       if (nent >= cpuid->nent)
+               goto out_free;
+       do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
+                    cpuid->nent);
        r = -E2BIG;
        if (nent >= cpuid->nent)
                goto out_free;
@@@ -2032,6 -2136,7 +2140,7 @@@ static int kvm_vcpu_ioctl_x86_setup_mce
        int r;
        unsigned bank_num = mcg_cap & 0xff, bank;
  
+       vcpu_load(vcpu);
        r = -EINVAL;
        if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
                goto out;
        for (bank = 0; bank < bank_num; bank++)
                vcpu->arch.mce_banks[bank*4] = ~(u64)0;
  out:
+       vcpu_put(vcpu);
        return r;
  }
  
@@@ -2105,14 -2211,20 +2215,20 @@@ static void kvm_vcpu_ioctl_x86_get_vcpu
  {
        vcpu_load(vcpu);
  
-       events->exception.injected = vcpu->arch.exception.pending;
+       events->exception.injected =
+               vcpu->arch.exception.pending &&
+               !kvm_exception_is_soft(vcpu->arch.exception.nr);
        events->exception.nr = vcpu->arch.exception.nr;
        events->exception.has_error_code = vcpu->arch.exception.has_error_code;
        events->exception.error_code = vcpu->arch.exception.error_code;
  
-       events->interrupt.injected = vcpu->arch.interrupt.pending;
+       events->interrupt.injected =
+               vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
        events->interrupt.nr = vcpu->arch.interrupt.nr;
-       events->interrupt.soft = vcpu->arch.interrupt.soft;
+       events->interrupt.soft = 0;
+       events->interrupt.shadow =
+               kvm_x86_ops->get_interrupt_shadow(vcpu,
+                       KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
  
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
        events->sipi_vector = vcpu->arch.sipi_vector;
  
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                        | KVM_VCPUEVENT_VALID_SHADOW);
  
        vcpu_put(vcpu);
  }
@@@ -2130,7 -2243,8 +2247,8 @@@ static int kvm_vcpu_ioctl_x86_set_vcpu_
                                              struct kvm_vcpu_events *events)
  {
        if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
-                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                             | KVM_VCPUEVENT_VALID_SHADOW))
                return -EINVAL;
  
        vcpu_load(vcpu);
        vcpu->arch.interrupt.soft = events->interrupt.soft;
        if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
                kvm_pic_clear_isr_ack(vcpu->kvm);
+       if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+               kvm_x86_ops->set_interrupt_shadow(vcpu,
+                                                 events->interrupt.shadow);
  
        vcpu->arch.nmi_injected = events->nmi.injected;
        if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
        return 0;
  }
  
+ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+                                            struct kvm_debugregs *dbgregs)
+ {
+       vcpu_load(vcpu);
+       memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+       dbgregs->dr6 = vcpu->arch.dr6;
+       dbgregs->dr7 = vcpu->arch.dr7;
+       dbgregs->flags = 0;
+       vcpu_put(vcpu);
+ }
+ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+                                           struct kvm_debugregs *dbgregs)
+ {
+       if (dbgregs->flags)
+               return -EINVAL;
+       vcpu_load(vcpu);
+       memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+       vcpu->arch.dr6 = dbgregs->dr6;
+       vcpu->arch.dr7 = dbgregs->dr7;
+       vcpu_put(vcpu);
+       return 0;
+ }
  long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
  {
                r = -EFAULT;
                if (copy_from_user(&mce, argp, sizeof mce))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
+               vcpu_put(vcpu);
                break;
        }
        case KVM_GET_VCPU_EVENTS: {
                r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
                break;
        }
+       case KVM_GET_DEBUGREGS: {
+               struct kvm_debugregs dbgregs;
+               kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+               r = -EFAULT;
+               if (copy_to_user(argp, &dbgregs,
+                                sizeof(struct kvm_debugregs)))
+                       break;
+               r = 0;
+               break;
+       }
+       case KVM_SET_DEBUGREGS: {
+               struct kvm_debugregs dbgregs;
+               r = -EFAULT;
+               if (copy_from_user(&dbgregs, argp,
+                                  sizeof(struct kvm_debugregs)))
+                       break;
+               r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
+               break;
+       }
        default:
                r = -EINVAL;
        }
@@@ -2390,7 -2562,7 +2566,7 @@@ gfn_t unalias_gfn_instantiation(struct 
        struct kvm_mem_alias *alias;
        struct kvm_mem_aliases *aliases;
  
-       aliases = rcu_dereference(kvm->arch.aliases);
+       aliases = kvm_aliases(kvm);
  
        for (i = 0; i < aliases->naliases; ++i) {
                alias = &aliases->aliases[i];
@@@ -2409,7 -2581,7 +2585,7 @@@ gfn_t unalias_gfn(struct kvm *kvm, gfn_
        struct kvm_mem_alias *alias;
        struct kvm_mem_aliases *aliases;
  
-       aliases = rcu_dereference(kvm->arch.aliases);
+       aliases = kvm_aliases(kvm);
  
        for (i = 0; i < aliases->naliases; ++i) {
                alias = &aliases->aliases[i];
@@@ -2804,11 -2976,13 +2980,13 @@@ long kvm_arch_vm_ioctl(struct file *fil
                r = -EFAULT;
                if (copy_from_user(&irq_event, argp, sizeof irq_event))
                        goto out;
+               r = -ENXIO;
                if (irqchip_in_kernel(kvm)) {
                        __s32 status;
                        status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event.irq, irq_event.level);
                        if (ioctl == KVM_IRQ_LINE_STATUS) {
+                               r = -EFAULT;
                                irq_event.status = status;
                                if (copy_to_user(argp, &irq_event,
                                                        sizeof irq_event))
@@@ -3024,6 -3198,18 +3202,18 @@@ static int vcpu_mmio_read(struct kvm_vc
        return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
  }
  
+ static void kvm_set_segment(struct kvm_vcpu *vcpu,
+                       struct kvm_segment *var, int seg)
+ {
+       kvm_x86_ops->set_segment(vcpu, var, seg);
+ }
+ void kvm_get_segment(struct kvm_vcpu *vcpu,
+                    struct kvm_segment *var, int seg)
+ {
+       kvm_x86_ops->get_segment(vcpu, var, seg);
+ }
  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
@@@ -3104,14 -3290,17 +3294,17 @@@ static int kvm_read_guest_virt_system(g
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
  }
  
- static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu, u32 *error)
+ static int kvm_write_guest_virt_system(gva_t addr, void *val,
+                                      unsigned int bytes,
+                                      struct kvm_vcpu *vcpu,
+                                      u32 *error)
  {
        void *data = val;
        int r = X86EMUL_CONTINUE;
  
        while (bytes) {
-               gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
+               gpa_t gpa =  vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
+                                                      PFERR_WRITE_MASK, error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@@ -3134,7 -3323,6 +3327,6 @@@ out
        return r;
  }
  
  static int emulator_read_emulated(unsigned long addr,
                                  void *val,
                                  unsigned int bytes,
@@@ -3237,9 -3425,9 +3429,9 @@@ mmio
  }
  
  int emulator_write_emulated(unsigned long addr,
-                                  const void *val,
-                                  unsigned int bytes,
-                                  struct kvm_vcpu *vcpu)
+                           const void *val,
+                           unsigned int bytes,
+                           struct kvm_vcpu *vcpu)
  {
        /* Crossing a page boundary? */
        if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  }
  EXPORT_SYMBOL_GPL(emulator_write_emulated);
  
+ #define CMPXCHG_TYPE(t, ptr, old, new) \
+       (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
+ #ifdef CONFIG_X86_64
+ #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
+ #else
+ #  define CMPXCHG64(ptr, old, new) \
+       (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
+ #endif
  static int emulator_cmpxchg_emulated(unsigned long addr,
                                     const void *old,
                                     const void *new,
                                     unsigned int bytes,
                                     struct kvm_vcpu *vcpu)
  {
-       printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
- #ifndef CONFIG_X86_64
-       /* guests cmpxchg8b have to be emulated atomically */
-       if (bytes == 8) {
-               gpa_t gpa;
-               struct page *page;
-               char *kaddr;
-               u64 val;
+       gpa_t gpa;
+       struct page *page;
+       char *kaddr;
+       bool exchanged;
  
-               gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
+       /* guests cmpxchg8b have to be emulated atomically */
+       if (bytes > 8 || (bytes & (bytes - 1)))
+               goto emul_write;
  
-               if (gpa == UNMAPPED_GVA ||
-                  (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
-                       goto emul_write;
+       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
  
-               if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
-                       goto emul_write;
+       if (gpa == UNMAPPED_GVA ||
+           (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+               goto emul_write;
  
-               val = *(u64 *)new;
+       if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
+               goto emul_write;
  
-               page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  
-               kaddr = kmap_atomic(page, KM_USER0);
-               set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
-               kunmap_atomic(kaddr, KM_USER0);
-               kvm_release_page_dirty(page);
+       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr += offset_in_page(gpa);
+       switch (bytes) {
+       case 1:
+               exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
+               break;
+       case 2:
+               exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
+               break;
+       case 4:
+               exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
+               break;
+       case 8:
+               exchanged = CMPXCHG64(kaddr, old, new);
+               break;
+       default:
+               BUG();
        }
+       kunmap_atomic(kaddr, KM_USER0);
+       kvm_release_page_dirty(page);
+       if (!exchanged)
+               return X86EMUL_CMPXCHG_FAILED;
+       kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+       return X86EMUL_CONTINUE;
  emul_write:
- #endif
+       printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  
        return emulator_write_emulated(addr, new, bytes, vcpu);
  }
  
+ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
+ {
+       /* TODO: String I/O for in kernel device */
+       int r;
+       if (vcpu->arch.pio.in)
+               r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+                                   vcpu->arch.pio.size, pd);
+       else
+               r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+                                    vcpu->arch.pio.port, vcpu->arch.pio.size,
+                                    pd);
+       return r;
+ }
+ static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
+                            unsigned int count, struct kvm_vcpu *vcpu)
+ {
+       if (vcpu->arch.pio.count)
+               goto data_avail;
+       trace_kvm_pio(1, port, size, 1);
+       vcpu->arch.pio.port = port;
+       vcpu->arch.pio.in = 1;
+       vcpu->arch.pio.count  = count;
+       vcpu->arch.pio.size = size;
+       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+       data_avail:
+               memcpy(val, vcpu->arch.pio_data, size * count);
+               vcpu->arch.pio.count = 0;
+               return 1;
+       }
+       vcpu->run->exit_reason = KVM_EXIT_IO;
+       vcpu->run->io.direction = KVM_EXIT_IO_IN;
+       vcpu->run->io.size = size;
+       vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+       vcpu->run->io.count = count;
+       vcpu->run->io.port = port;
+       return 0;
+ }
+ static int emulator_pio_out_emulated(int size, unsigned short port,
+                             const void *val, unsigned int count,
+                             struct kvm_vcpu *vcpu)
+ {
+       trace_kvm_pio(0, port, size, 1);
+       vcpu->arch.pio.port = port;
+       vcpu->arch.pio.in = 0;
+       vcpu->arch.pio.count = count;
+       vcpu->arch.pio.size = size;
+       memcpy(vcpu->arch.pio_data, val, size * count);
+       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+               vcpu->arch.pio.count = 0;
+               return 1;
+       }
+       vcpu->run->exit_reason = KVM_EXIT_IO;
+       vcpu->run->io.direction = KVM_EXIT_IO_OUT;
+       vcpu->run->io.size = size;
+       vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+       vcpu->run->io.count = count;
+       vcpu->run->io.port = port;
+       return 0;
+ }
  static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  {
        return kvm_x86_ops->get_segment_base(vcpu, seg);
@@@ -3316,14 -3609,14 +3613,14 @@@ int emulate_clts(struct kvm_vcpu *vcpu
  
  int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  {
-       return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
+       return kvm_get_dr(ctxt->vcpu, dr, dest);
  }
  
  int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  {
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
  
-       return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
+       return kvm_set_dr(ctxt->vcpu, dr, value & mask);
  }
  
  void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
  }
  EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
  
- static struct x86_emulate_ops emulate_ops = {
-       .read_std            = kvm_read_guest_virt_system,
-       .fetch               = kvm_fetch_guest_virt,
-       .read_emulated       = emulator_read_emulated,
-       .write_emulated      = emulator_write_emulated,
-       .cmpxchg_emulated    = emulator_cmpxchg_emulated,
- };
- static void cache_all_regs(struct kvm_vcpu *vcpu)
+ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  {
-       kvm_register_read(vcpu, VCPU_REGS_RAX);
-       kvm_register_read(vcpu, VCPU_REGS_RSP);
-       kvm_register_read(vcpu, VCPU_REGS_RIP);
-       vcpu->arch.regs_dirty = ~0;
+       return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  }
  
- int emulate_instruction(struct kvm_vcpu *vcpu,
-                       unsigned long cr2,
-                       u16 error_code,
-                       int emulation_type)
+ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
  {
-       int r, shadow_mask;
-       struct decode_cache *c;
-       struct kvm_run *run = vcpu->run;
-       kvm_clear_exception_queue(vcpu);
-       vcpu->arch.mmio_fault_cr2 = cr2;
-       /*
-        * TODO: fix emulate.c to use guest_read/write_register
-        * instead of direct ->regs accesses, can save hundred cycles
-        * on Intel for instructions that don't read/change RSP, for
-        * for example.
-        */
-       cache_all_regs(vcpu);
-       vcpu->mmio_is_write = 0;
-       vcpu->arch.pio.string = 0;
+       unsigned long value;
+       switch (cr) {
+       case 0:
+               value = kvm_read_cr0(vcpu);
+               break;
+       case 2:
+               value = vcpu->arch.cr2;
+               break;
+       case 3:
+               value = vcpu->arch.cr3;
+               break;
+       case 4:
+               value = kvm_read_cr4(vcpu);
+               break;
+       case 8:
+               value = kvm_get_cr8(vcpu);
+               break;
+       default:
+               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+               return 0;
+       }
+       return value;
+ }
+ static void emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
+ {
+       switch (cr) {
+       case 0:
+               kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
+               break;
+       case 2:
+               vcpu->arch.cr2 = val;
+               break;
+       case 3:
+               kvm_set_cr3(vcpu, val);
+               break;
+       case 4:
+               kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
+               break;
+       case 8:
+               kvm_set_cr8(vcpu, val & 0xfUL);
+               break;
+       default:
+               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+       }
+ }
+ static int emulator_get_cpl(struct kvm_vcpu *vcpu)
+ {
+       return kvm_x86_ops->get_cpl(vcpu);
+ }
+ static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
+ {
+       kvm_x86_ops->get_gdt(vcpu, dt);
+ }
+ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
+                                          struct kvm_vcpu *vcpu)
+ {
+       struct kvm_segment var;
+       kvm_get_segment(vcpu, &var, seg);
+       if (var.unusable)
+               return false;
+       if (var.g)
+               var.limit >>= 12;
+       set_desc_limit(desc, var.limit);
+       set_desc_base(desc, (unsigned long)var.base);
+       desc->type = var.type;
+       desc->s = var.s;
+       desc->dpl = var.dpl;
+       desc->p = var.present;
+       desc->avl = var.avl;
+       desc->l = var.l;
+       desc->d = var.db;
+       desc->g = var.g;
+       return true;
+ }
+ static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
+                                          struct kvm_vcpu *vcpu)
+ {
+       struct kvm_segment var;
+       /* needed to preserve selector */
+       kvm_get_segment(vcpu, &var, seg);
+       var.base = get_desc_base(desc);
+       var.limit = get_desc_limit(desc);
+       if (desc->g)
+               var.limit = (var.limit << 12) | 0xfff;
+       var.type = desc->type;
+       var.present = desc->p;
+       var.dpl = desc->dpl;
+       var.db = desc->d;
+       var.s = desc->s;
+       var.l = desc->l;
+       var.g = desc->g;
+       var.avl = desc->avl;
+       var.present = desc->p;
+       var.unusable = !var.present;
+       var.padding = 0;
+       kvm_set_segment(vcpu, &var, seg);
+       return;
+ }
+ static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
+ {
+       struct kvm_segment kvm_seg;
+       kvm_get_segment(vcpu, &kvm_seg, seg);
+       return kvm_seg.selector;
+ }
+ static void emulator_set_segment_selector(u16 sel, int seg,
+                                         struct kvm_vcpu *vcpu)
+ {
+       struct kvm_segment kvm_seg;
+       kvm_get_segment(vcpu, &kvm_seg, seg);
+       kvm_seg.selector = sel;
+       kvm_set_segment(vcpu, &kvm_seg, seg);
+ }
+ static void emulator_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ {
+       kvm_x86_ops->set_rflags(vcpu, rflags);
+ }
+ static struct x86_emulate_ops emulate_ops = {
+       .read_std            = kvm_read_guest_virt_system,
+       .write_std           = kvm_write_guest_virt_system,
+       .fetch               = kvm_fetch_guest_virt,
+       .read_emulated       = emulator_read_emulated,
+       .write_emulated      = emulator_write_emulated,
+       .cmpxchg_emulated    = emulator_cmpxchg_emulated,
+       .pio_in_emulated     = emulator_pio_in_emulated,
+       .pio_out_emulated    = emulator_pio_out_emulated,
+       .get_cached_descriptor = emulator_get_cached_descriptor,
+       .set_cached_descriptor = emulator_set_cached_descriptor,
+       .get_segment_selector = emulator_get_segment_selector,
+       .set_segment_selector = emulator_set_segment_selector,
+       .get_gdt             = emulator_get_gdt,
+       .get_cr              = emulator_get_cr,
+       .set_cr              = emulator_set_cr,
+       .cpl                 = emulator_get_cpl,
+       .set_rflags          = emulator_set_rflags,
+ };
+ static void cache_all_regs(struct kvm_vcpu *vcpu)
+ {
+       kvm_register_read(vcpu, VCPU_REGS_RAX);
+       kvm_register_read(vcpu, VCPU_REGS_RSP);
+       kvm_register_read(vcpu, VCPU_REGS_RIP);
+       vcpu->arch.regs_dirty = ~0;
+ }
+ int emulate_instruction(struct kvm_vcpu *vcpu,
+                       unsigned long cr2,
+                       u16 error_code,
+                       int emulation_type)
+ {
+       int r, shadow_mask;
+       struct decode_cache *c;
+       struct kvm_run *run = vcpu->run;
+       kvm_clear_exception_queue(vcpu);
+       vcpu->arch.mmio_fault_cr2 = cr2;
+       /*
+        * TODO: fix emulate.c to use guest_read/write_register
+        * instead of direct ->regs accesses, can save hundred cycles
+        * on Intel for instructions that don't read/change RSP, for
+        * for example.
+        */
+       cache_all_regs(vcpu);
+       vcpu->mmio_is_write = 0;
  
        if (!(emulation_type & EMULTYPE_NO_DECODE)) {
                int cs_db, cs_l;
                kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  
                vcpu->arch.emulate_ctxt.vcpu = vcpu;
-               vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
+               vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+               vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
                vcpu->arch.emulate_ctxt.mode =
                        (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
                        (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  
                r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
+               trace_kvm_emulate_insn_start(vcpu);
  
                /* Only allow emulation of specific instructions on #UD
                 * (namely VMMCALL, sysenter, sysexit, syscall)*/
                ++vcpu->stat.insn_emulation;
                if (r)  {
                        ++vcpu->stat.insn_emulation_fail;
+                       trace_kvm_emulate_insn_failed(vcpu);
                        if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                                return EMULATE_DONE;
                        return EMULATE_FAIL;
                return EMULATE_DONE;
        }
  
+ restart:
        r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
        shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
  
        if (r == 0)
                kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
  
-       if (vcpu->arch.pio.string)
+       if (vcpu->arch.pio.count) {
+               if (!vcpu->arch.pio.in)
+                       vcpu->arch.pio.count = 0;
                return EMULATE_DO_MMIO;
+       }
  
-       if ((r || vcpu->mmio_is_write) && run) {
+       if (r || vcpu->mmio_is_write) {
                run->exit_reason = KVM_EXIT_MMIO;
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
  
        if (r) {
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
-                       return EMULATE_DONE;
+                       goto done;
                if (!vcpu->mmio_needed) {
+                       ++vcpu->stat.insn_emulation_fail;
+                       trace_kvm_emulate_insn_failed(vcpu);
                        kvm_report_emulation_failure(vcpu, "mmio");
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
        }
  
-       kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
        if (vcpu->mmio_is_write) {
                vcpu->mmio_needed = 0;
                return EMULATE_DO_MMIO;
        }
  
-       return EMULATE_DONE;
- }
- EXPORT_SYMBOL_GPL(emulate_instruction);
- static int pio_copy_data(struct kvm_vcpu *vcpu)
- {
-       void *p = vcpu->arch.pio_data;
-       gva_t q = vcpu->arch.pio.guest_gva;
-       unsigned bytes;
-       int ret;
-       u32 error_code;
-       bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
-       if (vcpu->arch.pio.in)
-               ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
-       else
-               ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
-       if (ret == X86EMUL_PROPAGATE_FAULT)
-               kvm_inject_page_fault(vcpu, q, error_code);
-       return ret;
- }
- int complete_pio(struct kvm_vcpu *vcpu)
- {
-       struct kvm_pio_request *io = &vcpu->arch.pio;
-       long delta;
-       int r;
-       unsigned long val;
-       if (!io->string) {
-               if (io->in) {
-                       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
-                       memcpy(&val, vcpu->arch.pio_data, io->size);
-                       kvm_register_write(vcpu, VCPU_REGS_RAX, val);
-               }
-       } else {
-               if (io->in) {
-                       r = pio_copy_data(vcpu);
-                       if (r)
-                               goto out;
-               }
-               delta = 1;
-               if (io->rep) {
-                       delta *= io->cur_count;
-                       /*
-                        * The size of the register should really depend on
-                        * current address size.
-                        */
-                       val = kvm_register_read(vcpu, VCPU_REGS_RCX);
-                       val -= delta;
-                       kvm_register_write(vcpu, VCPU_REGS_RCX, val);
-               }
-               if (io->down)
-                       delta = -delta;
-               delta *= io->size;
-               if (io->in) {
-                       val = kvm_register_read(vcpu, VCPU_REGS_RDI);
-                       val += delta;
-                       kvm_register_write(vcpu, VCPU_REGS_RDI, val);
-               } else {
-                       val = kvm_register_read(vcpu, VCPU_REGS_RSI);
-                       val += delta;
-                       kvm_register_write(vcpu, VCPU_REGS_RSI, val);
-               }
-       }
- out:
-       io->count -= io->cur_count;
-       io->cur_count = 0;
-       return 0;
- }
- static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
- {
-       /* TODO: String I/O for in kernel device */
-       int r;
-       if (vcpu->arch.pio.in)
-               r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
-                                   vcpu->arch.pio.size, pd);
-       else
-               r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
-                                    vcpu->arch.pio.port, vcpu->arch.pio.size,
-                                    pd);
-       return r;
- }
- static int pio_string_write(struct kvm_vcpu *vcpu)
- {
-       struct kvm_pio_request *io = &vcpu->arch.pio;
-       void *pd = vcpu->arch.pio_data;
-       int i, r = 0;
-       for (i = 0; i < io->cur_count; i++) {
-               if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
-                                    io->port, io->size, pd)) {
-                       r = -EOPNOTSUPP;
-                       break;
-               }
-               pd += io->size;
-       }
-       return r;
- }
- int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
- {
-       unsigned long val;
+ done:
+       if (vcpu->arch.exception.pending)
+               vcpu->arch.emulate_ctxt.restart = false;
  
-       trace_kvm_pio(!in, port, size, 1);
+       if (vcpu->arch.emulate_ctxt.restart)
+               goto restart;
  
-       vcpu->run->exit_reason = KVM_EXIT_IO;
-       vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
-       vcpu->run->io.size = vcpu->arch.pio.size = size;
-       vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-       vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
-       vcpu->run->io.port = vcpu->arch.pio.port = port;
-       vcpu->arch.pio.in = in;
-       vcpu->arch.pio.string = 0;
-       vcpu->arch.pio.down = 0;
-       vcpu->arch.pio.rep = 0;
-       if (!vcpu->arch.pio.in) {
-               val = kvm_register_read(vcpu, VCPU_REGS_RAX);
-               memcpy(vcpu->arch.pio_data, &val, 4);
-       }
-       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-               complete_pio(vcpu);
-               return 1;
-       }
-       return 0;
+       return EMULATE_DONE;
  }
- EXPORT_SYMBOL_GPL(kvm_emulate_pio);
+ EXPORT_SYMBOL_GPL(emulate_instruction);
  
- int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
-                 int size, unsigned long count, int down,
-                 gva_t address, int rep, unsigned port)
+ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  {
-       unsigned now, in_page;
-       int ret = 0;
-       trace_kvm_pio(!in, port, size, count);
-       vcpu->run->exit_reason = KVM_EXIT_IO;
-       vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
-       vcpu->run->io.size = vcpu->arch.pio.size = size;
-       vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-       vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
-       vcpu->run->io.port = vcpu->arch.pio.port = port;
-       vcpu->arch.pio.in = in;
-       vcpu->arch.pio.string = 1;
-       vcpu->arch.pio.down = down;
-       vcpu->arch.pio.rep = rep;
-       if (!count) {
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
-               return 1;
-       }
-       if (!down)
-               in_page = PAGE_SIZE - offset_in_page(address);
-       else
-               in_page = offset_in_page(address) + size;
-       now = min(count, (unsigned long)in_page / size);
-       if (!now)
-               now = 1;
-       if (down) {
-               /*
-                * String I/O in reverse.  Yuck.  Kill the guest, fix later.
-                */
-               pr_unimpl(vcpu, "guest string pio down\n");
-               kvm_inject_gp(vcpu, 0);
-               return 1;
-       }
-       vcpu->run->io.count = now;
-       vcpu->arch.pio.cur_count = now;
-       if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
-       vcpu->arch.pio.guest_gva = address;
-       if (!vcpu->arch.pio.in) {
-               /* string PIO write */
-               ret = pio_copy_data(vcpu);
-               if (ret == X86EMUL_PROPAGATE_FAULT)
-                       return 1;
-               if (ret == 0 && !pio_string_write(vcpu)) {
-                       complete_pio(vcpu);
-                       if (vcpu->arch.pio.count == 0)
-                               ret = 1;
-               }
-       }
-       /* no string PIO read support yet */
+       unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
+       /* do not return to emulator after return from userspace */
+       vcpu->arch.pio.count = 0;
        return ret;
  }
- EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
+ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
  
  static void bounce_off(void *info)
  {
@@@ -3996,85 -4269,20 +4273,20 @@@ int kvm_fix_hypercall(struct kvm_vcpu *
        return emulator_write_emulated(rip, instruction, 3, vcpu);
  }
  
- static u64 mk_cr_64(u64 curr_cr, u32 new_val)
- {
-       return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
- }
  void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
  
        kvm_x86_ops->set_gdt(vcpu, &dt);
  }
  
  void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
  
        kvm_x86_ops->set_idt(vcpu, &dt);
  }
  
- void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
-                  unsigned long *rflags)
- {
-       kvm_lmsw(vcpu, msw);
-       *rflags = kvm_get_rflags(vcpu);
- }
- unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
- {
-       unsigned long value;
-       switch (cr) {
-       case 0:
-               value = kvm_read_cr0(vcpu);
-               break;
-       case 2:
-               value = vcpu->arch.cr2;
-               break;
-       case 3:
-               value = vcpu->arch.cr3;
-               break;
-       case 4:
-               value = kvm_read_cr4(vcpu);
-               break;
-       case 8:
-               value = kvm_get_cr8(vcpu);
-               break;
-       default:
-               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
-               return 0;
-       }
-       return value;
- }
- void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
-                    unsigned long *rflags)
- {
-       switch (cr) {
-       case 0:
-               kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
-               *rflags = kvm_get_rflags(vcpu);
-               break;
-       case 2:
-               vcpu->arch.cr2 = val;
-               break;
-       case 3:
-               kvm_set_cr3(vcpu, val);
-               break;
-       case 4:
-               kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
-               break;
-       case 8:
-               kvm_set_cr8(vcpu, val & 0xfUL);
-               break;
-       default:
-               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
-       }
- }
  static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
  {
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
@@@ -4138,9 -4346,13 +4350,13 @@@ int cpuid_maxphyaddr(struct kvm_vcpu *v
  {
        struct kvm_cpuid_entry2 *best;
  
+       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+       if (!best || best->eax < 0x80000008)
+               goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
+ not_found:
        return 36;
  }
  
@@@ -4254,9 -4466,13 +4470,13 @@@ static void inject_pending_event(struc
  {
        /* try to reinject previous events if any */
        if (vcpu->arch.exception.pending) {
+               trace_kvm_inj_exception(vcpu->arch.exception.nr,
+                                       vcpu->arch.exception.has_error_code,
+                                       vcpu->arch.exception.error_code);
                kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
                                          vcpu->arch.exception.has_error_code,
-                                         vcpu->arch.exception.error_code);
+                                         vcpu->arch.exception.error_code,
+                                         vcpu->arch.exception.reinject);
                return;
        }
  
@@@ -4486,7 -4702,6 +4706,6 @@@ static int __vcpu_run(struct kvm_vcpu *
        }
  
        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-       post_kvm_run_save(vcpu);
  
        vapic_exit(vcpu);
  
@@@ -4514,26 -4729,17 +4733,17 @@@ int kvm_arch_vcpu_ioctl_run(struct kvm_
        if (!irqchip_in_kernel(vcpu->kvm))
                kvm_set_cr8(vcpu, kvm_run->cr8);
  
-       if (vcpu->arch.pio.cur_count) {
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-               r = complete_pio(vcpu);
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-               if (r)
-                       goto out;
-       }
-       if (vcpu->mmio_needed) {
-               memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
-               vcpu->mmio_read_completed = 1;
-               vcpu->mmio_needed = 0;
+       if (vcpu->arch.pio.count || vcpu->mmio_needed ||
+           vcpu->arch.emulate_ctxt.restart) {
+               if (vcpu->mmio_needed) {
+                       memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+                       vcpu->mmio_read_completed = 1;
+                       vcpu->mmio_needed = 0;
+               }
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-               r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
-                                       EMULTYPE_NO_DECODE);
+               r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r == EMULATE_DO_MMIO) {
-                       /*
-                        * Read-modify-write.  Back to userspace.
-                        */
                        r = 0;
                        goto out;
                }
        r = __vcpu_run(vcpu);
  
  out:
+       post_kvm_run_save(vcpu);
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
  
@@@ -4616,12 -4823,6 +4827,6 @@@ int kvm_arch_vcpu_ioctl_set_regs(struc
        return 0;
  }
  
- void kvm_get_segment(struct kvm_vcpu *vcpu,
-                    struct kvm_segment *var, int seg)
- {
-       kvm_x86_ops->get_segment(vcpu, var, seg);
- }
  void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  {
        struct kvm_segment cs;
@@@ -4635,7 -4836,7 +4840,7 @@@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits)
  int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
  {
-       struct descriptor_table dt;
+       struct desc_ptr dt;
  
        vcpu_load(vcpu);
  
        kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  
        kvm_x86_ops->get_idt(vcpu, &dt);
-       sregs->idt.limit = dt.limit;
-       sregs->idt.base = dt.base;
+       sregs->idt.limit = dt.size;
+       sregs->idt.base = dt.address;
        kvm_x86_ops->get_gdt(vcpu, &dt);
-       sregs->gdt.limit = dt.limit;
-       sregs->gdt.base = dt.base;
+       sregs->gdt.limit = dt.size;
+       sregs->gdt.base = dt.address;
  
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
@@@ -4693,563 -4894,33 +4898,33 @@@ int kvm_arch_vcpu_ioctl_set_mpstate(str
        return 0;
  }
  
- static void kvm_set_segment(struct kvm_vcpu *vcpu,
-                       struct kvm_segment *var, int seg)
+ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+                   bool has_error_code, u32 error_code)
  {
-       kvm_x86_ops->set_segment(vcpu, var, seg);
- }
- static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
-                                  struct kvm_segment *kvm_desct)
- {
-       kvm_desct->base = get_desc_base(seg_desc);
-       kvm_desct->limit = get_desc_limit(seg_desc);
-       if (seg_desc->g) {
-               kvm_desct->limit <<= 12;
-               kvm_desct->limit |= 0xfff;
-       }
-       kvm_desct->selector = selector;
-       kvm_desct->type = seg_desc->type;
-       kvm_desct->present = seg_desc->p;
-       kvm_desct->dpl = seg_desc->dpl;
-       kvm_desct->db = seg_desc->d;
-       kvm_desct->s = seg_desc->s;
-       kvm_desct->l = seg_desc->l;
-       kvm_desct->g = seg_desc->g;
-       kvm_desct->avl = seg_desc->avl;
-       if (!selector)
-               kvm_desct->unusable = 1;
-       else
-               kvm_desct->unusable = 0;
-       kvm_desct->padding = 0;
- }
- static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
-                                         u16 selector,
-                                         struct descriptor_table *dtable)
- {
-       if (selector & 1 << 2) {
-               struct kvm_segment kvm_seg;
-               kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
-               if (kvm_seg.unusable)
-                       dtable->limit = 0;
-               else
-                       dtable->limit = kvm_seg.limit;
-               dtable->base = kvm_seg.base;
-       }
-       else
-               kvm_x86_ops->get_gdt(vcpu, dtable);
- }
- /* allowed just for 8 bytes segments */
- static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-                                        struct desc_struct *seg_desc)
- {
-       struct descriptor_table dtable;
-       u16 index = selector >> 3;
-       int ret;
-       u32 err;
-       gva_t addr;
-       get_segment_descriptor_dtable(vcpu, selector, &dtable);
-       if (dtable.limit < index * 8 + 7) {
-               kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
-               return X86EMUL_PROPAGATE_FAULT;
-       }
-       addr = dtable.base + index * 8;
-       ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
-                                        vcpu,  &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT)
-               kvm_inject_page_fault(vcpu, addr, err);
-        return ret;
- }
- /* allowed just for 8 bytes segments */
- static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-                                        struct desc_struct *seg_desc)
- {
-       struct descriptor_table dtable;
-       u16 index = selector >> 3;
-       get_segment_descriptor_dtable(vcpu, selector, &dtable);
-       if (dtable.limit < index * 8 + 7)
-               return 1;
-       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
- }
- static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
-                              struct desc_struct *seg_desc)
- {
-       u32 base_addr = get_desc_base(seg_desc);
-       return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
- }
- static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
-                            struct desc_struct *seg_desc)
- {
-       u32 base_addr = get_desc_base(seg_desc);
-       return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
- }
- static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
- {
-       struct kvm_segment kvm_seg;
-       kvm_get_segment(vcpu, &kvm_seg, seg);
-       return kvm_seg.selector;
- }
- static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
- {
-       struct kvm_segment segvar = {
-               .base = selector << 4,
-               .limit = 0xffff,
-               .selector = selector,
-               .type = 3,
-               .present = 1,
-               .dpl = 3,
-               .db = 0,
-               .s = 1,
-               .l = 0,
-               .g = 0,
-               .avl = 0,
-               .unusable = 0,
-       };
-       kvm_x86_ops->set_segment(vcpu, &segvar, seg);
-       return X86EMUL_CONTINUE;
- }
- static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
- {
-       return (seg != VCPU_SREG_LDTR) &&
-               (seg != VCPU_SREG_TR) &&
-               (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
- }
- int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
- {
-       struct kvm_segment kvm_seg;
-       struct desc_struct seg_desc;
-       u8 dpl, rpl, cpl;
-       unsigned err_vec = GP_VECTOR;
-       u32 err_code = 0;
-       bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
-       int ret;
+       int cs_db, cs_l, ret;
+       cache_all_regs(vcpu);
  
-       if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
-               return kvm_load_realmode_segment(vcpu, selector, seg);
+       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  
-       /* NULL selector is not valid for TR, CS and SS */
-       if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
-           && null_selector)
-               goto exception;
+       vcpu->arch.emulate_ctxt.vcpu = vcpu;
+       vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+       vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
+       vcpu->arch.emulate_ctxt.mode =
+               (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+               (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
+               ? X86EMUL_MODE_VM86 : cs_l
+               ? X86EMUL_MODE_PROT64 : cs_db
+               ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  
-       /* TR should be in GDT only */
-       if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
-               goto exception;
+       ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
+                                  tss_selector, reason, has_error_code,
+                                  error_code);
  
-       ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
        if (ret)
-               return ret;
-       seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
-       if (null_selector) { /* for NULL selector skip all following checks */
-               kvm_seg.unusable = 1;
-               goto load;
-       }
-       err_code = selector & 0xfffc;
-       err_vec = GP_VECTOR;
-       /* can't load system descriptor into segment selecor */
-       if (seg <= VCPU_SREG_GS && !kvm_seg.s)
-               goto exception;
-       if (!kvm_seg.present) {
-               err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
-               goto exception;
-       }
-       rpl = selector & 3;
-       dpl = kvm_seg.dpl;
-       cpl = kvm_x86_ops->get_cpl(vcpu);
-       switch (seg) {
-       case VCPU_SREG_SS:
-               /*
-                * segment is not a writable data segment or segment
-                * selector's RPL != CPL or segment selector's RPL != CPL
-                */
-               if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
-                       goto exception;
-               break;
-       case VCPU_SREG_CS:
-               if (!(kvm_seg.type & 8))
-                       goto exception;
-               if (kvm_seg.type & 4) {
-                       /* conforming */
-                       if (dpl > cpl)
-                               goto exception;
-               } else {
-                       /* nonconforming */
-                       if (rpl > cpl || dpl != cpl)
-                               goto exception;
-               }
-               /* CS(RPL) <- CPL */
-               selector = (selector & 0xfffc) | cpl;
-             break;
-       case VCPU_SREG_TR:
-               if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
-                       goto exception;
-               break;
-       case VCPU_SREG_LDTR:
-               if (kvm_seg.s || kvm_seg.type != 2)
-                       goto exception;
-               break;
-       default: /*  DS, ES, FS, or GS */
-               /*
-                * segment is not a data or readable code segment or
-                * ((segment is a data or nonconforming code segment)
-                * and (both RPL and CPL > DPL))
-                */
-               if ((kvm_seg.type & 0xa) == 0x8 ||
-                   (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
-                       goto exception;
-               break;
-       }
-       if (!kvm_seg.unusable && kvm_seg.s) {
-               /* mark segment as accessed */
-               kvm_seg.type |= 1;
-               seg_desc.type |= 1;
-               save_guest_segment_descriptor(vcpu, selector, &seg_desc);
-       }
- load:
-       kvm_set_segment(vcpu, &kvm_seg, seg);
-       return X86EMUL_CONTINUE;
- exception:
-       kvm_queue_exception_e(vcpu, err_vec, err_code);
-       return X86EMUL_PROPAGATE_FAULT;
- }
- static void save_state_to_tss32(struct kvm_vcpu *vcpu,
-                               struct tss_segment_32 *tss)
- {
-       tss->cr3 = vcpu->arch.cr3;
-       tss->eip = kvm_rip_read(vcpu);
-       tss->eflags = kvm_get_rflags(vcpu);
-       tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
-       tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
-       tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-       tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
-       tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
-       tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
-       tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
-       tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
-       tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
-       tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
-       tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
-       tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
-       tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
- }
- static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
- {
-       struct kvm_segment kvm_seg;
-       kvm_get_segment(vcpu, &kvm_seg, seg);
-       kvm_seg.selector = sel;
-       kvm_set_segment(vcpu, &kvm_seg, seg);
- }
- static int load_state_from_tss32(struct kvm_vcpu *vcpu,
-                                 struct tss_segment_32 *tss)
- {
-       kvm_set_cr3(vcpu, tss->cr3);
-       kvm_rip_write(vcpu, tss->eip);
-       kvm_set_rflags(vcpu, tss->eflags | 2);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
-       kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
-       kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
-       kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
-       kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
-       kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
-       kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
-       /*
-        * SDM says that segment selectors are loaded before segment
-        * descriptors
-        */
-       kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
-       kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
-       kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
-       kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
-       kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-       kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
-       kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
-       /*
-        * Now load segment descriptors. If a fault happens at this stage
-        * it is handled in the context of the new task
-        */
-       if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
-               return 1;
-       return 0;
- }
- static void save_state_to_tss16(struct kvm_vcpu *vcpu,
-                               struct tss_segment_16 *tss)
- {
-       tss->ip = kvm_rip_read(vcpu);
-       tss->flag = kvm_get_rflags(vcpu);
-       tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
-       tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
-       tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-       tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
-       tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
-       tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
-       tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
-       tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
-       tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
-       tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
-       tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
- }
- static int load_state_from_tss16(struct kvm_vcpu *vcpu,
-                                struct tss_segment_16 *tss)
- {
-       kvm_rip_write(vcpu, tss->ip);
-       kvm_set_rflags(vcpu, tss->flag | 2);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
-       kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
-       kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
-       kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
-       kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
-       kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
-       kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
-       /*
-        * SDM says that segment selectors are loaded before segment
-        * descriptors
-        */
-       kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
-       kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
-       kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
-       kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
-       kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-       /*
-        * Now load segment descriptors. If a fault happens at this stage
-        * it is handled in the context of the new task
-        */
-       if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
-               return 1;
-       if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
-               return 1;
-       return 0;
- }
- static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-                             u16 old_tss_sel, u32 old_tss_base,
-                             struct desc_struct *nseg_desc)
- {
-       struct tss_segment_16 tss_segment_16;
-       int ret = 0;
-       if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
-                          sizeof tss_segment_16))
-               goto out;
-       save_state_to_tss16(vcpu, &tss_segment_16);
-       if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
-                           sizeof tss_segment_16))
-               goto out;
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
-                          &tss_segment_16, sizeof tss_segment_16))
-               goto out;
-       if (old_tss_sel != 0xffff) {
-               tss_segment_16.prev_task_link = old_tss_sel;
-               if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr_write(vcpu, nseg_desc),
-                                   &tss_segment_16.prev_task_link,
-                                   sizeof tss_segment_16.prev_task_link))
-                       goto out;
-       }
-       if (load_state_from_tss16(vcpu, &tss_segment_16))
-               goto out;
+               return EMULATE_FAIL;
  
-       ret = 1;
- out:
-       return ret;
- }
- static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      u16 old_tss_sel, u32 old_tss_base,
-                      struct desc_struct *nseg_desc)
- {
-       struct tss_segment_32 tss_segment_32;
-       int ret = 0;
-       if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
-                          sizeof tss_segment_32))
-               goto out;
-       save_state_to_tss32(vcpu, &tss_segment_32);
-       if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
-                           sizeof tss_segment_32))
-               goto out;
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
-                          &tss_segment_32, sizeof tss_segment_32))
-               goto out;
-       if (old_tss_sel != 0xffff) {
-               tss_segment_32.prev_task_link = old_tss_sel;
-               if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr_write(vcpu, nseg_desc),
-                                   &tss_segment_32.prev_task_link,
-                                   sizeof tss_segment_32.prev_task_link))
-                       goto out;
-       }
-       if (load_state_from_tss32(vcpu, &tss_segment_32))
-               goto out;
-       ret = 1;
- out:
-       return ret;
- }
- int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
- {
-       struct kvm_segment tr_seg;
-       struct desc_struct cseg_desc;
-       struct desc_struct nseg_desc;
-       int ret = 0;
-       u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
-       u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
-       u32 desc_limit;
-       old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
-       /* FIXME: Handle errors. Failure to read either TSS or their
-        * descriptors should generate a pagefault.
-        */
-       if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
-               goto out;
-       if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
-               goto out;
-       if (reason != TASK_SWITCH_IRET) {
-               int cpl;
-               cpl = kvm_x86_ops->get_cpl(vcpu);
-               if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
-                       kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-                       return 1;
-               }
-       }
-       desc_limit = get_desc_limit(&nseg_desc);
-       if (!nseg_desc.p ||
-           ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
-            desc_limit < 0x2b)) {
-               kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
-               return 1;
-       }
-       if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
-               cseg_desc.type &= ~(1 << 1); //clear the B flag
-               save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
-       }
-       if (reason == TASK_SWITCH_IRET) {
-               u32 eflags = kvm_get_rflags(vcpu);
-               kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
-       }
-       /* set back link to prev task only if NT bit is set in eflags;
-          note that old_tss_sel is not used after this point */
-       if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
-               old_tss_sel = 0xffff;
-       if (nseg_desc.type & 8)
-               ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
-                                        old_tss_base, &nseg_desc);
-       else
-               ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
-                                        old_tss_base, &nseg_desc);
-       if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
-               u32 eflags = kvm_get_rflags(vcpu);
-               kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
-       }
-       if (reason != TASK_SWITCH_IRET) {
-               nseg_desc.type |= (1 << 1);
-               save_guest_segment_descriptor(vcpu, tss_selector,
-                                             &nseg_desc);
-       }
-       kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
-       seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
-       tr_seg.type = 11;
-       kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
- out:
-       return ret;
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       return EMULATE_DONE;
  }
  EXPORT_SYMBOL_GPL(kvm_task_switch);
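
The replacement kvm_task_switch() above hands the work to the x86 emulator: it
snapshots EFLAGS and RIP into emulate_ctxt and derives the emulation mode from
CR0.PE, EFLAGS.VM and the CS.L/CS.D bits before calling emulator_task_switch().
A minimal standalone sketch of that mode selection, with the priority order
written out as an if/else chain; the helper and enum names are illustrative,
not kernel API:

    /* Build and run with any C compiler; mirrors the nested ternary above. */
    #include <stdio.h>

    enum emul_mode { MODE_REAL, MODE_VM86, MODE_PROT16, MODE_PROT32, MODE_PROT64 };

    #define X86_EFLAGS_VM (1u << 17)        /* virtual-8086 mode flag */

    static enum emul_mode pick_mode(int protected_mode, unsigned int eflags,
                                    int cs_l, int cs_db)
    {
            if (!protected_mode)
                    return MODE_REAL;       /* CR0.PE clear */
            if (eflags & X86_EFLAGS_VM)
                    return MODE_VM86;       /* EFLAGS.VM set */
            if (cs_l)
                    return MODE_PROT64;     /* CS.L: 64-bit code segment */
            if (cs_db)
                    return MODE_PROT32;     /* CS.D/B: default operand size 32 */
            return MODE_PROT16;
    }

    int main(void)
    {
            printf("%d\n", pick_mode(1, 0, 0, 1));  /* prints 3 (MODE_PROT32) */
            return 0;
    }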
  
@@@ -5258,15 -4929,15 +4933,15 @@@ int kvm_arch_vcpu_ioctl_set_sregs(struc
  {
        int mmu_reset_needed = 0;
        int pending_vec, max_bits;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
  
        vcpu_load(vcpu);
  
-       dt.limit = sregs->idt.limit;
-       dt.base = sregs->idt.base;
+       dt.size = sregs->idt.limit;
+       dt.address = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
-       dt.limit = sregs->gdt.limit;
-       dt.base = sregs->gdt.base;
+       dt.size = sregs->gdt.limit;
+       dt.address = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);
  
        vcpu->arch.cr2 = sregs->cr2;
@@@ -5365,11 -5036,9 +5040,9 @@@ int kvm_arch_vcpu_ioctl_set_guest_debug
                vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
        }
  
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-               vcpu->arch.singlestep_cs =
-                       get_segment_selector(vcpu, VCPU_SREG_CS);
-               vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
-       }
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
+                       get_segment_base(vcpu, VCPU_SREG_CS);
  
        /*
         * Trigger an rflags update that will inject or remove the trace
@@@ -5860,13 -5529,22 +5533,22 @@@ int kvm_arch_interrupt_allowed(struct k
        return kvm_x86_ops->interrupt_allowed(vcpu);
  }
  
+ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+ {
+       unsigned long current_rip = kvm_rip_read(vcpu) +
+               get_segment_base(vcpu, VCPU_SREG_CS);
+       return current_rip == linear_rip;
+ }
+ EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
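
kvm_is_linear_rip() above folds the CS base into the comparison, so the
single-step bookkeeping no longer needs to remember the CS selector separately.
A tiny userspace sketch of the same check; the names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* The saved breakpoint location is stored as CS.base + RIP ("linear RIP"),
     * so the comparison is independent of how the segment is set up. */
    static bool is_linear_rip(unsigned long rip, unsigned long cs_base,
                              unsigned long saved_linear_rip)
    {
            return cs_base + rip == saved_linear_rip;
    }

    int main(void)
    {
            printf("%d\n", is_linear_rip(0x10, 0x1000, 0x1010));   /* 1 */
            printf("%d\n", is_linear_rip(0x1010, 0x0, 0x1010));    /* 1 */
            return 0;
    }
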
  unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
  {
        unsigned long rflags;
  
        rflags = kvm_x86_ops->get_rflags(vcpu);
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
+               rflags &= ~X86_EFLAGS_TF;
        return rflags;
  }
  EXPORT_SYMBOL_GPL(kvm_get_rflags);
  void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  {
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
-           vcpu->arch.singlestep_cs ==
-                       get_segment_selector(vcpu, VCPU_SREG_CS) &&
-           vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
-               rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+           kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
+               rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
  }
  EXPORT_SYMBOL_GPL(kvm_set_rflags);
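
The rewritten kvm_get_rflags()/kvm_set_rflags() pair above hides TF from the
guest while KVM itself is single-stepping, and re-arms it on writes only while
execution is still at the recorded linear RIP. A self-contained sketch of that
policy, assuming a hypothetical dbg_state struct in place of the vcpu fields:

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_EFLAGS_TF (1u << 8)         /* trap flag */

    struct dbg_state {
            bool single_step;               /* KVM_GUESTDBG_SINGLESTEP active */
            unsigned long singlestep_linear_rip;
    };

    /* What the guest is allowed to see: TF stripped while we single-step. */
    static unsigned long guest_visible_rflags(const struct dbg_state *s,
                                               unsigned long hw_rflags)
    {
            if (s->single_step)
                    hw_rflags &= ~(unsigned long)X86_EFLAGS_TF;
            return hw_rflags;
    }

    /* What goes back into hardware: TF re-injected only at the saved location. */
    static unsigned long hw_rflags_on_guest_write(const struct dbg_state *s,
                                                  unsigned long new_rflags,
                                                  unsigned long linear_rip)
    {
            if (s->single_step && linear_rip == s->singlestep_linear_rip)
                    new_rflags |= X86_EFLAGS_TF;
            return new_rflags;
    }

    int main(void)
    {
            struct dbg_state s = { true, 0x1010 };
            printf("%#lx\n", guest_visible_rflags(&s, 0x102));            /* 0x2 */
            printf("%#lx\n", hw_rflags_on_guest_write(&s, 0x2, 0x1010));  /* 0x102 */
            return 0;
    }
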
@@@ -5893,3 -5569,4 +5573,4 @@@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
diff --combined virt/kvm/iommu.c
@@@ -32,30 -32,12 +32,30 @@@ static int kvm_iommu_unmap_memslots(str
  static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);
  
 +static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
 +                         gfn_t gfn, unsigned long size)
 +{
 +      gfn_t end_gfn;
 +      pfn_t pfn;
 +
 +      pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
 +      end_gfn = gfn + (size >> PAGE_SHIFT);
 +      gfn    += 1;
 +
 +      if (is_error_pfn(pfn))
 +              return pfn;
 +
 +      while (gfn < end_gfn)
 +              gfn_to_pfn_memslot(kvm, slot, gfn++);
 +
 +      return pfn;
 +}
 +
  int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
  {
 -      gfn_t gfn = slot->base_gfn;
 -      unsigned long npages = slot->npages;
 +      gfn_t gfn, end_gfn;
        pfn_t pfn;
 -      int i, r = 0;
 +      int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;
  
        if (!domain)
                return 0;
  
 +      gfn     = slot->base_gfn;
 +      end_gfn = gfn + slot->npages;
 +
        flags = IOMMU_READ | IOMMU_WRITE;
        if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                flags |= IOMMU_CACHE;
  
 -      for (i = 0; i < npages; i++) {
 -              /* check if already mapped */
 -              if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
 +      while (gfn < end_gfn) {
 +              unsigned long page_size;
 +
 +              /* Check if already mapped */
 +              if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
 +                      gfn += 1;
 +                      continue;
 +              }
 +
 +              /* Get the page size we could use to map */
 +              page_size = kvm_host_page_size(kvm, gfn);
 +
 +              /* Make sure the page_size does not exceed the memslot */
 +              while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
 +                      page_size >>= 1;
 +
 +              /* Make sure gfn is aligned to the page size we want to map */
 +              while ((gfn << PAGE_SHIFT) & (page_size - 1))
 +                      page_size >>= 1;
 +
 +              /*
 +               * Pin all pages we are about to map in memory. This is
 +               * important because we unmap and unpin in 4kb steps later.
 +               */
 +              pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
 +              if (is_error_pfn(pfn)) {
 +                      gfn += 1;
                        continue;
 +              }
  
 -              pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
 -              r = iommu_map_range(domain,
 -                                  gfn_to_gpa(gfn),
 -                                  pfn_to_hpa(pfn),
 -                                  PAGE_SIZE, flags);
 +              /* Map into IO address space */
 +              r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
 +                            get_order(page_size), flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%lx\n", pfn);
                        goto unmap_pages;
                }
 -              gfn++;
 +
 +              gfn += page_size >> PAGE_SHIFT;
        }
 +
        return 0;
  
  unmap_pages:
 -      kvm_iommu_put_pages(kvm, slot->base_gfn, i);
 +      kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
  }
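
The new mapping loop above backs each IOMMU mapping with the largest page the
host already uses for the gfn, then shrinks it until it both stays inside the
memslot and is naturally aligned to the gfn. A standalone sketch of just that
size-selection step; PAGE_SHIFT and the sample numbers are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef unsigned long long gfn_t;

    static unsigned long fit_page_size(unsigned long host_page_size,
                                       gfn_t gfn, gfn_t end_gfn)
    {
            unsigned long page_size = host_page_size;

            /* Don't let the mapping spill past the end of the memslot. */
            while (gfn + (page_size >> PAGE_SHIFT) > end_gfn)
                    page_size >>= 1;

            /* Shrink until the guest frame is aligned to the mapping size. */
            while ((gfn << PAGE_SHIFT) & (page_size - 1))
                    page_size >>= 1;

            return page_size;
    }

    int main(void)
    {
            /* A 2 MiB host page at an unaligned gfn collapses to 4 KiB. */
            printf("%lu\n", fit_page_size(2UL << 20, 0x201, 0x400));  /* 4096 */
            /* An aligned gfn with room to spare keeps the 2 MiB mapping. */
            printf("%lu\n", fit_page_size(2UL << 20, 0x200, 0x400));  /* 2097152 */
            return 0;
    }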
  
@@@ -127,7 -78,7 +127,7 @@@ static int kvm_iommu_map_memslots(struc
        int i, r = 0;
        struct kvm_memslots *slots;
  
-       slots = rcu_dereference(kvm->memslots);
+       slots = kvm_memslots(kvm);
  
        for (i = 0; i < slots->nmemslots; i++) {
                r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
@@@ -238,47 -189,27 +238,47 @@@ out_unmap
        return r;
  }
  
 +static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
 +{
 +      unsigned long i;
 +
 +      for (i = 0; i < npages; ++i)
 +              kvm_release_pfn_clean(pfn + i);
 +}
 +
  static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
  {
 -      gfn_t gfn = base_gfn;
 +      struct iommu_domain *domain;
 +      gfn_t end_gfn, gfn;
        pfn_t pfn;
 -      struct iommu_domain *domain = kvm->arch.iommu_domain;
 -      unsigned long i;
        u64 phys;
  
 +      domain  = kvm->arch.iommu_domain;
 +      end_gfn = base_gfn + npages;
 +      gfn     = base_gfn;
 +
        /* check if iommu exists and in use */
        if (!domain)
                return;
  
 -      for (i = 0; i < npages; i++) {
 +      while (gfn < end_gfn) {
 +              unsigned long unmap_pages;
 +              int order;
 +
 +              /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
 -              pfn = phys >> PAGE_SHIFT;
 -              kvm_release_pfn_clean(pfn);
 -              gfn++;
 -      }
 +              pfn  = phys >> PAGE_SHIFT;
 +
 +              /* Unmap address from IO address space */
 +              order       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
 +              unmap_pages = 1ULL << order;
  
 -      iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
 +              /* Unpin all pages we just unmapped to not leak any memory */
 +              kvm_unpin_pages(kvm, pfn, unmap_pages);
 +
 +              gfn += unmap_pages;
 +      }
  }
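
On teardown, kvm_iommu_put_pages() above walks the range in whatever chunk
sizes the IOMMU reports back from each unmap call and releases exactly that
many pinned pages before advancing. A userspace sketch of the loop shape, with
fake_unmap() standing in for iommu_unmap()'s returned order:

    #include <stdio.h>

    typedef unsigned long long gfn_t;

    /* Pretend every mapping in the range was installed as an order-4 block
     * (16 pages); the real code gets this order back from iommu_unmap(). */
    static int fake_unmap(gfn_t gfn)
    {
            (void)gfn;
            return 4;
    }

    static void put_pages(gfn_t base_gfn, unsigned long npages)
    {
            gfn_t gfn = base_gfn, end_gfn = base_gfn + npages;

            while (gfn < end_gfn) {
                    unsigned long unmapped = 1UL << fake_unmap(gfn);

                    /* Unpin exactly the pages backing the mapping just removed. */
                    printf("unpinned %lu pages at gfn %#llx\n", unmapped, gfn);
                    gfn += unmapped;
            }
    }

    int main(void)
    {
            put_pages(0x100, 64);   /* four 16-page chunks */
            return 0;
    }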
  
  static int kvm_iommu_unmap_memslots(struct kvm *kvm)
        int i;
        struct kvm_memslots *slots;
  
-       slots = rcu_dereference(kvm->memslots);
+       slots = kvm_memslots(kvm);
  
        for (i = 0; i < slots->nmemslots; i++) {
                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,