KVM: x86: accessors for guest registers
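This patch replaces the bulk ->cache_regs()/->decache_regs() callbacks with per-register accessors (kvm_register_read()/kvm_register_write(), plus the kvm_rip_read()/kvm_rip_write() shorthands), so guest registers can be synchronized lazily, one register at a time, instead of being copied wholesale around every exit. The branch diff below also folds in several related fixes: mmu_lock coverage for memslot and alias updates, KVM_CAP_SYNC_MMU advertisement, TSS and descriptor-table accesses translated through gva_to_gpa(), and the fx_*() to kvm_fx_*() renames.
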
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d1db5aa..2f0696b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
 #include "mmu.h"
 #include "i8254.h"
 #include "tss.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/clocksource.h>
 #include <linux/kvm.h>
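
The new include is where the accessors used throughout this file now live. kvm_cache_regs.h itself is not part of this diff; the following is a sketch of what it must provide, reconstructed from the call sites below: reads lazily fault a register in through a per-vendor callback when its regs_avail bit is clear, and writes mark the register in regs_dirty so it is flushed back before the next guest entry. The ->cache_reg callback name is an assumption here; the diff only shows that the old bulk ->cache_regs/->decache_regs pair is gone.

/* Sketch of kvm_cache_regs.h as the call sites in this patch assume it. */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	/* Lazily fetch the register from the VMCS/VMCB on first use. */
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);	/* name assumed */

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg, unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	/* Dirty registers are written back before reentering the guest. */
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
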
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);
 
 struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -883,6 +885,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
+       case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
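
KVM_CAP_SYNC_MMU advertises that shadow page tables now track changes to the host address space (madvise, swap, ballooning) via mmu notifiers, so userspace may remap guest memory without recreating the VM. Userspace probes it with the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm; a minimal sketch, error handling omitted:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	/* > 0 means the capability is present (see the r = 1 above). */
	int sync_mmu = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);

	printf("KVM_CAP_SYNC_MMU: %s\n", sync_mmu > 0 ? "yes" : "no");
	return 0;
}
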
@@ -1495,6 +1498,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                goto out;
 
        down_write(&kvm->slots_lock);
+       spin_lock(&kvm->mmu_lock);
 
        p = &kvm->arch.aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1506,6 +1510,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                        break;
        kvm->arch.naliases = n;
 
+       spin_unlock(&kvm->mmu_lock);
        kvm_mmu_zap_all(kvm);
 
        up_write(&kvm->slots_lock);
@@ -2077,7 +2082,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
        u8 opcodes[4];
-       unsigned long rip = vcpu->arch.rip;
+       unsigned long rip = kvm_rip_read(vcpu);
        unsigned long rip_linear;
 
        if (!printk_ratelimit())
@@ -2099,6 +2104,14 @@ static struct x86_emulate_ops emulate_ops = {
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
 };
 
+static void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+       kvm_register_read(vcpu, VCPU_REGS_RAX);
+       kvm_register_read(vcpu, VCPU_REGS_RSP);
+       kvm_register_read(vcpu, VCPU_REGS_RIP);
+       vcpu->arch.regs_dirty = ~0;
+}
+
 int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
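
cache_all_regs() stands in for the old bulk ->cache_regs() call while the emulator still pokes vcpu->arch.regs directly: reading RAX, RSP and RIP pulls in every register the vendor module keeps outside the regs array (on VMX only RSP and RIP live in the VMCS), and setting regs_dirty to ~0 forces the whole array to be written back before the next guest entry. The vendor half of this protocol is outside this file; a sketch of what it plausibly looks like on the VMX side, with the callback name assumed as above:

/* Sketch of the vendor half of the lazy-caching protocol (names
 * assumed; vmx.c is not part of this diff). Only RSP and RIP live
 * in the VMCS, so everything else is already in vcpu->arch.regs. */
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	default:
		break;
	}
}
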
@@ -2109,7 +2122,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        struct decode_cache *c;
 
        vcpu->arch.mmio_fault_cr2 = cr2;
-       kvm_x86_ops->cache_regs(vcpu);
+       /*
+        * TODO: fix x86_emulate.c to use guest_read/write_register
+        * instead of direct ->regs accesses, which can save a hundred
+        * cycles on Intel for instructions that don't read/change RSP,
+        * for example.
+        */
+       cache_all_regs(vcpu);
 
        vcpu->mmio_is_write = 0;
        vcpu->arch.pio.string = 0;
@@ -2126,27 +2145,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                        ? X86EMUL_MODE_PROT64 : cs_db
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 
-               if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
-                       vcpu->arch.emulate_ctxt.cs_base = 0;
-                       vcpu->arch.emulate_ctxt.ds_base = 0;
-                       vcpu->arch.emulate_ctxt.es_base = 0;
-                       vcpu->arch.emulate_ctxt.ss_base = 0;
-               } else {
-                       vcpu->arch.emulate_ctxt.cs_base =
-                                       get_segment_base(vcpu, VCPU_SREG_CS);
-                       vcpu->arch.emulate_ctxt.ds_base =
-                                       get_segment_base(vcpu, VCPU_SREG_DS);
-                       vcpu->arch.emulate_ctxt.es_base =
-                                       get_segment_base(vcpu, VCPU_SREG_ES);
-                       vcpu->arch.emulate_ctxt.ss_base =
-                                       get_segment_base(vcpu, VCPU_SREG_SS);
-               }
-
-               vcpu->arch.emulate_ctxt.gs_base =
-                                       get_segment_base(vcpu, VCPU_SREG_GS);
-               vcpu->arch.emulate_ctxt.fs_base =
-                                       get_segment_base(vcpu, VCPU_SREG_FS);
-
                r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
 
                /* Reject the instructions other than VMCALL/VMMCALL when
@@ -2190,7 +2188,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                return EMULATE_DO_MMIO;
        }
 
-       kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
        if (vcpu->mmio_is_write) {
@@ -2243,20 +2240,19 @@ int complete_pio(struct kvm_vcpu *vcpu)
        struct kvm_pio_request *io = &vcpu->arch.pio;
        long delta;
        int r;
-
-       kvm_x86_ops->cache_regs(vcpu);
+       unsigned long val;
 
        if (!io->string) {
-               if (io->in)
-                       memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
-                              io->size);
+               if (io->in) {
+                       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+                       memcpy(&val, vcpu->arch.pio_data, io->size);
+                       kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+               }
        } else {
                if (io->in) {
                        r = pio_copy_data(vcpu);
-                       if (r) {
-                               kvm_x86_ops->cache_regs(vcpu);
+                       if (r)
                                return r;
-                       }
                }
 
                delta = 1;
@@ -2266,19 +2262,24 @@ int complete_pio(struct kvm_vcpu *vcpu)
                         * The size of the register should really depend on
                         * current address size.
                         */
-                       vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+                       val = kvm_register_read(vcpu, VCPU_REGS_RCX);
+                       val -= delta;
+                       kvm_register_write(vcpu, VCPU_REGS_RCX, val);
                }
                if (io->down)
                        delta = -delta;
                delta *= io->size;
-               if (io->in)
-                       vcpu->arch.regs[VCPU_REGS_RDI] += delta;
-               else
-                       vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+               if (io->in) {
+                       val = kvm_register_read(vcpu, VCPU_REGS_RDI);
+                       val += delta;
+                       kvm_register_write(vcpu, VCPU_REGS_RDI, val);
+               } else {
+                       val = kvm_register_read(vcpu, VCPU_REGS_RSI);
+                       val += delta;
+                       kvm_register_write(vcpu, VCPU_REGS_RSI, val);
+               }
        }
 
-       kvm_x86_ops->decache_regs(vcpu);
-
        io->count -= io->cur_count;
        io->cur_count = 0;
 
@@ -2331,6 +2332,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                  int size, unsigned port)
 {
        struct kvm_io_device *pio_dev;
+       unsigned long val;
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2351,8 +2353,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
                            handler);
 
-       kvm_x86_ops->cache_regs(vcpu);
-       memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       memcpy(vcpu->arch.pio_data, &val, 4);
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
 
@@ -2537,13 +2539,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        unsigned long nr, a0, a1, a2, a3, ret;
        int r = 1;
 
-       kvm_x86_ops->cache_regs(vcpu);
-
-       nr = vcpu->arch.regs[VCPU_REGS_RAX];
-       a0 = vcpu->arch.regs[VCPU_REGS_RBX];
-       a1 = vcpu->arch.regs[VCPU_REGS_RCX];
-       a2 = vcpu->arch.regs[VCPU_REGS_RDX];
-       a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+       nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
        KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
 
@@ -2566,8 +2566,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                ret = -KVM_ENOSYS;
                break;
        }
-       vcpu->arch.regs[VCPU_REGS_RAX] = ret;
-       kvm_x86_ops->decache_regs(vcpu);
+       kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        ++vcpu->stat.hypercalls;
        return r;
 }
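
The accessor conversion makes the hypercall ABI easy to read off: call number in RAX, up to four arguments in RBX, RCX, RDX and RSI, result returned in RAX. The guest issues vmcall (Intel) or vmmcall (AMD); kvm_para.h ships wrappers to this effect, and a two-argument sketch of the idea looks like the following (illustrative, not the literal kvm_para.h code):

/* Guest-side sketch of the ABI consumed above: nr in RAX, args in
 * RBX/RCX, result in RAX. The opcode bytes are Intel's vmcall; the
 * kvm_fix_hypercall() path below rewrites it to vmmcall on AMD. */
static inline long kvm_hypercall2_sketch(unsigned int nr,
					 unsigned long p1, unsigned long p2)
{
	long ret;

	asm volatile(".byte 0x0f,0x01,0xc1"		/* vmcall */
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}
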
@@ -2577,6 +2576,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 {
        char instruction[3];
        int ret = 0;
+       unsigned long rip = kvm_rip_read(vcpu);
 
 
        /*
@@ -2586,9 +2586,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
         */
        kvm_mmu_zap_all(vcpu->kvm);
 
-       kvm_x86_ops->cache_regs(vcpu);
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
-       if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+       if (emulator_write_emulated(rip, instruction, 3, vcpu)
            != X86EMUL_CONTINUE)
                ret = -EFAULT;
 
@@ -2718,13 +2717,12 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        u32 function, index;
        struct kvm_cpuid_entry2 *e, *best;
 
-       kvm_x86_ops->cache_regs(vcpu);
-       function = vcpu->arch.regs[VCPU_REGS_RAX];
-       index = vcpu->arch.regs[VCPU_REGS_RCX];
-       vcpu->arch.regs[VCPU_REGS_RAX] = 0;
-       vcpu->arch.regs[VCPU_REGS_RBX] = 0;
-       vcpu->arch.regs[VCPU_REGS_RCX] = 0;
-       vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
        best = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
@@ -2742,18 +2740,17 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
                                best = e;
        }
        if (best) {
-               vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
-               vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
-               vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
-               vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
        }
-       kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
        KVMTRACE_5D(CPUID, vcpu, function,
-                   (u32)vcpu->arch.regs[VCPU_REGS_RAX],
-                   (u32)vcpu->arch.regs[VCPU_REGS_RBX],
-                   (u32)vcpu->arch.regs[VCPU_REGS_RCX],
-                   (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -2808,8 +2805,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
        if (!apic || !apic->vapic_addr)
                return;
 
+       down_read(&vcpu->kvm->slots_lock);
        kvm_release_page_dirty(apic->vapic_page);
        mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+       up_read(&vcpu->kvm->slots_lock);
 }
 
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -2933,8 +2932,8 @@ again:
         * Profile KVM exit RIPs:
         */
        if (unlikely(prof_on == KVM_PROFILING)) {
-               kvm_x86_ops->cache_regs(vcpu);
-               profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+               unsigned long rip = kvm_rip_read(vcpu);
+               profile_hit(KVM_PROFILING, (void *)rip);
        }
 
        if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
@@ -2965,9 +2964,7 @@ out:
 
        post_kvm_run_save(vcpu, kvm_run);
 
-       down_read(&vcpu->kvm->slots_lock);
        vapic_exit(vcpu);
-       up_read(&vcpu->kvm->slots_lock);
 
        return r;
 }
@@ -2979,15 +2976,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        vcpu_load(vcpu);
 
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
-               vcpu_put(vcpu);
-               return -EAGAIN;
+               r = -EAGAIN;
+               goto out;
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
        /* re-sync apic's tpr */
        if (!irqchip_in_kernel(vcpu->kvm))
                kvm_set_cr8(vcpu, kvm_run->cr8);
@@ -3017,11 +3014,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
        }
 #endif
-       if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-               kvm_x86_ops->cache_regs(vcpu);
-               vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-               kvm_x86_ops->decache_regs(vcpu);
-       }
+       if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+               kvm_register_write(vcpu, VCPU_REGS_RAX,
+                                    kvm_run->hypercall.ret);
 
        r = __vcpu_run(vcpu, kvm_run);
 
@@ -3037,28 +3032,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
        vcpu_load(vcpu);
 
-       kvm_x86_ops->cache_regs(vcpu);
-
-       regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
-       regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
-       regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
-       regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
-       regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
-       regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
-       regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-       regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+       regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
+       regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+       regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
 #ifdef CONFIG_X86_64
-       regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
-       regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
-       regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
-       regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
-       regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
-       regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
-       regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
-       regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+       regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
+       regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
+       regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
+       regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
+       regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
+       regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
+       regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
+       regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
 #endif
 
-       regs->rip = vcpu->arch.rip;
+       regs->rip = kvm_rip_read(vcpu);
        regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
        /*
@@ -3076,29 +3069,29 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
        vcpu_load(vcpu);
 
-       vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
-       vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
-       vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
-       vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
-       vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
-       vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
-       vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
-       vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+       kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+       kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+       kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
 #ifdef CONFIG_X86_64
-       vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
-       vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
-       vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
-       vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
-       vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
-       vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
-       vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
-       vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+       kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+       kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+       kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+       kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+       kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+       kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+       kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+       kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+
 #endif
 
-       vcpu->arch.rip = regs->rip;
+       kvm_rip_write(vcpu, regs->rip);
        kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
-       kvm_x86_ops->decache_regs(vcpu);
 
        vcpu->arch.exception.pending = false;
 
@@ -3205,6 +3198,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
        kvm_desct->base |= seg_desc->base2 << 24;
        kvm_desct->limit = seg_desc->limit0;
        kvm_desct->limit |= seg_desc->limit << 16;
+       if (seg_desc->g) {
+               kvm_desct->limit <<= 12;
+               kvm_desct->limit |= 0xfff;
+       }
        kvm_desct->selector = selector;
        kvm_desct->type = seg_desc->type;
        kvm_desct->present = seg_desc->p;
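
The granularity handling here was previously missing: with the g bit set, the descriptor's 20-bit limit counts 4K pages, so it must be scaled up and the low 12 bits filled in. For a flat 32-bit segment, limit0/limit hold 0xfffff with g=1, which the code above expands to (0xfffff << 12) | 0xfff = 0xffffffff.
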
@@ -3244,6 +3241,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
+       gpa_t gpa;
        struct descriptor_table dtable;
        u16 index = selector >> 3;
 
@@ -3253,13 +3251,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return 1;
        }
-       return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+       gpa += index * 8;
+       return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
+       gpa_t gpa;
        struct descriptor_table dtable;
        u16 index = selector >> 3;
 
@@ -3267,7 +3268,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
        if (dtable.limit < index * 8 + 7)
                return 1;
-       return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+       gpa += index * 8;
+       return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
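
Both descriptor helpers now translate dtable.base from a guest virtual to a guest physical address before indexing into the table, since kvm_read_guest()/kvm_write_guest() take guest physical addresses; passing the gva directly was only correct with paging off or identity-mapped. A hypothetical helper for the resulting pattern (not in the patch, and, like the code above, it ignores accesses that cross a page boundary):

static int read_guest_virt(struct kvm_vcpu *vcpu, gva_t gva,
			   void *data, unsigned int len)
{
	/* Translate through the guest page tables, then access the gpa. */
	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}
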
@@ -3279,55 +3282,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
        base_addr |= (seg_desc->base1 << 16);
        base_addr |= (seg_desc->base2 << 24);
 
-       return base_addr;
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_32 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_read_guest(vcpu->kvm, base_addr, tss,
-                             sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_32 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_write_guest(vcpu->kvm, base_addr, tss,
-                              sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_16 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_read_guest(vcpu->kvm, base_addr, tss,
-                             sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_16 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_write_guest(vcpu->kvm, base_addr, tss,
-                              sizeof(struct tss_segment_16));
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3372,17 +3327,16 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
                                struct tss_segment_32 *tss)
 {
        tss->cr3 = vcpu->arch.cr3;
-       tss->eip = vcpu->arch.rip;
+       tss->eip = kvm_rip_read(vcpu);
        tss->eflags = kvm_x86_ops->get_rflags(vcpu);
-       tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
-       tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-       tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
-       tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
-       tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
-       tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
-       tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
-       tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+       tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+       tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+       tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
@@ -3398,17 +3352,17 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 {
        kvm_set_cr3(vcpu, tss->cr3);
 
-       vcpu->arch.rip = tss->eip;
+       kvm_rip_write(vcpu, tss->eip);
        kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
 
-       vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
-       vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
-       vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
-       vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
-       vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
-       vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
-       vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
-       vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+       kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+       kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+       kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
        if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;
@@ -3436,16 +3390,16 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
                                struct tss_segment_16 *tss)
 {
-       tss->ip = vcpu->arch.rip;
+       tss->ip = kvm_rip_read(vcpu);
        tss->flag = kvm_x86_ops->get_rflags(vcpu);
-       tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
-       tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
-       tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
-       tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
-       tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
-       tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
-       tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
-       tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+       tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+       tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+       tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
 
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
@@ -3458,16 +3412,16 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
                                 struct tss_segment_16 *tss)
 {
-       vcpu->arch.rip = tss->ip;
+       kvm_rip_write(vcpu, tss->ip);
        kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
-       vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
-       vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
-       vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
-       vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
-       vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
-       vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
-       vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
-       vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+       kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+       kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+       kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
        if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;
@@ -3487,20 +3441,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      struct desc_struct *cseg_desc,
+                      u32 old_tss_base,
                       struct desc_struct *nseg_desc)
 {
        struct tss_segment_16 tss_segment_16;
        int ret = 0;
 
-       if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+       if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+                          sizeof tss_segment_16))
                goto out;
 
        save_state_to_tss16(vcpu, &tss_segment_16);
-       save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-       if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+       if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+                           sizeof tss_segment_16))
                goto out;
+
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+                          &tss_segment_16, sizeof tss_segment_16))
+               goto out;
+
        if (load_state_from_tss16(vcpu, &tss_segment_16))
                goto out;
 
@@ -3510,20 +3470,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      struct desc_struct *cseg_desc,
+                      u32 old_tss_base,
                       struct desc_struct *nseg_desc)
 {
        struct tss_segment_32 tss_segment_32;
        int ret = 0;
 
-       if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+       if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+                          sizeof tss_segment_32))
                goto out;
 
        save_state_to_tss32(vcpu, &tss_segment_32);
-       save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-       if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+       if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+                           sizeof tss_segment_32))
+               goto out;
+
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+                          &tss_segment_32, sizeof tss_segment_32))
                goto out;
+
        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;
 
@@ -3538,16 +3504,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        struct desc_struct cseg_desc;
        struct desc_struct nseg_desc;
        int ret = 0;
+       u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+       u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-       kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+       old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+       /* FIXME: Handle errors. Failure to read either TSS, or their
+        * descriptors, should generate a page fault.
+        */
        if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
                goto out;
 
-       if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+       if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
                goto out;
 
-
        if (reason != TASK_SWITCH_IRET) {
                int cpl;
 
@@ -3565,8 +3535,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
                cseg_desc.type &= ~(1 << 1); //clear the B flag
-               save_guest_segment_descriptor(vcpu, tr_seg.selector,
-                                             &cseg_desc);
+               save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
        }
 
        if (reason == TASK_SWITCH_IRET) {
@@ -3575,13 +3544,12 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        }
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
-       kvm_x86_ops->cache_regs(vcpu);
 
        if (nseg_desc.type & 8)
-               ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+               ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
                                         &nseg_desc);
        else
-               ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+               ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
                                         &nseg_desc);
 
        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
@@ -3600,7 +3568,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        tr_seg.type = 11;
        kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 out:
-       kvm_x86_ops->decache_regs(vcpu);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -3788,14 +3755,14 @@ void fx_init(struct kvm_vcpu *vcpu)
         * allocate ram with GFP_KERNEL.
         */
        if (!used_math())
-               fx_save(&vcpu->arch.host_fx_image);
+               kvm_fx_save(&vcpu->arch.host_fx_image);
 
        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
-       fx_save(&vcpu->arch.host_fx_image);
-       fx_finit();
-       fx_save(&vcpu->arch.guest_fx_image);
-       fx_restore(&vcpu->arch.host_fx_image);
+       kvm_fx_save(&vcpu->arch.host_fx_image);
+       kvm_fx_finit();
+       kvm_fx_save(&vcpu->arch.guest_fx_image);
+       kvm_fx_restore(&vcpu->arch.host_fx_image);
        preempt_enable();
 
        vcpu->arch.cr0 |= X86_CR0_ET;
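
The fx_save()/fx_restore()/fx_finit() helpers pick up a kvm_ prefix, presumably to keep them from colliding with identically named symbols elsewhere in the tree. They are thin inline-asm wrappers; a reconstructed sketch of the renamed versions (the header carrying them is not in this diff):

static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r"(image));
}

static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r"(image));
}

static inline void kvm_fx_finit(void)
{
	asm("finit");
}
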
@@ -3812,8 +3779,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
                return;
 
        vcpu->guest_fpu_loaded = 1;
-       fx_save(&vcpu->arch.host_fx_image);
-       fx_restore(&vcpu->arch.guest_fx_image);
+       kvm_fx_save(&vcpu->arch.host_fx_image);
+       kvm_fx_restore(&vcpu->arch.guest_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
@@ -3823,8 +3790,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
                return;
 
        vcpu->guest_fpu_loaded = 0;
-       fx_save(&vcpu->arch.guest_fx_image);
-       fx_restore(&vcpu->arch.host_fx_image);
+       kvm_fx_save(&vcpu->arch.guest_fx_image);
+       kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
@@ -4016,16 +3983,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
+                       unsigned long userspace_addr;
+
                        down_write(&current->mm->mmap_sem);
-                       memslot->userspace_addr = do_mmap(NULL, 0,
-                                                    npages * PAGE_SIZE,
-                                                    PROT_READ | PROT_WRITE,
-                                                    MAP_SHARED | MAP_ANONYMOUS,
-                                                    0);
+                       userspace_addr = do_mmap(NULL, 0,
+                                                npages * PAGE_SIZE,
+                                                PROT_READ | PROT_WRITE,
+                                                MAP_SHARED | MAP_ANONYMOUS,
+                                                0);
                        up_write(&current->mm->mmap_sem);
 
-                       if (IS_ERR((void *)memslot->userspace_addr))
-                               return PTR_ERR((void *)memslot->userspace_addr);
+                       if (IS_ERR((void *)userspace_addr))
+                               return PTR_ERR((void *)userspace_addr);
+
+                       /* set userspace_addr atomically for kvm_hva_to_rmapp */
+                       spin_lock(&kvm->mmu_lock);
+                       memslot->userspace_addr = userspace_addr;
+                       spin_unlock(&kvm->mmu_lock);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;
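
Assigning userspace_addr under mmu_lock (like the alias update near the top of this patch) is what lets mmu-notifier callers walk the memslot array safely: a lookup keyed by host virtual address must never see a slot whose npages is already live while userspace_addr still holds the raw do_mmap() return. The comment above names kvm_hva_to_rmapp; a hypothetical sketch of the shape of such a reader:

/* Hypothetical reader protected by the ordering above: resolve a host
 * virtual address to a slot's rmap entry while holding only mmu_lock. */
static unsigned long *hva_to_rmapp(struct kvm *kvm, unsigned long hva)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *slot = &kvm->memslots[i];
		unsigned long start = slot->userspace_addr;
		unsigned long end = start + (slot->npages << PAGE_SHIFT);

		if (hva >= start && hva < end)
			return &slot->rmap[(hva - start) >> PAGE_SHIFT];
	}
	return NULL;
}
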
@@ -4053,6 +4027,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+       kvm_mmu_zap_all(kvm);
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE