include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index bb8873d..25da07f 100644
@@ -26,6 +26,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/mmu_context.h>
+#include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 
@@ -33,6 +34,9 @@
 
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
+/* #define DEBUG_EXT */
+
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",       VCPU_STAT(sum_exits) },
@@ -66,13 +70,21 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+       memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+              sizeof(get_paca()->shadow_vcpu));
        get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
        memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+       memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+              sizeof(get_paca()->shadow_vcpu));
        to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+
+       kvmppc_giveup_ext(vcpu, MSR_FP);
+       kvmppc_giveup_ext(vcpu, MSR_VEC);
+       kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
 #if defined(EXIT_DEBUG)
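
A note on the load/put pair above: the shadow vcpu is staged into the PACA on
kvmppc_core_vcpu_load() (the real-mode interrupt handlers can only reach
per-CPU data) and copied back on put. vcpu_put additionally strips any lazily
granted FPU/Altivec/VSX ownership via kvmppc_giveup_ext(), which this patch
adds further down, so live guest register state cannot survive a host
reschedule. A toy, compilable model of that ownership rule, with hypothetical
names and the usual MSR facility bit positions written out as assumptions:

#include <assert.h>
#include <stdint.h>

#define MSR_FP  (1u << 13)	/* assumed PowerPC MSR bit positions */
#define MSR_VEC (1u << 25)
#define MSR_VSX (1u << 23)

struct toy_vcpu {
	uint32_t guest_owned_ext;	/* facilities the guest holds live */
};

static void giveup_ext(struct toy_vcpu *v, uint32_t msr)
{
	if (!(v->guest_owned_ext & msr))
		return;			/* nothing to flush, as in the real code */
	/* ... the real code copies the live registers back into the vcpu ... */
	v->guest_owned_ext &= ~msr;
}

static void vcpu_put(struct toy_vcpu *v)
{
	giveup_ext(v, MSR_FP);
	giveup_ext(v, MSR_VEC);
	giveup_ext(v, MSR_VSX);
	assert(v->guest_owned_ext == 0);	/* safe to migrate off this CPU */
}

int main(void)
{
	struct toy_vcpu v = { .guest_owned_ext = MSR_FP | MSR_VEC };

	vcpu_put(&v);
	return 0;
}
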
@@ -83,6 +95,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.shadow_msr = vcpu->arch.msr;
+       /* Guest MSR values */
+       vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+                                MSR_BE | MSR_DE;
+       /* Process MSR values */
+       vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+                                MSR_EE;
+       /* External providers the guest reserved */
+       vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+       /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+       vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
        ulong old_msr = vcpu->arch.msr;
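
The new helper above splits the shadow MSR into three groups: bits the guest
controls directly (FE0/FE1/SF/SE/BE/DE), bits the host always forces on
(ME/RI/IR/DR/PR/EE, so the guest runs as a translated, interruptible user-mode
context no matter what it asked for), and the lazily owned facility bits. A
minimal userspace sketch of the same computation follows; the MSR_* values
mirror the usual PowerPC bit assignments but are spelled out here as
assumptions, not taken from kernel headers (MSR_DE is in the real mask too,
omitted here to keep the list short):

#include <stdint.h>
#include <stdio.h>

#define MSR_SF   (1ULL << 63)
#define MSR_EE   (1ULL << 15)
#define MSR_PR   (1ULL << 14)
#define MSR_FP   (1ULL << 13)
#define MSR_ME   (1ULL << 12)
#define MSR_FE0  (1ULL << 11)
#define MSR_SE   (1ULL << 10)
#define MSR_BE   (1ULL << 9)
#define MSR_FE1  (1ULL << 8)
#define MSR_IR   (1ULL << 5)
#define MSR_DR   (1ULL << 4)
#define MSR_RI   (1ULL << 1)

#define GUEST_VISIBLE (MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE)
#define HOST_FORCED   (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE)

static uint64_t recalc_shadow_msr(uint64_t guest_msr, uint64_t owned_ext)
{
	uint64_t shadow = guest_msr & GUEST_VISIBLE;

	shadow |= HOST_FORCED;
	/* FP/VEC/VSX only appear once the guest really owns the unit */
	shadow |= guest_msr & owned_ext;
	return shadow;
}

int main(void)
{
	/* guest asks for FP but does not own the FPU yet */
	printf("0x%016llx\n",
	       (unsigned long long)recalc_shadow_msr(MSR_FP | MSR_SF, 0));
	/* after kvmppc_handle_ext(..., MSR_FP) grants ownership */
	printf("0x%016llx\n",
	       (unsigned long long)recalc_shadow_msr(MSR_FP | MSR_SF, MSR_FP));
	return 0;
}
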
@@ -90,12 +119,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 #ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
+
        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.msr = msr;
-       vcpu->arch.shadow_msr = msr | MSR_USER32;
-       vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
-                                  MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
-                                  MSR_FE1);
+       kvmppc_recalc_shadow_msr(vcpu);
 
        if (msr & (MSR_WE|MSR_POW)) {
                if (!vcpu->arch.pending_exceptions) {
@@ -164,8 +191,9 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
+       to_book3s(vcpu)->prog_flags = flags;
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
 }
 
@@ -194,6 +222,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
        int deliver = 1;
        int vec = 0;
+       ulong flags = 0ULL;
 
        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
@@ -227,6 +256,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
                break;
        case BOOK3S_IRQPRIO_PROGRAM:
                vec = BOOK3S_INTERRUPT_PROGRAM;
+               flags = to_book3s(vcpu)->prog_flags;
                break;
        case BOOK3S_IRQPRIO_VSX:
                vec = BOOK3S_INTERRUPT_VSX;
@@ -257,7 +287,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 #endif
 
        if (deliver)
-               kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+               kvmppc_inject_interrupt(vcpu, vec, flags);
 
        return deliver;
 }
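
The plumbing above lets a program check carry its cause: kvmppc_core_queue_program()
now stashes the flags (the illegal/privileged/trap style bits that land in the
upper SRR1 word on delivery) in prog_flags, and irqprio_deliver() hands them to
kvmppc_inject_interrupt() instead of the old hard-coded 0. A compact sketch of
the queue-then-deliver flow; the SRR1_PROG* values match the usual SRR1
encodings but are stated here as assumptions:

#include <stdint.h>
#include <stdio.h>

#define SRR1_PROGILL  0x80000	/* illegal instruction (assumed value) */
#define SRR1_PROGTRAP 0x20000	/* trap instruction    (assumed value) */

struct toy_vcpu {
	uint64_t prog_flags;	/* stashed at queue time */
	uint64_t srr1;		/* what the guest sees on delivery */
};

static void queue_program(struct toy_vcpu *v, uint64_t flags)
{
	v->prog_flags = flags;	/* to_book3s(vcpu)->prog_flags = flags */
}

static void deliver_program(struct toy_vcpu *v, uint64_t guest_msr)
{
	/* kvmppc_inject_interrupt() ORs the flags into the SRR1 image */
	v->srr1 = guest_msr | v->prog_flags;
}

int main(void)
{
	struct toy_vcpu v = { 0, 0 };

	queue_program(&v, SRR1_PROGILL);
	deliver_program(&v, 0x8000);	/* some guest MSR */
	printf("SRR1 = 0x%llx\n", (unsigned long long)v.srr1);
	return 0;
}
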
@@ -510,14 +540,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                /* Page not found in guest PTE entries */
                vcpu->arch.dear = vcpu->arch.fault_dear;
                to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-               vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+               vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.dear = vcpu->arch.fault_dear;
                to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-               vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+               vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
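
The shadow_msr -> shadow_srr1 switch in this hunk (and in the ISI and default
paths below) matters because on a storage interrupt the hardware reports the
fault syndrome in the upper bits of SRR1, not in the MSR. After this patch the
exit path captures SRR1 in its own field while shadow_msr is purely synthesized
by kvmppc_recalc_shadow_msr(), so reading syndrome bits out of shadow_msr would
return garbage. A hedged one-function sketch of the forwarding:

#include <assert.h>
#include <stdint.h>

static uint64_t reflect_fault(uint64_t guest_msr, uint64_t shadow_srr1)
{
	/* mask taken from the patch: SRR1's DSI/ISI syndrome bits */
	return guest_msr | (shadow_srr1 & 0x00000000f8000000ULL);
}

int main(void)
{
	/* a "PTEG not found" style syndrome bit travels into the guest MSR */
	assert(reflect_fault(0x8000, 0x40000000) & 0x40000000);
	return 0;
}
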
@@ -539,13 +569,122 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
-               if ( r == RESUME_GUEST_NV )
-                       r = RESUME_GUEST;
        }
 
        return r;
 }
 
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+       i *= 2;
+#endif
+       return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+       struct thread_struct *t = &current->thread;
+       u64 *vcpu_fpr = vcpu->arch.fpr;
+       u64 *vcpu_vsx = vcpu->arch.vsr;
+       u64 *thread_fpr = (u64*)t->fpr;
+       int i;
+
+       if (!(vcpu->arch.guest_owned_ext & msr))
+               return;
+
+#ifdef DEBUG_EXT
+       printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+       switch (msr) {
+       case MSR_FP:
+               giveup_fpu(current);
+               for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+                       vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+               vcpu->arch.fpscr = t->fpscr.val;
+               break;
+       case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+               giveup_altivec(current);
+               memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+               vcpu->arch.vscr = t->vscr;
+#endif
+               break;
+       case MSR_VSX:
+#ifdef CONFIG_VSX
+               __giveup_vsx(current);
+               for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+                       vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+               break;
+       default:
+               BUG();
+       }
+
+       vcpu->arch.guest_owned_ext &= ~msr;
+       current->thread.regs->msr &= ~msr;
+       kvmppc_recalc_shadow_msr(vcpu);
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+                            ulong msr)
+{
+       struct thread_struct *t = &current->thread;
+       u64 *vcpu_fpr = vcpu->arch.fpr;
+       u64 *vcpu_vsx = vcpu->arch.vsr;
+       u64 *thread_fpr = (u64*)t->fpr;
+       int i;
+
+       if (!(vcpu->arch.msr & msr)) {
+               kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+               return RESUME_GUEST;
+       }
+
+#ifdef DEBUG_EXT
+       printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+       current->thread.regs->msr |= msr;
+
+       switch (msr) {
+       case MSR_FP:
+               for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+                       thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+               t->fpscr.val = vcpu->arch.fpscr;
+               t->fpexc_mode = 0;
+               kvmppc_load_up_fpu();
+               break;
+       case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+               memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+               t->vscr = vcpu->arch.vscr;
+               t->vrsave = -1;
+               kvmppc_load_up_altivec();
+#endif
+               break;
+       case MSR_VSX:
+#ifdef CONFIG_VSX
+               for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+                       thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+               kvmppc_load_up_vsx();
+#endif
+               break;
+       default:
+               BUG();
+       }
+
+       vcpu->arch.guest_owned_ext |= msr;
+
+       kvmppc_recalc_shadow_msr(vcpu);
+
+       return RESUME_GUEST;
+}
+
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
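
Two things worth spelling out in the block above. First, get_fpr_index()
encodes the VSX register file layout: with CONFIG_VSX the 32 FPRs live in the
high doubleword of VSR0-31, so the FP save area in struct thread_struct is
strided by two u64, and the companion low doublewords (index + 1, used by the
MSR_VSX cases) carry the VSX halves. Second, every ownership change funnels
through kvmppc_recalc_shadow_msr(), keeping the facility bits the guest sees in
sync with guest_owned_ext. A standalone sketch of the interleaved layout, with
the 32-register sizes as assumptions:

#include <assert.h>
#include <stdint.h>

#define NUM_FPR 32		/* assumed, as in the usual Book3S layout */

/* With VSX the save area is VSR0..VSR31, two u64 each:
 * [fpr0][vsx0][fpr1][vsx1]...  FPR i sits at 2*i, its VSX half at 2*i+1. */
static uint64_t thread_fpr[NUM_FPR * 2];

static inline int get_fpr_index(int i)
{
	return i * 2;		/* plain i when VSX is compiled out */
}

int main(void)
{
	uint64_t vcpu_fpr[NUM_FPR], vcpu_vsx[NUM_FPR];
	int i;

	thread_fpr[get_fpr_index(3)] = 0x1234;		/* FPR3 */
	thread_fpr[get_fpr_index(3) + 1] = 0x5678;	/* VSR3, low half */

	for (i = 0; i < NUM_FPR; i++) {
		vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
		vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
	}
	assert(vcpu_fpr[3] == 0x1234 && vcpu_vsx[3] == 0x5678);
	return 0;
}
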
@@ -570,7 +709,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case BOOK3S_INTERRUPT_INST_STORAGE:
                vcpu->stat.pf_instruc++;
                /* only care about PTEG not found errors, but leave NX alone */
-               if (vcpu->arch.shadow_msr & 0x40000000) {
+               if (vcpu->arch.shadow_srr1 & 0x40000000) {
                        r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -582,7 +721,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         */
                        kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
                } else {
-                       vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+                       vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
                        r = RESUME_GUEST;
@@ -628,6 +767,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case BOOK3S_INTERRUPT_PROGRAM:
        {
                enum emulation_result er;
+               ulong flags;
+
+               flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
                if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -635,7 +777,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 #endif
                        if ((vcpu->arch.last_inst & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
-                               kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                               kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
@@ -645,12 +787,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
-                       r = RESUME_GUEST;
+                       r = RESUME_GUEST_NV;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-                       kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                       kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                default:
@@ -660,23 +802,30 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        }
        case BOOK3S_INTERRUPT_SYSCALL:
 #ifdef EXIT_DEBUG
-               printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+               printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
 #endif
                vcpu->stat.syscall_exits++;
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
-       case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
-       case BOOK3S_INTERRUPT_TRACE:
+               r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
+               break;
        case BOOK3S_INTERRUPT_ALTIVEC:
+               r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
+               break;
        case BOOK3S_INTERRUPT_VSX:
+               r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+               break;
+       case BOOK3S_INTERRUPT_MACHINE_CHECK:
+       case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
                /* Ugh - bork here! What did we get? */
-               printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+               printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+                       exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
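
The reshuffled switch above is the entry point of the lazy scheme: FP, Altivec
and VSX unavailable exits no longer requeue blindly into the guest. Instead
kvmppc_handle_ext() first asks whether the guest's own MSR has the facility
enabled, reflects the interrupt if not, and otherwise loads the real unit and
records ownership; machine check and trace keep the old requeue behaviour. The
decision reduces to a small table, sketched here with hypothetical names:

#include <assert.h>
#include <stdint.h>

#define MSR_FP (1ULL << 13)	/* assumed facility bit */

enum ext_action { REFLECT_TO_GUEST, LOAD_FOR_GUEST };

static enum ext_action handle_ext(uint64_t guest_msr, uint64_t facility_msr)
{
	/* Facility disabled in the guest's own MSR: deliver the same
	 * interrupt a bare-metal CPU would. */
	if (!(guest_msr & facility_msr))
		return REFLECT_TO_GUEST;
	/* Otherwise load the real unit, set guest_owned_ext, resume. */
	return LOAD_FOR_GUEST;
}

int main(void)
{
	assert(handle_ext(0, MSR_FP) == REFLECT_TO_GUEST);
	assert(handle_ext(MSR_FP, MSR_FP) == LOAD_FOR_GUEST);
	return 0;
}
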
@@ -719,10 +868,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        int i;
 
        regs->pc = vcpu->arch.pc;
-       regs->cr = vcpu->arch.cr;
+       regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
-       regs->xer = vcpu->arch.xer;
+       regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
@@ -736,7 +885,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->sprg7 = vcpu->arch.sprg6;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-               regs->gpr[i] = vcpu->arch.gpr[i];
+               regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
        return 0;
 }
@@ -746,10 +895,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        int i;
 
        vcpu->arch.pc = regs->pc;
-       vcpu->arch.cr = regs->cr;
+       kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
-       vcpu->arch.xer = regs->xer;
+       kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.srr0 = regs->srr0;
        vcpu->arch.srr1 = regs->srr1;
@@ -761,8 +910,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.sprg6 = regs->sprg5;
        vcpu->arch.sprg7 = regs->sprg6;
 
-       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-               vcpu->arch.gpr[i] = regs->gpr[i];
+       for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+               kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
        return 0;
 }
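
The kvmppc_get_gpr()/kvmppc_set_gpr() (and cr/xer) accessors introduced in
these two hunks abstract where a register actually lives, which Book3S needs
because some registers are shadowed in the PACA while the guest runs; note
also that the set_regs loop now iterates over ARRAY_SIZE(regs->gpr), bounding
the copy by the userspace struct rather than the vcpu array. (The sprg context
lines above preserve what looks like a historical sprg4-7 shift, addressed
later upstream.) A plausible shape for such accessors, stated as an assumption
rather than the real header:

#include <assert.h>
#include <stdint.h>

struct toy_vcpu_arch {
	uint64_t gpr[32];
};

static inline uint64_t kvmppc_get_gpr(struct toy_vcpu_arch *a, int num)
{
	return a->gpr[num];	/* a Book3S build may read the PACA shadow */
}

static inline void kvmppc_set_gpr(struct toy_vcpu_arch *a, int num, uint64_t val)
{
	a->gpr[num] = val;
}

int main(void)
{
	struct toy_vcpu_arch a = { { 0 } };

	kvmppc_set_gpr(&a, 3, 42);
	assert(kvmppc_get_gpr(&a, 3) == 42);
	return 0;
}
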
@@ -857,7 +1006,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        int is_dirty = 0;
        int r, n;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
 
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
@@ -879,7 +1028,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        r = 0;
 out:
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return r;
 }
 
@@ -917,6 +1066,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
        vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
        vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+       vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
        vcpu->arch.shadow_msr = MSR_USER64;
 
@@ -950,6 +1100,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret;
+       struct thread_struct ext_bkp;
+       bool save_vec = current->thread.used_vr;
+       bool save_vsx = current->thread.used_vsr;
+       ulong ext_msr;
 
        /* No need to go into the guest when all we do is going out */
        if (signal_pending(current)) {
@@ -957,6 +1111,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                return -EINTR;
        }
 
+       /* Save FPU state in stack */
+       if (current->thread.regs->msr & MSR_FP)
+               giveup_fpu(current);
+       memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
+       ext_bkp.fpscr = current->thread.fpscr;
+       ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+       /* Save Altivec state in stack */
+       if (save_vec) {
+               if (current->thread.regs->msr & MSR_VEC)
+                       giveup_altivec(current);
+               memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
+               ext_bkp.vscr = current->thread.vscr;
+               ext_bkp.vrsave = current->thread.vrsave;
+       }
+       ext_bkp.used_vr = current->thread.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+       /* Save VSX state in stack */
+       if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+               __giveup_vsx(current);
+       ext_bkp.used_vsr = current->thread.used_vsr;
+#endif
+
+       /* Remember the MSR with disabled extensions */
+       ext_msr = current->thread.regs->msr;
+
        /* XXX we get called with irq disabled - change that! */
        local_irq_enable();
 
@@ -964,6 +1147,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        local_irq_disable();
 
+       current->thread.regs->msr = ext_msr;
+
+       /* Make sure we save the guest FPU/Altivec/VSX state */
+       kvmppc_giveup_ext(vcpu, MSR_FP);
+       kvmppc_giveup_ext(vcpu, MSR_VEC);
+       kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+       /* Restore FPU state from stack */
+       memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
+       current->thread.fpscr = ext_bkp.fpscr;
+       current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+       /* Restore Altivec state from stack */
+       if (save_vec && current->thread.used_vr) {
+               memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
+               current->thread.vscr = ext_bkp.vscr;
+               current->thread.vrsave = ext_bkp.vrsave;
+       }
+       current->thread.used_vr = ext_bkp.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+       current->thread.used_vsr = ext_bkp.used_vsr;
+#endif
+
        return ret;
 }
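
Taken together, __kvmppc_vcpu_run() now brackets the guest run with a full
host FP/Altivec/VSX save and restore: flush the host units, snapshot them into
an on-stack thread_struct, remember the facility-disabled MSR, run the guest,
force the guest to give everything up, then restore the host snapshot. A toy,
compilable model of that bracket (all names hypothetical; the real code
snapshots into a struct thread_struct named ext_bkp):

#include <stdint.h>
#include <string.h>

struct ext_state { uint64_t fpr[32]; uint64_t fpscr; };

static struct ext_state host_unit;	/* stands in for the live FPU */

static void save_host_ext(struct ext_state *bkp)
{
	memcpy(bkp, &host_unit, sizeof(*bkp));	/* after giveup_fpu() */
}

static void restore_host_ext(const struct ext_state *bkp)
{
	memcpy(&host_unit, bkp, sizeof(*bkp));
}

static int enter_guest(void)
{
	host_unit.fpr[0] = 0xdead;	/* guest lazily clobbers the unit */
	return 0;
}

static int run_vcpu(void)
{
	struct ext_state bkp;
	int ret;

	save_host_ext(&bkp);	/* snapshot host FP state on the stack */
	ret = enter_guest();
	/* kvmppc_giveup_ext() flushes guest state into the vcpu here */
	restore_host_ext(&bkp);	/* host sees its own registers again */
	return ret;
}

int main(void)
{
	return run_vcpu();
}
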