diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f62588c..35c21bf 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
-#include <asm/lowcore.h>
-#include <asm/uaccess.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
+#include <linux/hrtimer.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <asm/asm-offsets.h>
+#include <asm/uaccess.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -31,7 +35,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 }
 
 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
-                                     struct interrupt_info *inti)
+                                     struct kvm_s390_interrupt_info *inti)
 {
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
@@ -91,7 +95,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 }
 
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
-                                     struct interrupt_info *inti)
+                                     struct kvm_s390_interrupt_info *inti)
 {
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
@@ -111,7 +115,7 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 }
 
 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-                                  struct interrupt_info *inti)
+                                  struct kvm_s390_interrupt_info *inti)
 {
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;
@@ -159,7 +163,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                break;
 
        case KVM_S390_INT_VIRTIO:
-               VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+               VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
@@ -184,8 +188,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                if (rc == -EFAULT)
                        exception = 1;
 
-               rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
-                       inti->ext.ext_params2);
+               rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
+                                  inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;
@@ -246,15 +250,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
        default:
                BUG();
        }
-
        if (exception) {
-               VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
-                          " interrupt");
-               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-               if (inti->type == KVM_S390_PROGRAM_INT) {
-                       printk(KERN_WARNING "kvm: recursive program check\n");
-                       BUG();
-               }
+               printk("kvm: The guest lowcore is not mapped during interrupt "
+                       "delivery, killing userspace\n");
+               do_exit(SIGKILL);
        }
 }
 
@@ -277,22 +276,19 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
-
        if (exception) {
-               VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
-                          " ckc interrupt");
-               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-               return 0;
+               printk("kvm: The guest lowcore is not mapped during interrupt "
+                       "delivery, killing userspace\n");
+               do_exit(SIGKILL);
        }
-
        return 1;
 }
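
Both hunks above replace the old recovery path with a hard failure, and the reasoning is worth spelling out: delivery writes into the guest's lowcore through accessors such as put_guest_u64(), which return -EFAULT once userspace has unmapped the backing memory. The old code reflected that as a PGM_ADDRESSING program check, but delivering a program check writes to the same unmapped lowcore, so a failing KVM_S390_PROGRAM_INT could recurse (hence the old BUG() guard). A minimal sketch of the new policy, using only names that appear in the hunks above:

	/* Sketch: a lowcore store failure during delivery is now fatal
	 * for the hosting process instead of being reflected to the
	 * guest as an addressing exception. */
	rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2, inti->ext.ext_params2);
	if (rc == -EFAULT)
		do_exit(SIGKILL);	/* lowcore unmapped: kill userspace */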
 
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
-       struct local_interrupt *li = &vcpu->arch.local_int;
-       struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-       struct interrupt_info  *inti;
+       struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+       struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+       struct kvm_s390_interrupt_info  *inti;
        int rc = 0;
 
        if (atomic_read(&li->active)) {
@@ -306,13 +302,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        }
 
        if ((!rc) && atomic_read(&fi->active)) {
-               spin_lock_bh(&fi->lock);
+               spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
-               spin_unlock_bh(&fi->lock);
+               spin_unlock(&fi->lock);
        }
 
        if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -325,6 +321,11 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        return rc;
 }
 
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
        u64 now, sltime;
@@ -334,10 +335,15 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
 
+       __set_cpu_idle(vcpu);
+       spin_lock_bh(&vcpu->arch.local_int.lock);
+       vcpu->arch.local_int.timer_due = 0;
+       spin_unlock_bh(&vcpu->arch.local_int.lock);
+
        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
-               return -ENOTSUPP; /* disabled wait */
+               return -EOPNOTSUPP; /* disabled wait */
        }
 
        if (psw_extint_disabled(vcpu) ||
@@ -352,17 +358,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
+       sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
 
-       vcpu->arch.ckc_timer.expires = jiffies + sltime;
-
-       add_timer(&vcpu->arch.ckc_timer);
-       VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+       hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+       VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
-       spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+       spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
-       __set_cpu_idle(vcpu);
-       vcpu->arch.local_int.timer_due = 0;
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
@@ -370,39 +372,52 @@ no_timer:
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
-               spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
-               spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
-       remove_wait_queue(&vcpu->wq, &wait);
+       remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
-       spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
-       del_timer(&vcpu->arch.ckc_timer);
+       spin_unlock(&vcpu->arch.local_int.float_int->lock);
+       hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
 }
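
The replacement for the jiffies arithmetic deserves a note. Bit 51 of the s390 TOD clock represents one microsecond, so one TOD unit (bit 63) is 1/4096 us and one second is 0xf4240000 TOD units; the old expression divided the clock-comparator delta by 0xf4240000ul / HZ to obtain jiffies. hrtimer_start() instead wants nanoseconds, and 1 TOD unit = 1000/4096 ns = 125/512 ns, which is exactly (delta * 125) >> 9. As a sketch, the conversion could be kept in a helper (hypothetical name):

	/* Convert a TOD-clock delta to nanoseconds:
	 * 1 TOD unit = 1/4096 us = 1000/4096 ns = 125/512 ns. */
	static inline u64 tod_to_ns(u64 tod)
	{
		return (tod * 125) >> 9;
	}

Sanity check: a one-second delta of 4096000000 TOD units gives 4096000000 * 125 / 512 = 1000000000 ns.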
 
-void kvm_s390_idle_wakeup(unsigned long data)
+void kvm_s390_tasklet(unsigned long parm)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
 
-       spin_lock_bh(&vcpu->arch.local_int.lock);
+       spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
-       spin_unlock_bh(&vcpu->arch.local_int.lock);
+       spin_unlock(&vcpu->arch.local_int.lock);
 }
 
+/*
+ * low level hrtimer wake routine. Because this runs in hardirq context
+ * we schedule a tasklet to do the real work.
+ */
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+       tasklet_schedule(&vcpu->arch.tasklet);
+
+       return HRTIMER_NORESTART;
+}
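
The comment states the constraint; the reason deferral is needed is lock safety: vcpu->arch.local_int.lock is taken elsewhere with spin_lock_bh()/spin_lock(), never with interrupts disabled, so taking it from the hardirq-context hrtimer callback could deadlock against a holder it interrupted. Scheduling the tasklet moves the wake-up into softirq context. The wiring itself lives outside this file; a sketch of the presumed vcpu setup (the hrtimer init flags are assumptions, the callbacks are the functions above):

	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);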
 
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
-       struct local_interrupt *li = &vcpu->arch.local_int;
-       struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-       struct interrupt_info  *n, *inti = NULL;
+       struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+       struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+       struct kvm_s390_interrupt_info  *n, *inti = NULL;
        int deliver;
 
        __reset_intercept_indicators(vcpu);
@@ -435,7 +450,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
-                       spin_lock_bh(&fi->lock);
+                       spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
@@ -446,7 +461,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
-                       spin_unlock_bh(&fi->lock);
+                       spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
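
Note the shape of the drain loop: the candidate interrupt is unlinked, and the list possibly marked inactive, while fi->lock is held, but __do_deliver_interrupt() only runs after the unlock, because delivery copies into guest memory and must not happen under a spinlock. Distilled to the pattern (a sketch, not new code):

	spin_lock(&fi->lock);
	list_del(&inti->list);			/* detach under the lock */
	spin_unlock(&fi->lock);
	__do_deliver_interrupt(vcpu, inti);	/* may touch guest memory */
	kfree(inti);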
@@ -457,14 +472,14 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
-       struct local_interrupt *li = &vcpu->arch.local_int;
-       struct interrupt_info *inti;
+       struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+       struct kvm_s390_interrupt_info *inti;
 
        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;
 
-       inti->type = KVM_S390_PROGRAM_INT;;
+       inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;
 
        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
@@ -479,9 +494,9 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
 {
-       struct local_interrupt *li;
-       struct float_interrupt *fi;
-       struct interrupt_info *inti;
+       struct kvm_s390_local_interrupt *li;
+       struct kvm_s390_float_interrupt *fi;
+       struct kvm_s390_interrupt_info *inti;
        int sigcpu;
 
        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -490,7 +505,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
-               VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+               VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
@@ -511,7 +526,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -528,7 +543,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
 }
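
kvm_s390_inject_vm() is reached from the KVM_S390_INTERRUPT ioctl on the VM file descriptor, with userspace passing the same struct kvm_s390_interrupt handled here. A hedged userspace sketch (vm_fd and the parameter values are placeholders):

	struct kvm_s390_interrupt irq = {
		.type   = KVM_S390_INT_VIRTIO,
		.parm   = 0,	/* becomes inti->ext.ext_params */
		.parm64 = 0,	/* becomes inti->ext.ext_params2 */
	};
	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &irq) < 0)
		perror("KVM_S390_INTERRUPT");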
@@ -536,8 +551,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
 {
-       struct local_interrupt *li;
-       struct interrupt_info *inti;
+       struct kvm_s390_local_interrupt *li;
+       struct kvm_s390_interrupt_info *inti;
 
        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
@@ -554,9 +569,14 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
+       case KVM_S390_SIGP_SET_PREFIX:
+               inti->prefix.address = s390int->parm;
+               inti->type = s390int->type;
+               VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+                          s390int->parm);
+               break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
-       case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;