KVM: Avoid redelivery of edge interrupt before next edge
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 883fd0d..1150c6d 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -95,8 +95,6 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
                if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
                        pent->fields.remote_irr = 1;
        }
-       if (!pent->fields.trig_mode)
-               ioapic->irr &= ~(1 << idx);
 
        return injected;
 }
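
The hunk above stops ioapic_service() from clearing the IRR bit of an edge-triggered pin as soon as the interrupt is delivered; together with the kvm_ioapic_set_irq() hunk at the end of this patch, the bit now stays latched until the line actually drops and rises again. A minimal userspace sketch of the post-patch service path, using simplified stand-in types and names (toy_ioapic, toy_service and friends are illustrative only, not the kernel structures):

#include <stdio.h>

#define IOAPIC_EDGE_TRIG  0
#define IOAPIC_LEVEL_TRIG 1

struct toy_entry {
	int trig_mode;
	int remote_irr;
	int mask;
};

struct toy_ioapic {
	unsigned int irr;		/* one pending bit per pin */
	struct toy_entry redirtbl[24];
};

/* Deliver the interrupt pending on pin idx.  Level-triggered pins latch
 * remote_irr; after this patch nothing here touches the irr bit. */
static int toy_service(struct toy_ioapic *ioapic, unsigned int idx)
{
	struct toy_entry *pent = &ioapic->redirtbl[idx];
	int injected = 0;

	if (!pent->mask) {
		injected = 1;	/* stand-in for the real delivery path */
		if (pent->trig_mode == IOAPIC_LEVEL_TRIG)
			pent->remote_irr = 1;
	}
	/* the removed lines cleared the irr bit for edge pins right here */
	return injected;
}

int main(void)
{
	struct toy_ioapic io = { .irr = 1u << 0 };	/* pin 0 pending, edge */

	printf("injected=%d irr=%#x\n", toy_service(&io, 0), io.irr);
	return 0;
}
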
@@ -136,125 +134,40 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                mask_after = ioapic->redirtbl[index].fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
-               if (ioapic->irr & (1 << index))
+               if (ioapic->redirtbl[index].fields.trig_mode == IOAPIC_LEVEL_TRIG
+                   && ioapic->irr & (1 << index))
                        ioapic_service(ioapic, index);
                break;
        }
 }
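
With the two-line change above, reprogramming or unmasking a redirection-table entry only re-delivers a still-pending interrupt when the pin is level-triggered; a latched edge is not replayed merely because the entry was rewritten. A small self-contained sketch of that predicate (toy_needs_reservice is an illustrative helper, not a kernel function):

#include <stdio.h>

#define IOAPIC_EDGE_TRIG  0
#define IOAPIC_LEVEL_TRIG 1

/* A pending pin is re-serviced after a redirection-table write only if
 * it is level-triggered. */
static int toy_needs_reservice(unsigned int irr, int index, int trig_mode)
{
	return trig_mode == IOAPIC_LEVEL_TRIG && (irr & (1u << index)) != 0;
}

int main(void)
{
	unsigned int irr = 1u << 2;	/* pin 2 pending */

	printf("%d\n", toy_needs_reservice(irr, 2, IOAPIC_LEVEL_TRIG));	/* 1 */
	printf("%d\n", toy_needs_reservice(irr, 2, IOAPIC_EDGE_TRIG));	/* 0 */
	return 0;
}
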
 
-static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
-                          struct kvm_vcpu *vcpu,
-                          u8 vector, u8 trig_mode, u8 delivery_mode)
-{
-       ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
-                    delivery_mode);
-
-       ASSERT((delivery_mode == IOAPIC_FIXED) ||
-              (delivery_mode == IOAPIC_LOWEST_PRIORITY));
-
-       return kvm_apic_set_irq(vcpu, vector, trig_mode);
-}
-
-static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
-{
-       kvm_inject_nmi(vcpu);
-       kvm_vcpu_kick(vcpu);
-}
-
-u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-                                   u8 dest_mode)
-{
-       u32 mask = 0;
-       int i;
-       struct kvm *kvm = ioapic->kvm;
-       struct kvm_vcpu *vcpu;
-
-       ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);
-
-       if (dest_mode == 0) {   /* Physical mode. */
-               if (dest == 0xFF) {     /* Broadcast. */
-                       for (i = 0; i < KVM_MAX_VCPUS; ++i)
-                               if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
-                                       mask |= 1 << i;
-                       return mask;
-               }
-               for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                       vcpu = kvm->vcpus[i];
-                       if (!vcpu)
-                               continue;
-                       if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
-                               if (vcpu->arch.apic)
-                                       mask = 1 << i;
-                               break;
-                       }
-               }
-       } else if (dest != 0)   /* Logical mode, MDA non-zero. */
-               for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                       vcpu = kvm->vcpus[i];
-                       if (!vcpu)
-                               continue;
-                       if (vcpu->arch.apic &&
-                           kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
-                               mask |= 1 << vcpu->vcpu_id;
-               }
-       ioapic_debug("mask %x\n", mask);
-       return mask;
-}
-
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-       union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
-       unsigned long deliver_bitmask;
-       struct kvm_vcpu *vcpu;
-       int vcpu_id, r = -1;
+       union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+       struct kvm_lapic_irq irqe;
 
        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
                     "vector=%x trig_mode=%x\n",
-                    entry.fields.dest, entry.fields.dest_mode,
-                    entry.fields.delivery_mode, entry.fields.vector,
-                    entry.fields.trig_mode);
-
-       kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
-       if (!deliver_bitmask) {
-               ioapic_debug("no target on destination\n");
-               return 0;
-       }
+                    entry->fields.dest, entry->fields.dest_mode,
+                    entry->fields.delivery_mode, entry->fields.vector,
+                    entry->fields.trig_mode);
+
+       irqe.dest_id = entry->fields.dest_id;
+       irqe.vector = entry->fields.vector;
+       irqe.dest_mode = entry->fields.dest_mode;
+       irqe.trig_mode = entry->fields.trig_mode;
+       irqe.delivery_mode = entry->fields.delivery_mode << 8;
+       irqe.level = 1;
+       irqe.shorthand = 0;
 
-       /* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
-       if (irq == 0)
-               deliver_bitmask = 1;
-#endif
-
-       for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-               if (!(deliver_bitmask & (1 << vcpu_id)))
-                       continue;
-               deliver_bitmask &= ~(1 << vcpu_id);
-               vcpu = ioapic->kvm->vcpus[vcpu_id];
-               if (vcpu) {
-                       if (entry.fields.delivery_mode ==
-                                       IOAPIC_LOWEST_PRIORITY ||
-                           entry.fields.delivery_mode == IOAPIC_FIXED) {
-                               if (r < 0)
-                                       r = 0;
-                               r += ioapic_inj_irq(ioapic, vcpu,
-                                                   entry.fields.vector,
-                                                   entry.fields.trig_mode,
-                                                   entry.fields.delivery_mode);
-                       } else if (entry.fields.delivery_mode == IOAPIC_NMI) {
-                               r = 1;
-                               ioapic_inj_nmi(vcpu);
-                       } else
-                               ioapic_debug("unsupported delivery mode %x!\n",
-                                            entry.fields.delivery_mode);
-               } else
-                       ioapic_debug("null destination vcpu: "
-                                    "mask=%x vector=%x delivery_mode=%x\n",
-                                    entry.fields.deliver_bitmask,
-                                    entry.fields.vector,
-                                    entry.fields.delivery_mode);
+       /* Always delivery PIT interrupt to vcpu 0 */
+       if (irq == 0) {
+               irqe.dest_mode = 0; /* Physical mode. */
+               irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
        }
-       return r;
+#endif
+       return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
 }
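
The rewritten ioapic_deliver() no longer computes a destination bitmask by hand: it packs the redirection entry into a struct kvm_lapic_irq and lets kvm_irq_delivery_to_apic() resolve the target CPUs. The delivery_mode << 8 shift lines the 3-bit IOAPIC field up with the APIC_DM_* constants, which encode the delivery mode in bits 8-10 of the ICR, and the PIT special case becomes a physical-mode destination of vcpu 0 instead of a forced bitmask of 1. A sketch of that translation with simplified stand-in types (toy_redir_entry, toy_lapic_irq and pack_irq are illustrative, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

struct toy_redir_entry {
	uint8_t vector;
	uint8_t delivery_mode;	/* 3-bit IOAPIC encoding: fixed, lowest, NMI, ... */
	uint8_t dest_mode;	/* 0 = physical, 1 = logical */
	uint8_t trig_mode;	/* 0 = edge, 1 = level */
	uint8_t dest_id;
};

struct toy_lapic_irq {
	uint32_t dest_id;
	uint32_t vector;
	uint32_t dest_mode;
	uint32_t trig_mode;
	uint32_t delivery_mode;	/* ICR-style encoding: IOAPIC mode << 8 */
	uint32_t level;
	uint32_t shorthand;
};

static struct toy_lapic_irq pack_irq(const struct toy_redir_entry *e)
{
	struct toy_lapic_irq irq = {
		.dest_id	= e->dest_id,
		.vector		= e->vector,
		.dest_mode	= e->dest_mode,
		.trig_mode	= e->trig_mode,
		.delivery_mode	= (uint32_t)e->delivery_mode << 8,	/* match ICR bits 8-10 */
		.level		= 1,
		.shorthand	= 0,	/* no destination shorthand */
	};
	return irq;
}

int main(void)
{
	struct toy_redir_entry e = { .vector = 0x30, .delivery_mode = 1, .dest_id = 2 };
	struct toy_lapic_irq irq = pack_irq(&e);

	printf("vector=%#x delivery_mode=%#x dest_id=%u\n",
	       (unsigned)irq.vector, (unsigned)irq.delivery_mode, (unsigned)irq.dest_id);
	return 0;
}

With delivery_mode = 1 (lowest priority in the IOAPIC encoding) the packed value is 0x100, which is what the kernel's APIC_DM_LOWEST constant expands to.
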
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
@@ -270,9 +183,10 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
                if (!level)
                        ioapic->irr &= ~mask;
                else {
+                       int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
                        ioapic->irr |= mask;
-                       if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
-                           || !entry.fields.remote_irr)
+                       if ((edge && old_irr != ioapic->irr) ||
+                           (!edge && !entry.fields.remote_irr))
                                ret = ioapic_service(ioapic, irq);
                }
        }
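
This last hunk is where the behaviour in the subject line is enforced: an edge-triggered pin is serviced only when its IRR bit makes a 0 -> 1 transition, while a level-triggered pin is serviced whenever the guest has cleared remote_irr. A runnable toy model of that decision (simplified stand-in types; toy_set_irq is illustrative, not the kernel function):

#include <stdio.h>

#define IOAPIC_EDGE_TRIG  0
#define IOAPIC_LEVEL_TRIG 1

struct toy_pin {
	int trig_mode;
	int remote_irr;
};

struct toy_state {
	unsigned int irr;
	struct toy_pin pin[24];
};

/* Returns 1 when the write would (re)deliver the interrupt. */
static int toy_set_irq(struct toy_state *s, int irq, int level)
{
	unsigned int mask = 1u << irq;
	unsigned int old_irr = s->irr;
	int edge = (s->pin[irq].trig_mode == IOAPIC_EDGE_TRIG);

	if (!level) {
		s->irr &= ~mask;
		return 0;
	}
	s->irr |= mask;
	return (edge && old_irr != s->irr) ||
	       (!edge && !s->pin[irq].remote_irr);
}

int main(void)
{
	struct toy_state s = { 0 };

	s.pin[3].trig_mode = IOAPIC_EDGE_TRIG;
	printf("%d\n", toy_set_irq(&s, 3, 1));	/* rising edge: delivered      */
	printf("%d\n", toy_set_irq(&s, 3, 1));	/* line still high: suppressed */
	toy_set_irq(&s, 3, 0);			/* line drops                  */
	printf("%d\n", toy_set_irq(&s, 3, 1));	/* next edge: delivered again  */
	return 0;
}

Asserting the same edge-triggered line twice injects only once; the interrupt is delivered again only after the line has dropped and risen, which is exactly the redelivery this patch avoids.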