diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 932e5e3..fa3a113 100644
@@ -173,13 +173,23 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
        struct dmar_drhd_unit *dmaru;
        int ret = 0;
 
+       drhd = (struct acpi_dmar_hardware_unit *)header;
+       if (!drhd->address) {
+               /* Promote an attitude of violence to a BIOS engineer today */
+               WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               return -ENODEV;
+       }
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;
 
        dmaru->hdr = header;
-       drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru->reg_base_addr = drhd->address;
+       dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
        ret = alloc_iommu(dmaru);
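
The hunk above moves the cast to the DRHD-specific structure ahead of the allocation, so a firmware-reported register base of zero can be rejected (with a DMI-tagged warning identifying the BIOS) before a dmar_drhd_unit is ever allocated, and the new segment field can be copied alongside the base address. For reference only, the hardware-unit structure the cast relies on is laid out roughly as follows in the kernel's ACPI table headers (approximate sketch; see the actbl* headers in the tree for the authoritative definition):

        /* Approximate layout, for illustration of where drhd->address,
         * drhd->segment and drhd->flags come from. */
        struct acpi_dmar_header {
                u16     type;           /* ACPI_DMAR_TYPE_HARDWARE_UNIT for a DRHD entry */
                u16     length;
        };

        struct acpi_dmar_hardware_unit {
                struct acpi_dmar_header header;
                u8      flags;          /* bit 0: INCLUDE_ALL */
                u8      reserved;
                u16     segment;        /* PCI segment (domain) number */
                u64     address;        /* register base; zero means broken firmware */
        };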
@@ -790,14 +800,41 @@ end:
 }
 
 /*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+       u32 cmd, sts;
+       unsigned long flags;
+       struct q_inval *qi = iommu->qi;
+
+       qi->free_head = qi->free_tail = 0;
+       qi->free_cnt = QI_LENGTH;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       /* write zero to the tail reg */
+       writel(0, iommu->reg + DMAR_IQT_REG);
+
+       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+       cmd = iommu->gcmd | DMA_GCMD_QIE;
+       iommu->gcmd |= DMA_GCMD_QIE;
+       writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware complete it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-       u32 cmd, sts;
-       unsigned long flags;
        struct q_inval *qi;
 
        if (!ecap_qis(iommu->ecap))
@@ -809,20 +846,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
        if (iommu->qi)
                return 0;
 
-       iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+       iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;
 
        qi = iommu->qi;
 
-       qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+       qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
        if (!qi->desc) {
                kfree(qi);
                iommu->qi = 0;
                return -ENOMEM;
        }
 
-       qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+       qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
@@ -835,19 +872,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
        spin_lock_init(&qi->q_lock);
 
-       spin_lock_irqsave(&iommu->register_lock, flags);
-       /* write zero to the tail reg */
-       writel(0, iommu->reg + DMAR_IQT_REG);
-
-       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-       cmd = iommu->gcmd | DMA_GCMD_QIE;
-       iommu->gcmd |= DMA_GCMD_QIE;
-       writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
+       __dmar_enable_qi(iommu);
 
        return 0;
 }
@@ -982,7 +1007,7 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
 }
 
 #define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t dmar_fault(int irq, void *dev_id)
+irqreturn_t dmar_fault(int irq, void *dev_id)
 {
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
@@ -1074,9 +1099,6 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
                return 0;
        }
 
-       /* Force fault register is cleared */
-       dmar_fault(irq, iommu);
-
        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
@@ -1105,3 +1127,28 @@ int __init enable_drhd_fault_handling(void)
 
        return 0;
 }
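
Two related changes sit in the fault-handling hunks above: dmar_fault() loses its static qualifier so code outside this file can call it, and dmar_set_interrupt() no longer force-clears the fault registers itself. A plausible use of the exported handler, hedged since the caller is not part of this file's diff, is a late-init or resume path that drains faults latched while no handler was installed:

        /* Hypothetical caller sketch (not shown in this diff): after the DMAR
         * MSI has been (re)programmed, run the now-exported fault handler once
         * to report and clear any faults recorded in the meantime. */
        static void example_clear_pending_dmar_faults(struct intel_iommu *iommu)
        {
                if (iommu->irq > 0)
                        dmar_fault(iommu->irq, iommu);
        }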
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+       if (!ecap_qis(iommu->ecap))
+               return -ENOENT;
+
+       if (!iommu->qi)
+               return -ENOENT;
+
+       /*
+        * First disable queued invalidation.
+        */
+       dmar_disable_qi(iommu);
+       /*
+        * Then enable queued invalidation again. Since there is no pending
+        * invalidation requests now, it's safe to re-enable queued
+        * invalidation.
+        */
+       __dmar_enable_qi(iommu);
+
+       return 0;
+}
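
Taken together, the queued-invalidation changes split the work so that dmar_enable_qi() performs the one-time allocations (now with GFP_ATOMIC, so it is usable from non-sleeping contexts) while __dmar_enable_qi() does only the register programming, which dmar_reenable_qi() can repeat once the queue has been quiesced by dmar_disable_qi(). A sketch of how a caller might pick between the two entry points after restoring an IOMMU's register state (the actual suspend/resume caller is outside this file and not shown here):

        /* Hypothetical resume-side sketch, for illustration only. */
        static int example_restore_queued_invalidation(struct intel_iommu *iommu)
        {
                if (!ecap_qis(iommu->ecap))
                        return 0;                       /* hardware has no QI support */

                if (!iommu->qi)
                        return dmar_enable_qi(iommu);   /* first-time setup: allocate + enable */

                return dmar_reenable_qi(iommu);         /* queue already allocated: just reprogram */
        }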