diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 75d34bf..fa3a113 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -173,13 +173,23 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
        struct dmar_drhd_unit *dmaru;
        int ret = 0;
 
+       drhd = (struct acpi_dmar_hardware_unit *)header;
+       if (!drhd->address) {
+               /* Promote an attitude of violence to a BIOS engineer today */
+               WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               return -ENODEV;
+       }
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;
 
        dmaru->hdr = header;
-       drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru->reg_base_addr = drhd->address;
+       dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
        ret = alloc_iommu(dmaru);
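
For reference, the cast at the top of this hunk treats the generic ACPI sub-table header as a DRHD entry; the layout it relies on, as declared in the ACPICA headers, looks roughly like the sketch below (illustrative, not part of this patch).

/* Rough sketch of the ACPI DMAR hardware-unit (DRHD) sub-table layout the
 * hunk above reads from; see the ACPICA actbl headers for the real thing.
 */
struct acpi_dmar_header {
        u16     type;           /* e.g. ACPI_DMAR_TYPE_HARDWARE_UNIT */
        u16     length;
};

struct acpi_dmar_hardware_unit {
        struct acpi_dmar_header header;
        u8      flags;          /* BIT0: INCLUDE_ALL */
        u8      reserved;
        u16     segment;        /* PCI segment (domain) number */
        u64     address;        /* register base address of this unit */
};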
@@ -511,6 +521,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                return -ENOMEM;
 
        iommu->seq_id = iommu_allocated++;
+       sprintf(iommu->name, "dmar%d", iommu->seq_id);
 
        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
@@ -753,14 +764,77 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 }
 
 /*
+ * Disable Queued Invalidation interface.
+ */
+void dmar_disable_qi(struct intel_iommu *iommu)
+{
+       unsigned long flags;
+       u32 sts;
+       cycles_t start_time = get_cycles();
+
+       if (!ecap_qis(iommu->ecap))
+               return;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+       if (!(sts & DMA_GSTS_QIES))
+               goto end;
+
+       /*
+        * Give a chance to HW to complete the pending invalidation requests.
+        */
+       while ((readl(iommu->reg + DMAR_IQT_REG) !=
+               readl(iommu->reg + DMAR_IQH_REG)) &&
+               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
+               cpu_relax();
+
+       iommu->gcmd &= ~DMA_GCMD_QIE;
+
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+                     !(sts & DMA_GSTS_QIES), sts);
+end:
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
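
Both the disable path above and the enable path below wait on the global status register via IOMMU_WAIT_OP, which is not part of this diff. As an illustrative approximation (the real macro lives in the intel-iommu headers and may differ in detail), it is a bounded register poll along these lines:

/* Illustrative approximation of IOMMU_WAIT_OP (not the authoritative
 * definition): poll a DMAR register with `op' until `cond' becomes true,
 * giving up after DMAR_OPERATION_TIMEOUT cycles.
 */
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)                     \
do {                                                                    \
        cycles_t start_time = get_cycles();                             \
        while (1) {                                                     \
                sts = op((iommu)->reg + (offset));                      \
                if (cond)                                               \
                        break;                                          \
                if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time)) \
                        panic("DMAR hardware is malfunctioning\n");     \
                cpu_relax();                                            \
        }                                                               \
} while (0)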
+
+/*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+       u32 cmd, sts;
+       unsigned long flags;
+       struct q_inval *qi = iommu->qi;
+
+       qi->free_head = qi->free_tail = 0;
+       qi->free_cnt = QI_LENGTH;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       /* write zero to the tail reg */
+       writel(0, iommu->reg + DMAR_IQT_REG);
+
+       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+       cmd = iommu->gcmd | DMA_GCMD_QIE;
+       iommu->gcmd |= DMA_GCMD_QIE;
+       writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware complete it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
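
The qi->desc page programmed into DMAR_IQA_REG above is the invalidation descriptor ring. Assuming the definitions in the intel-iommu header of the time (sketched here, not quoted verbatim), its geometry is one page of 16-byte descriptors:

/* Sketch of the queued-invalidation ring geometry assumed above (see the
 * intel-iommu header for the authoritative definitions): QI_LENGTH 16-byte
 * descriptors fill exactly the one zeroed page allocated in
 * dmar_enable_qi() below.
 */
struct qi_desc {
        u64 low, high;          /* one 128-bit invalidation descriptor */
};
#define QI_LENGTH       256     /* 256 * 16 bytes == 4 KiB */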
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-       u32 cmd, sts;
-       unsigned long flags;
        struct q_inval *qi;
 
        if (!ecap_qis(iommu->ecap))
@@ -772,20 +846,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
        if (iommu->qi)
                return 0;
 
-       iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+       iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;
 
        qi = iommu->qi;
 
-       qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+       qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
        if (!qi->desc) {
                kfree(qi);
                iommu->qi = 0;
                return -ENOMEM;
        }
 
-       qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+       qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
@@ -798,26 +872,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
        spin_lock_init(&qi->q_lock);
 
-       spin_lock_irqsave(&iommu->register_lock, flags);
-       /* write zero to the tail reg */
-       writel(0, iommu->reg + DMAR_IQT_REG);
-
-       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-       cmd = iommu->gcmd | DMA_GCMD_QIE;
-       iommu->gcmd |= DMA_GCMD_QIE;
-       writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
+       __dmar_enable_qi(iommu);
 
        return 0;
 }
 
 /* iommu interrupt handling. Most stuff are MSI-like. */
 
-static const char *fault_reason_strings[] =
+enum faulttype {
+       DMA_REMAP,
+       INTR_REMAP,
+       UNKNOWN,
+};
+
+static const char *dma_remap_fault_reasons[] =
 {
        "Software",
        "Present bit in root entry is clear",
@@ -833,14 +901,33 @@ static const char *fault_reason_strings[] =
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
 };
+
+static const char *intr_remap_fault_reasons[] =
+{
+       "Detected reserved fields in the decoded interrupt-remapped request",
+       "Interrupt index exceeded the interrupt-remapping table size",
+       "Present field in the IRTE entry is clear",
+       "Error accessing interrupt-remapping table pointed by IRTA_REG",
+       "Detected reserved fields in the IRTE entry",
+       "Blocked a compatibility format interrupt request",
+       "Blocked an interrupt request due to source-id verification failure",
+};
+
 #define MAX_FAULT_REASON_IDX   (ARRAY_SIZE(fault_reason_strings) - 1)
 
-const char *dmar_get_fault_reason(u8 fault_reason)
+const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 {
-       if (fault_reason > MAX_FAULT_REASON_IDX)
+       if (fault_reason >= 0x20 && (fault_reason < 0x20 +
+                                    ARRAY_SIZE(intr_remap_fault_reasons))) {
+               *fault_type = INTR_REMAP;
+               return intr_remap_fault_reasons[fault_reason - 0x20];
+       } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+               *fault_type = DMA_REMAP;
+               return dma_remap_fault_reasons[fault_reason];
+       } else {
+               *fault_type = UNKNOWN;
                return "Unknown";
-       else
-               return fault_reason_strings[fault_reason];
+       }
 }
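
A short worked example of the classification above, using only the tables defined in this hunk: interrupt-remapping fault reasons start at 0x20, reason codes below the DMA-remap table size are DMA-remapping faults, and anything else is unknown.

/* Worked example (illustrative) for dmar_get_fault_reason():
 *
 *   fault_reason 0x01 -> DMA_REMAP,  dma_remap_fault_reasons[0x01]
 *                        "Present bit in root entry is clear"
 *   fault_reason 0x22 -> INTR_REMAP, intr_remap_fault_reasons[0x22 - 0x20]
 *                        "Present field in the IRTE entry is clear"
 *   fault_reason 0x40 -> UNKNOWN,    "Unknown"
 */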
 
 void dmar_msi_unmask(unsigned int irq)
@@ -897,21 +984,30 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
 {
        const char *reason;
+       int fault_type;
 
-       reason = dmar_get_fault_reason(fault_reason);
+       reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
-       printk(KERN_ERR
-               "DMAR:[%s] Request device [%02x:%02x.%d] "
-               "fault addr %llx \n"
-               "DMAR:[fault reason %02d] %s\n",
-               (type ? "DMA Read" : "DMA Write"),
-               (source_id >> 8), PCI_SLOT(source_id & 0xFF),
-               PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+       if (fault_type == INTR_REMAP)
+               printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
+                      "fault index %llx\n"
+                       "INTR-REMAP:[fault reason %02d] %s\n",
+                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+                       PCI_FUNC(source_id & 0xFF), addr >> 48,
+                       fault_reason, reason);
+       else
+               printk(KERN_ERR
+                      "DMAR:[%s] Request device [%02x:%02x.%d] "
+                      "fault addr %llx \n"
+                      "DMAR:[fault reason %02d] %s\n",
+                      (type ? "DMA Read" : "DMA Write"),
+                      (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+                      PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
 }
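
For interrupt-remapping faults the fault record's address field does not hold a DMA address; the interrupt index is carried in its upper bits, which is why the code above prints addr >> 48 as a "fault index". A hypothetical helper (not part of this patch) makes the decode explicit:

/* Hypothetical helper (illustrative only): decode the interrupt index from
 * the raw fault-record info the same way the printk above does.
 */
static inline u64 example_intr_fault_index(u64 fault_info)
{
        return fault_info >> 48;        /* e.g. 0x0005000000000000 -> index 5 */
}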
 
 #define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t dmar_fault(int irq, void *dev_id)
+irqreturn_t dmar_fault(int irq, void *dev_id)
 {
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
@@ -920,10 +1016,13 @@ static irqreturn_t dmar_fault(int irq, void *dev_id)
 
        spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+       if (fault_status)
+               printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+                      fault_status);
 
        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
-               goto clear_overflow;
+               goto clear_rest;
 
        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
@@ -964,11 +1063,10 @@ static irqreturn_t dmar_fault(int irq, void *dev_id)
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
-clear_overflow:
-       /* clear primary fault overflow */
+clear_rest:
+       /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-       if (fault_status & DMA_FSTS_PFO)
-               writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
+       writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
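
The write-back of fault_status works because, assuming the usual write-one-to-clear semantics of the latched DMA_FSTS_* error bits, writing back exactly the value just read acknowledges every condition that was set without disturbing anything else:

/* Illustrative only (assumes write-one-to-clear latched error bits):
 *
 *      fault_status = readl(iommu->reg + DMAR_FSTS_REG);   // e.g. DMA_FSTS_PFO set
 *      writel(fault_status, iommu->reg + DMAR_FSTS_REG);   // acknowledges it
 */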
@@ -978,6 +1076,12 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 {
        int irq, ret;
 
+       /*
+        * Check if the fault interrupt is already initialized.
+        */
+       if (iommu->irq)
+               return 0;
+
        irq = create_irq();
        if (!irq) {
                printk(KERN_ERR "IOMMU: no free vectors\n");
@@ -995,11 +1099,56 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
                return 0;
        }
 
-       /* Force fault register is cleared */
-       dmar_fault(irq, iommu);
-
        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
 }
+
+int __init enable_drhd_fault_handling(void)
+{
+       struct dmar_drhd_unit *drhd;
+
+       /*
+        * Enable fault control interrupt.
+        */
+       for_each_drhd_unit(drhd) {
+               int ret;
+               struct intel_iommu *iommu = drhd->iommu;
+               ret = dmar_set_interrupt(iommu);
+
+               if (ret) {
+                       printk(KERN_ERR "DRHD %Lx: failed to enable fault "
+                              "interrupt, ret %d\n",
+                              (unsigned long long)drhd->reg_base_addr, ret);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
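
for_each_drhd_unit() walks the global list of DRHD units registered by dmar_parse_one_drhd() earlier in this file; roughly (sketched from the dmar header, not part of this diff):

/* Rough sketch of the iterator used above (see the dmar/intel-iommu headers
 * for the real definition): walk every registered DRHD unit.
 */
#define for_each_drhd_unit(drhd) \
        list_for_each_entry(drhd, &dmar_drhd_units, list)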
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+       if (!ecap_qis(iommu->ecap))
+               return -ENOENT;
+
+       if (!iommu->qi)
+               return -ENOENT;
+
+       /*
+        * First disable queued invalidation.
+        */
+       dmar_disable_qi(iommu);
+       /*
+        * Then enable queued invalidation again. Since there is no pending
+        * invalidation requests now, it's safe to re-enable queued
+        * invalidation.
+        */
+       __dmar_enable_qi(iommu);
+
+       return 0;
+}
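
Taken together, the enable and re-enable paths are meant to be used along these lines. The helper below is purely illustrative (it is not part of this patch) and only shows the intended split between first-time setup and a later re-enable, e.g. when an IOMMU is brought back up after suspend.

/* Hypothetical caller (illustrative only, not part of this patch). */
static int example_bring_up_qi(struct intel_iommu *iommu)
{
        if (!iommu->qi)
                /* first time: allocate the descriptor ring, then enable QI */
                return dmar_enable_qi(iommu);

        /* ring already exists: quiesce and reprogram the hardware only */
        return dmar_reenable_qi(iommu);
}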