diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 6d7f961..b952ebc 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
 #include <linux/timer.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
 
-#undef PREFIX
-#define PREFIX "DMAR:"
+#define PREFIX "DMAR: "
 
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
@@ -174,15 +175,6 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
        int ret = 0;
 
        drhd = (struct acpi_dmar_hardware_unit *)header;
-       if (!drhd->address) {
-               /* Promote an attitude of violence to a BIOS engineer today */
-               WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
-                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                    dmi_get_system_info(DMI_BIOS_VENDOR),
-                    dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
-               return -ENODEV;
-       }
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;
@@ -353,6 +345,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
+       struct acpi_dmar_rhsa *rhsa;
 
        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -374,6 +367,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
                break;
+       case ACPI_DMAR_HARDWARE_AFFINITY:
+               rhsa = container_of(header, struct acpi_dmar_rhsa, header);
+               printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+                      (unsigned long long)rhsa->base_address,
+                      rhsa->proximity_domain);
+               break;
        }
 }
 
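For reference, the RHSA (Remapping Hardware Static Affinity) sub-table printed by the new case is laid out roughly as below; field names follow the ACPICA headers of this era and are shown for orientation only:

	/* DMAR "hardware affinity" (RHSA) sub-table, roughly as declared in
	 * ACPICA: it ties a remapping unit's register base to a NUMA node. */
	struct acpi_dmar_rhsa {
		struct acpi_dmar_header header;	/* type ACPI_DMAR_HARDWARE_AFFINITY, length */
		u32 reserved;
		u64 base_address;		/* register base of the associated DRHD */
		u32 proximity_domain;		/* NUMA proximity domain */
	};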
@@ -413,6 +412,12 @@ parse_dmar_table(void)
         */
        dmar_table_detect();
 
+       /*
+        * ACPI tables may not be DMA protected by tboot, so use DMAR copy
+        * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
+        */
+       dmar_tbl = tboot_get_dmar_table(dmar_tbl);
+
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;
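The new tboot_get_dmar_table() call is effectively a no-op on non-TXT boots. A minimal sketch of the contract parse_dmar_table() relies on (the real implementation lives in arch/x86/kernel/tboot.c and maps the copy out of the TXT heap; the helper below is hypothetical):

	/* Sketch only: with tboot inactive the original pointer comes back
	 * unchanged; otherwise the DMAR copy that SINIT saved in the
	 * DMA-protected TXT heap is returned instead. */
	struct acpi_table_header *tboot_get_dmar_table(struct acpi_table_header *dmar_tbl)
	{
		if (!tboot_enabled())
			return dmar_tbl;

		return dmar_copy_from_txt_heap();	/* hypothetical helper */
	}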
@@ -452,9 +457,13 @@ parse_dmar_table(void)
                        ret = dmar_parse_one_atsr(entry_header);
 #endif
                        break;
+               case ACPI_DMAR_HARDWARE_AFFINITY:
+                       /* We don't do anything with RHSA (yet?) */
+                       break;
                default:
                        printk(KERN_WARNING PREFIX
-                               "Unknown DMAR structure type\n");
+                               "Unknown DMAR structure type %d\n",
+                               entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
@@ -570,18 +579,56 @@ int __init dmar_table_init(void)
                printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
-#ifdef CONFIG_INTR_REMAP
-       parse_ioapics_under_ir();
-#endif
        return 0;
 }
 
+int __init check_zero_address(void)
+{
+       struct acpi_table_dmar *dmar;
+       struct acpi_dmar_header *entry_header;
+       struct acpi_dmar_hardware_unit *drhd;
+
+       dmar = (struct acpi_table_dmar *)dmar_tbl;
+       entry_header = (struct acpi_dmar_header *)(dmar + 1);
+
+       while (((unsigned long)entry_header) <
+                       (((unsigned long)dmar) + dmar_tbl->length)) {
+               /* Avoid looping forever on bad ACPI tables */
+               if (entry_header->length == 0) {
+                       printk(KERN_WARNING PREFIX
+                               "Invalid 0-length structure\n");
+                       return 0;
+               }
+
+               if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
+                       drhd = (void *)entry_header;
+                       if (!drhd->address) {
+                               /* Promote an attitude of violence to a BIOS engineer today */
+                               WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+                                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                                    dmi_get_system_info(DMI_BIOS_VERSION),
+                                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+#ifdef CONFIG_DMAR
+                               dmar_disabled = 1;
+#endif
+                               return 0;
+                       }
+                       break;
+               }
+
+               entry_header = ((void *)entry_header + entry_header->length);
+       }
+       return 1;
+}
+
 void __init detect_intel_iommu(void)
 {
        int ret;
 
        ret = dmar_table_detect();
-
+       if (ret)
+               ret = check_zero_address();
        {
 #ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
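check_zero_address() walks the same layout parse_dmar_table() does: a fixed struct acpi_table_dmar header followed by variable-length sub-tables, each of which begins with a type/length pair. A sketch of the walk the pointer arithmetic assumes, for orientation only:

	/* Generic DMAR sub-table walk (fragment, illustrative only). */
	struct acpi_dmar_header *hdr = (struct acpi_dmar_header *)(dmar + 1);
	void *end = (void *)dmar + dmar->header.length;

	while ((void *)hdr < end) {
		if (hdr->length == 0)	/* malformed entry: stop, don't loop forever */
			break;
		/* hdr->type selects DRHD, RMRR, ATSR, RHSA, ... */
		hdr = (void *)hdr + hdr->length;
	}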
@@ -632,20 +679,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+       if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+               /* Promote an attitude of violence to a BIOS engineer today */
+               WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    drhd->reg_base_addr,
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               goto err_unmap;
+       }
+
 #ifdef CONFIG_DMAR
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
-               goto error;
+               goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                printk(KERN_ERR
                        "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
-               goto error;
+               goto err_unmap;
        }
 #endif
        iommu->agaw = agaw;
@@ -665,7 +723,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        }
 
        ver = readl(iommu->reg + DMAR_VER_REG);
-       pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+       pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
@@ -675,7 +733,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
        drhd->iommu = iommu;
        return 0;
-error:
+
+ err_unmap:
+       iounmap(iommu->reg);
+ error:
        kfree(iommu);
        return -1;
 }
@@ -699,7 +760,8 @@ void free_iommu(struct intel_iommu *iommu)
  */
 static inline void reclaim_free_desc(struct q_inval *qi)
 {
-       while (qi->desc_status[qi->free_tail] == QI_DONE) {
+       while (qi->desc_status[qi->free_tail] == QI_DONE ||
+              qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
@@ -709,10 +771,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 static int qi_check_fault(struct intel_iommu *iommu, int index)
 {
        u32 fault;
-       int head;
+       int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;
 
+       if (qi->desc_status[wait_index] == QI_ABORT)
+               return -EAGAIN;
+
        fault = readl(iommu->reg + DMAR_FSTS_REG);
 
        /*
@@ -722,7 +787,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
-               if ((head >> 4) == index) {
+               if ((head >> DMAR_IQ_SHIFT) == index) {
+                       printk(KERN_ERR "VT-d detected invalid descriptor: "
+                               "low=%llx, high=%llx\n",
+                               (unsigned long long)qi->desc[index].low,
+                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
@@ -732,6 +801,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
                }
        }
 
+       /*
+        * If ITE happens, all pending wait_desc commands are aborted.
+        * No new descriptors are fetched until the ITE is cleared.
+        */
+       if (fault & DMA_FSTS_ITE) {
+               head = readl(iommu->reg + DMAR_IQH_REG);
+               head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+               head |= 1;
+               tail = readl(iommu->reg + DMAR_IQT_REG);
+               tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+               writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+               do {
+                       if (qi->desc_status[head] == QI_IN_USE)
+                               qi->desc_status[head] = QI_ABORT;
+                       head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+               } while (head != tail);
+
+               if (qi->desc_status[wait_index] == QI_ABORT)
+                       return -EAGAIN;
+       }
+
+       if (fault & DMA_FSTS_ICE)
+               writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
        return 0;
 }
 
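Some context for the index arithmetic above: IQH/IQT hold byte offsets into the invalidation queue and each descriptor is 16 bytes, hence the new DMAR_IQ_SHIFT (4) replacing the bare '>> 4'. Descriptors are always queued in request/wait pairs, so wait descriptors sit in odd slots; 'head |= 1' rounds to such a slot and the loop then steps two slots at a time, marking only wait descriptors QI_ABORT. A worked example with made-up register values:

	/* Illustrative numbers only, assuming QI_LENGTH == 256 slots. */
	u32 head_reg = 0x40;			/* byte offset read from DMAR_IQH_REG */
	int head = head_reg >> DMAR_IQ_SHIFT;	/* 0x40 / 16 bytes = slot 4 */
	head = (head - 1 + 256) % 256;		/* step back to slot 3 */
	head |= 1;				/* already odd, i.e. a wait-descriptor slot */
	/* the loop then visits slots 3, 1, 255, 253, ... until it meets tail */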
@@ -741,7 +836,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
  */
 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
-       int rc = 0;
+       int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
@@ -752,6 +847,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
        hw = qi->desc;
 
+restart:
+       rc = 0;
+
        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -782,7 +880,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
-       writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+       writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
 
        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
@@ -794,18 +892,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
-                       goto out;
+                       break;
 
                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }
-out:
-       qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+       qi->desc_status[index] = QI_DONE;
 
        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);
 
+       if (rc == -EAGAIN)
+               goto restart;
+
        return rc;
 }
 
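For context on why completion and aborts are tracked through qi->desc_status[wait_index]: each submission occupies two queue slots, the caller's descriptor at 'index' and a wait descriptor at 'index + 1' whose status write-back flips the entry to QI_DONE. Roughly what the (unchanged) top half of qi_submit_sync() sets up, simplified here rather than quoted verbatim:

	/* Simplified sketch of the request/wait pairing. */
	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	hw[index] = *desc;				/* the caller's request */
	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |	/* value hardware writes back */
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
	hw[wait_index] = wait_desc;			/* completion marker */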
@@ -857,6 +958,27 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
        qi_submit_sync(&desc, iommu);
 }
 
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+                       u64 addr, unsigned mask)
+{
+       struct qi_desc desc;
+
+       if (mask) {
+               BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+               addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+               desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+       } else
+               desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+       if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+               qdep = 0;
+
+       desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+                  QI_DIOTLB_TYPE;
+
+       qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
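The address mangling in qi_flush_dev_iotlb() follows the VT-d range encoding: the invalidation covers 2^mask pages, the base must be aligned to that size (the BUG_ON), and the lowest clear bit of the address field tells the hardware how big the range is. A worked example, illustrative only:

	/* mask == 2, i.e. 4 pages (16KB with 4KB pages); numbers illustrative. */
	u64 addr = 0x10000;				/* 16KB aligned, so the BUG_ON passes */
	addr |= (1 << (VTD_PAGE_SHIFT + 2 - 1)) - 1;	/* sets bits 0..12 -> addr == 0x11fff */
	/* lowest clear bit is now bit 13 (VTD_PAGE_SHIFT + 1), which encodes a
	 * 2^2-page range; QI_DEV_IOTLB_SIZE flags that a range is present at all */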
@@ -1151,7 +1273,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
                                source_id, guest_addr);
 
                fault_index++;
-               if (fault_index > cap_num_fault_regs(iommu->cap))
+               if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
@@ -1244,3 +1366,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 
        return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+       struct acpi_table_dmar *dmar;
+       dmar = (struct acpi_table_dmar *)dmar_tbl;
+       return dmar->flags & 0x1;
+}
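dmar_ir_support() tests bit 0 of the DMAR table flags, which is the interrupt-remapping-supported indicator. A hedged sketch of the same check written against the named flag bit, assuming the constant is available in your ACPICA headers:

	/* Equivalent check using the named flag bit (assumed to exist as
	 * ACPI_DMAR_INTR_REMAP in the ACPICA headers). */
	return dmar->flags & ACPI_DMAR_INTR_REMAP;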