x86/amd-iommu: Workaround for erratum 63
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 543822b..f95dfe5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -62,6 +62,10 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                      unsigned long start_page,
                                      unsigned int pages);
 
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
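
A note on the compatibility define above: the #ifndef guard lets this file build against trees whose <linux/device.h> predates the BUS_NOTIFY_UNBOUND_DRIVER event, and 0x0005 matches the value the header assigns once the event exists. For orientation, the bus notifier events look roughly like this (a sketch, not copied verbatim from the header):

    /* sketch of the bus notifier event values in <linux/device.h> */
    #define BUS_NOTIFY_ADD_DEVICE      0x00000001 /* device added */
    #define BUS_NOTIFY_DEL_DEVICE      0x00000002 /* device removed */
    #define BUS_NOTIFY_BOUND_DRIVER    0x00000003 /* driver bound to device */
    #define BUS_NOTIFY_UNBIND_DRIVER   0x00000004 /* driver about to be unbound */
    #define BUS_NOTIFY_UNBOUND_DRIVER  0x00000005 /* driver fully unbound */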
@@ -218,7 +222,7 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
 {
        struct amd_iommu *iommu;
 
-       list_for_each_entry(iommu, &amd_iommu_list, list)
+       for_each_iommu(iommu)
                iommu_poll_events(iommu);
 
        return IRQ_HANDLED;
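
The for_each_iommu() conversions throughout this patch replace open-coded walks of the global IOMMU list with a helper macro; it is a thin wrapper in amd_iommu_types.h, roughly:

    /* iterate over all AMD IOMMUs present in the system */
    #define for_each_iommu(iommu) \
            list_for_each_entry((iommu), &amd_iommu_list, list)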
@@ -430,6 +434,16 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
 }
 
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+{
+       u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+       INC_STATS_COUNTER(domain_flush_single);
+
+       iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+}
+
 /*
  * This function is used to flush the IO/TLB for a given protection domain
  * on every IOMMU in the system
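
The new iommu_flush_tlb_pde() differs from the existing iommu_flush_tlb() only in the pde argument it passes to iommu_queue_inv_iommu_pages(), which sets the PDE bit in the INVALIDATE_IOMMU_PAGES command so the hardware also evicts cached page directory entries instead of only PTEs. Inside the existing __iommu_build_inv_iommu_pages() helper the two flags map onto the command roughly like this:

    if (s)   /* size bit: flush more than one 4kb page */
            cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
    if (pde) /* PDE bit: flush page directory entries as well */
            cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;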
@@ -445,7 +459,7 @@ static void iommu_flush_domain(u16 domid)
        __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
                                      domid, 1, 1);
 
-       list_for_each_entry(iommu, &amd_iommu_list, list) {
+       for_each_iommu(iommu) {
                spin_lock_irqsave(&iommu->lock, flags);
                __iommu_queue_command(iommu, &cmd);
                __iommu_completion_wait(iommu);
@@ -454,6 +468,35 @@ static void iommu_flush_domain(u16 domid)
        }
 }
 
+void amd_iommu_flush_all_domains(void)
+{
+       int i;
+
+       for (i = 1; i < MAX_DOMAIN_ID; ++i) {
+               if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+                       continue;
+               iommu_flush_domain(i);
+       }
+}
+
+void amd_iommu_flush_all_devices(void)
+{
+       struct amd_iommu *iommu;
+       int i;
+
+       for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+               if (amd_iommu_pd_table[i] == NULL)
+                       continue;
+
+               iommu = amd_iommu_rlookup_table[i];
+               if (!iommu)
+                       continue;
+
+               iommu_queue_inv_dev_entry(iommu, i);
+               iommu_completion_wait(iommu);
+       }
+}
+
 /****************************************************************************
  *
  * The functions below are used to create the page table mappings for
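
The two flush-all helpers above are meant for callers outside this file; the suspend/resume code in amd_iommu_init.c uses them to rebuild hardware state after the IOMMUs come back up. A sketch of that resume handler, with its shape taken from the companion suspend/resume work (not part of this hunk):

    static int amd_iommu_resume(struct sys_device *dev)
    {
            /* re-program the hardware from the saved state */
            enable_iommus();

            /*
             * a disabled IOMMU never executes queued commands, so the
             * flushes must run after the IOMMUs are enabled again
             */
            amd_iommu_flush_all_devices();
            amd_iommu_flush_all_domains();

            return 0;
    }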
@@ -661,6 +704,10 @@ static int alloc_new_range(struct amd_iommu *iommu,
        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
        int i;
 
+#ifdef CONFIG_IOMMU_STRESS
+       populate = false;
+#endif
+
        if (index >= APERTURE_MAX_RANGES)
                return -ENOMEM;
 
@@ -820,6 +867,11 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 
        BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
 
+#ifdef CONFIG_IOMMU_STRESS
+       if (i < 4)
+               return;
+#endif
+
        if (address >= dom->next_address)
                dom->need_flush = true;
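
This is the companion stress-mode hunk: refusing to free addresses in the first four aperture ranges steadily pushes the allocator into higher ranges, which forces the aperture-growing path above to run. Four ranges correspond to 512 MB of DMA address space given the sizing constants in amd_iommu_types.h, roughly:

    #define APERTURE_RANGE_SHIFT  27   /* 128 MB per range */
    #define APERTURE_RANGE_SIZE   (1ULL << APERTURE_RANGE_SHIFT)
    #define APERTURE_MAX_RANGES   32   /* up to 4 GB of DMA space */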
 
@@ -1036,7 +1088,13 @@ static void attach_device(struct amd_iommu *iommu,
        amd_iommu_pd_table[devid] = domain;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
+       /*
+        * We might be booting into a crash kernel here, and the
+        * crashed kernel may have left the IOMMU caches dirty, so
+        * flush here to evict any stale entries.
+        */
        iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_flush_tlb_pde(iommu, domain->id);
 }
 
 /*
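
Invalidating the device table entry alone is not enough in the crash-kernel case: the IO/TLB may still hold translations that the crashed kernel installed for the same domain ID, including cached page directory entries, which is exactly what the added iommu_flush_tlb_pde() call evicts. The DTE invalidation queued just before it is a small command, roughly:

    /* sketch of the INVALIDATE_DEVTAB_ENTRY command built by
     * iommu_queue_inv_dev_entry() */
    struct iommu_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
    cmd.data[0] = devid;
    iommu_queue_command(iommu, &cmd);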
@@ -1056,6 +1114,8 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
        amd_iommu_dev_table[devid].data[1] = 0;
        amd_iommu_dev_table[devid].data[2] = 0;
 
+       amd_iommu_apply_erratum_63(devid);
+
        /* decrease reference counter */
        domain->dev_cnt -= 1;
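
amd_iommu_apply_erratum_63() is implemented in amd_iommu_init.c. Erratum 63 documents undefined behavior when a device sends SMI messages while the SysMgt field of its DTE reads 01b; the recommended workaround is to set the IW (write permission) bit in that case. A sketch of its shape, assuming the DEV_ENTRY_* bit helpers from the init code:

    /* read the two SysMgt bits out of the DTE and apply the
     * workaround when they decode to 01b */
    void amd_iommu_apply_erratum_63(u16 devid)
    {
            int sysmgt;

            sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                     (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

            if (sysmgt == 0x01)
                    set_dev_entry_bit(devid, DEV_ENTRY_IW);
    }

The call sits here because zeroing data[1] above also clears the IW bit, while the SysMgt bits live in an untouched part of the entry; whenever the DTE is rewritten the workaround has to be applied again.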
 
@@ -1103,17 +1163,7 @@ static int device_change_notifier(struct notifier_block *nb,
                          "to a non-dma-ops domain\n", dev_name(dev));
 
        switch (action) {
-       case BUS_NOTIFY_BOUND_DRIVER:
-               if (domain)
-                       goto out;
-               dma_domain = find_protection_domain(devid);
-               if (!dma_domain)
-                       dma_domain = iommu->default_dom;
-               attach_device(iommu, &dma_domain->domain, devid);
-               printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
-                      "device %s\n", dma_domain->domain.id, dev_name(dev));
-               break;
-       case BUS_NOTIFY_UNBIND_DRIVER:
+       case BUS_NOTIFY_UNBOUND_DRIVER:
                if (!domain)
                        goto out;
                detach_device(domain, devid);
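
Two things change in the notifier. The BUS_NOTIFY_BOUND_DRIVER case can go because devices are now attached lazily on their first DMA operation (see get_device_resources() below), and the detach moves from UNBIND to UNBOUND: the driver core emits UNBIND before calling the driver's remove callback and UNBOUND only after the driver is completely gone, so detaching on UNBOUND avoids pulling the protection domain away from a driver that may still issue DMA during ->remove(). The ordering in drivers/base/dd.c is roughly:

    /* simplified sketch of __device_release_driver() */
    blocking_notifier_call_chain(&bus->p->bus_notifier,
                                 BUS_NOTIFY_UNBIND_DRIVER, dev);
    drv->remove(dev);            /* driver may still do DMA here */
    dev->driver = NULL;
    blocking_notifier_call_chain(&bus->p->bus_notifier,
                                 BUS_NOTIFY_UNBOUND_DRIVER, dev);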
@@ -1144,7 +1194,7 @@ out:
        return 0;
 }
 
-struct notifier_block device_nb = {
+static struct notifier_block device_nb = {
        .notifier_call = device_change_notifier,
 };
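
Making device_nb static is safe because its only user is the registration call later in this same file:

    /* the notifier block is registered once against the PCI bus */
    bus_register_notifier(&pci_bus_type, &device_nb);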
 
@@ -1234,8 +1284,8 @@ static int get_device_resources(struct device *dev,
                        dma_dom = (*iommu)->default_dom;
                *domain = &dma_dom->domain;
                attach_device(*iommu, *domain, *bdf);
-               printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
-                               "device %s\n", (*domain)->id, dev_name(dev));
+               DUMP_printk("Using protection domain %d for device %s\n",
+                           (*domain)->id, dev_name(dev));
        }
 
        if (domain_for_device(_bdf) == NULL)
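
DUMP_printk() demotes this message from an unconditional KERN_INFO line to one that only appears when the kernel is booted with amd_iommu_dump on the command line. The macro lives in amd_iommu_types.h and looks roughly like:

    #define DUMP_printk(format, arg...)                                 \
            do {                                                        \
                    if (amd_iommu_dump)                                 \
                            printk(KERN_INFO "AMD IOMMU: " format, ## arg); \
            } while (0)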
@@ -1715,7 +1765,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
        flag |= __GFP_ZERO;
        virt_addr = (void *)__get_free_pages(flag, get_order(size));
        if (!virt_addr)
-               return 0;
+               return NULL;
 
        paddr = virt_to_phys(virt_addr);
 
@@ -1735,8 +1785,10 @@ static void *alloc_coherent(struct device *dev, size_t size,
        *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-       if (*dma_addr == bad_dma_address)
+       if (*dma_addr == bad_dma_address) {
+               spin_unlock_irqrestore(&domain->lock, flags);
                goto out_free;
+       }
 
        iommu_completion_wait(iommu);
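
The added unlock fixes a real locking bug: out_free sits below the point where the domain lock is normally dropped, so the old error path jumped to free_pages() with domain->lock still held and interrupts disabled. After the fix the tail of alloc_coherent() reads roughly:

    /* sketch of the function tail: every path into out_free now runs
     * with the domain lock already released */
    iommu_completion_wait(iommu);
    spin_unlock_irqrestore(&domain->lock, flags);
    return virt_addr;

    out_free:
            free_pages((unsigned long)virt_addr, get_order(size));
            return NULL;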
 
@@ -1868,7 +1920,7 @@ int __init amd_iommu_init_dma_ops(void)
         * found in the system. Devices not assigned to any other
         * protection domain will be assigned to the default one.
         */
-       list_for_each_entry(iommu, &amd_iommu_list, list) {
+       for_each_iommu(iommu) {
                iommu->default_dom = dma_ops_domain_alloc(iommu);
                if (iommu->default_dom == NULL)
                        return -ENOMEM;
@@ -1906,7 +1958,7 @@ int __init amd_iommu_init_dma_ops(void)
 
 free_domains:
 
-       list_for_each_entry(iommu, &amd_iommu_list, list) {
+       for_each_iommu(iommu) {
                if (iommu->default_dom)
                        dma_ops_domain_free(iommu->default_dom);
        }
@@ -2038,7 +2090,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 
        old_domain = domain_for_device(devid);
        if (old_domain)
-               return -EBUSY;
+               detach_device(old_domain, devid);
 
        attach_device(iommu, domain, devid);
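
Returning -EBUSY here meant an IOMMU-API user could not move a device between protection domains without an explicit detach first; detaching the old domain in place makes re-attach just work. A hedged usage sketch from the caller's side, where pdev is a hypothetical struct pci_dev pointer:

    struct iommu_domain *dom = iommu_domain_alloc();

    if (dom && !iommu_attach_device(dom, &pdev->dev)) {
            /* the device now translates through dom, no matter which
             * domain it was attached to before */
    }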