diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 65aa1d4..f3f6865 100644
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN          IOVA_PFN(DMA_64BIT_MASK)
 
+/* global iommu array; entries are set to NULL for ignored DMAR units */
+static struct intel_iommu **g_iommus;
+
+static int rwbf_quirk;
+
 /*
  * 0: Present
  * 1-11: Reserved
@@ -200,9 +206,17 @@ static inline bool dma_pte_present(struct dma_pte *pte)
        return (pte->val & 3) != 0;
 }
 
+/* devices under the same p2p bridge are owned by one domain */
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
+
+/* domain represents a virtual machine: more than one device
+ * across iommus may be owned by one domain, e.g. a kvm guest.
+ */
+#define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 1)
+
 struct dmar_domain {
        int     id;                     /* domain id */
-       struct intel_iommu *iommu;      /* back pointer to owning iommu */
+       unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */
 
        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */
@@ -214,8 +228,12 @@ struct dmar_domain {
        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;
 
-#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
-       int             flags;
+       int             flags;          /* flags to find out type of domain */
+
+       int             iommu_coherency; /* indicate coherency of iommu access */
+       int             iommu_count;    /* reference count of iommu */
+       spinlock_t      iommu_lock;     /* protect iommu set in domain */
+       u64             max_addr;       /* maximum mapped address */
 };
 
 /* PCI domain-device relationship */
@@ -252,7 +270,12 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-int dmar_disabled;
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /* CONFIG_DMAR_DEFAULT_ON */
+
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -261,14 +284,19 @@ static int intel_iommu_strict;
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+static struct iommu_ops intel_iommu_ops;
+
 static int __init intel_iommu_setup(char *str)
 {
        if (!str)
                return -EINVAL;
        while (*str) {
-               if (!strncmp(str, "off", 3)) {
+               if (!strncmp(str, "on", 2)) {
+                       dmar_disabled = 0;
+                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+               } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
-                       printk(KERN_INFO"Intel-IOMMU: disabled\n");
+                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
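
Together with the CONFIG_DMAR_DEFAULT_ON default above, this parser lets the compiled-in choice be overridden at boot. The option strings are the literals matched by strncmp(); the parameter name comes from the __setup("intel_iommu=", ...) registration elsewhere in this file (not part of this diff):

    intel_iommu=on          force-enable DMA remapping (new with this patch)
    intel_iommu=off         disable DMA remapping
    intel_iommu=igfx_off    do not translate the integrated graphics device
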
@@ -357,6 +385,88 @@ void free_iova_mem(struct iova *iova)
        kmem_cache_free(iommu_iova_cache, iova);
 }
 
+
+static inline int width_to_agaw(int width);
+
+/* Calculate agaw for each iommu.
+ * "SAGAW" may differ across iommus: use a default agaw, and fall back
+ * to a smaller supported agaw for iommus that don't support the default.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+       unsigned long sagaw;
+       int agaw = -1;
+
+       sagaw = cap_sagaw(iommu->cap);
+       for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+            agaw >= 0; agaw--) {
+               if (test_bit(agaw, &sagaw))
+                       break;
+       }
+
+       return agaw;
+}
+
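
width_to_agaw() is only forward-declared above. A sketch consistent with the layout this file assumes elsewhere (agaw 0 is a 2-level, 30-bit table per the struct comment earlier, and each extra level adds 9 bits, so the default width of 48 maps to agaw 2, i.e. 4-level tables); the bodies below are illustrative, not part of this patch:

    /* sketch: width = 30 + 9 * agaw, 9 being the per-level stride */
    static inline int width_to_agaw(int width)
    {
            return (width - 30) / 9;
    }

    static inline int agaw_to_width(int agaw)
    {
            return 30 + agaw * 9;
    }

domain_context_mapping_one() below relies on the same relation when it strips (domain->agaw - iommu->agaw) top levels off the page table for an iommu whose agaw is smaller than the domain's.
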
+/* in the native (non-VM) case, each domain is related to only one iommu */
+static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+{
+       int iommu_id;
+
+       BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+
+       iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
+               return NULL;
+
+       return g_iommus[iommu_id];
+}
+
+/* "Coherency" capability may be different across iommus */
+static void domain_update_iommu_coherency(struct dmar_domain *domain)
+{
+       int i;
+
+       domain->iommu_coherency = 1;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (!ecap_coherent(g_iommus[i]->ecap)) {
+                       domain->iommu_coherency = 0;
+                       break;
+               }
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+}
+
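
The open-coded find_first_bit()/find_next_bit() walk above recurs throughout this patch (domain_get_iommu(), vm_domain_min_agaw(), iommu_free_vm_domain()). Spelled as a single loop, the idiom is:

    /* visit every iommu whose bit is set in the domain's bitmap */
    for (i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
         i < g_num_of_iommus;
         i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i + 1)) {
            /* g_iommus[i] participates in this domain */
    }

Later kernels wrap exactly this pattern as for_each_set_bit(). Since iommu_bmp is a single unsigned long, the scheme implicitly caps the system at BITS_PER_LONG iommus.
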
+static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+{
+       struct dmar_drhd_unit *drhd = NULL;
+       int i;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               for (i = 0; i < drhd->devices_cnt; i++)
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->bus->number == bus &&
+                           drhd->devices[i]->devfn == devfn)
+                               return drhd->iommu;
+
+               if (drhd->include_all)
+                       return drhd->iommu;
+       }
+
+       return NULL;
+}
+
+static void domain_flush_cache(struct dmar_domain *domain,
+                              void *addr, int size)
+{
+       if (!domain->iommu_coherency)
+               clflush_cache_range(addr, size);
+}
+
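
domain_flush_cache() skips the flush when every iommu attached to the domain reports coherent page-walk accesses (the ecap "C" bit); otherwise CPU writes to page tables and context entries must be pushed out of the cache so the iommu's own memory reads see them. On x86 the flush is clflush-based; a sketch of the usual clflush_cache_range(), for orientation only:

    void clflush_cache_range(void *vaddr, unsigned int size)
    {
            void *vend = vaddr + size - 1;

            mb();
            for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                    clflush(vaddr);
            clflush(vend);          /* final, possibly partial, cache line */
            mb();
    }
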
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
@@ -520,8 +630,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                                        flags);
                                return NULL;
                        }
-                       __iommu_flush_cache(domain->iommu, tmp_page,
-                                       PAGE_SIZE);
+                       domain_flush_cache(domain, tmp_page, PAGE_SIZE);
                        dma_set_pte_addr(pte, virt_to_phys(tmp_page));
                        /*
                         * high level table always sets r/w, last level page
@@ -529,7 +638,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                         */
                        dma_set_pte_readable(pte);
                        dma_set_pte_writable(pte);
-                       __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+                       domain_flush_cache(domain, pte, sizeof(*pte));
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
@@ -572,7 +681,7 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 
        if (pte) {
                dma_clear_pte(pte);
-               __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+               domain_flush_cache(domain, pte, sizeof(*pte));
        }
 }
 
@@ -620,8 +729,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                free_pgtable_page(
                                        phys_to_virt(dma_pte_addr(pte)));
                                dma_clear_pte(pte);
-                               __iommu_flush_cache(domain->iommu,
-                                               pte, sizeof(*pte));
+                               domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                        tmp += level_size(level);
                }
@@ -679,7 +787,7 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
        u32 val;
        unsigned long flag;
 
-       if (!cap_rwbf(iommu->cap))
+       if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;
        val = iommu->gcmd | DMA_GCMD_WBF;
 
@@ -1123,17 +1231,28 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 
 
 static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
 
 void free_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
        int i;
+       unsigned long flags;
 
        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);
-               domain_exit(domain);
+
+               spin_lock_irqsave(&domain->iommu_lock, flags);
+               if (--domain->iommu_count == 0) {
+                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_exit(domain);
+                       else
+                               domain_exit(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
                i = find_next_bit(iommu->domain_ids,
                        cap_ndoms(iommu->cap), i+1);
        }
@@ -1151,6 +1270,17 @@ void free_dmar_iommu(struct intel_iommu *iommu)
        kfree(iommu->domains);
        kfree(iommu->domain_ids);
 
+       g_iommus[iommu->seq_id] = NULL;
+
+       /* if all iommus are freed, free g_iommus */
+       for (i = 0; i < g_num_of_iommus; i++) {
+               if (g_iommus[i])
+                       break;
+       }
+
+       if (i == g_num_of_iommus)
+               kfree(g_iommus);
+
        /* free context mapping */
        free_context_table(iommu);
 }
@@ -1179,7 +1309,8 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 
        set_bit(num, iommu->domain_ids);
        domain->id = num;
-       domain->iommu = iommu;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       set_bit(iommu->seq_id, &domain->iommu_bmp);
        domain->flags = 0;
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);
@@ -1190,10 +1321,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 static void iommu_free_domain(struct dmar_domain *domain)
 {
        unsigned long flags;
+       struct intel_iommu *iommu;
 
-       spin_lock_irqsave(&domain->iommu->lock, flags);
-       clear_bit(domain->id, domain->iommu->domain_ids);
-       spin_unlock_irqrestore(&domain->iommu->lock, flags);
+       iommu = domain_get_iommu(domain);
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       clear_bit(domain->id, iommu->domain_ids);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct iova_domain reserved_iova_list;
@@ -1268,11 +1402,12 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
 
        domain_reserve_special_ranges(domain);
 
        /* calculate AGAW */
-       iommu = domain->iommu;
+       iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
@@ -1289,6 +1424,13 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);
 
+       if (ecap_coherent(iommu->ecap))
+               domain->iommu_coherency = 1;
+       else
+               domain->iommu_coherency = 0;
+
+       domain->iommu_count = 1;
+
        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
@@ -1325,12 +1467,22 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                u8 bus, u8 devfn)
 {
        struct context_entry *context;
-       struct intel_iommu *iommu = domain->iommu;
        unsigned long flags;
+       struct intel_iommu *iommu;
+       struct dma_pte *pgd;
+       unsigned long num;
+       unsigned long ndomains;
+       int id;
+       int agaw;
 
        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        BUG_ON(!domain->pgd);
+
+       iommu = device_to_iommu(bus, devfn);
+       if (!iommu)
+               return -ENODEV;
+
        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
@@ -1340,13 +1492,57 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                return 0;
        }
 
-       context_set_domain_id(context, domain->id);
-       context_set_address_width(context, domain->agaw);
-       context_set_address_root(context, virt_to_phys(domain->pgd));
+       id = domain->id;
+       pgd = domain->pgd;
+
+       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+               int found = 0;
+
+               /* find an available domain id for this device in iommu */
+               ndomains = cap_ndoms(iommu->cap);
+               num = find_first_bit(iommu->domain_ids, ndomains);
+               for (; num < ndomains; ) {
+                       if (iommu->domains[num] == domain) {
+                               id = num;
+                               found = 1;
+                               break;
+                       }
+                       num = find_next_bit(iommu->domain_ids,
+                                           cap_ndoms(iommu->cap), num+1);
+               }
+
+               if (found == 0) {
+                       num = find_first_zero_bit(iommu->domain_ids, ndomains);
+                       if (num >= ndomains) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               printk(KERN_ERR "IOMMU: no free domain ids\n");
+                               return -EFAULT;
+                       }
+
+                       set_bit(num, iommu->domain_ids);
+                       iommu->domains[num] = domain;
+                       id = num;
+               }
+
+               /* Skip top levels of page tables for
+                * an iommu which has a smaller agaw than the default.
+                */
+               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd)) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       context_set_domain_id(context, id);
+       context_set_address_width(context, iommu->agaw);
+       context_set_address_root(context, virt_to_phys(pgd));
        context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
        context_set_fault_enable(context);
        context_set_present(context);
-       __iommu_flush_cache(iommu, context, sizeof(*context));
+       domain_flush_cache(domain, context, sizeof(*context));
 
        /* it's a non-present to present mapping */
        if (iommu->flush.flush_context(iommu, domain->id,
@@ -1357,6 +1553,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 
        spin_unlock_irqrestore(&iommu->lock, flags);
+
+       spin_lock_irqsave(&domain->iommu_lock, flags);
+       if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+               domain->iommu_count++;
+               domain_update_iommu_coherency(domain);
+       }
+       spin_unlock_irqrestore(&domain->iommu_lock, flags);
        return 0;
 }
 
@@ -1392,13 +1595,17 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
                        tmp->bus->number, tmp->devfn);
 }
 
-static int domain_context_mapped(struct dmar_domain *domain,
-       struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
 {
        int ret;
        struct pci_dev *tmp, *parent;
+       struct intel_iommu *iommu;
 
-       ret = device_context_mapped(domain->iommu,
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       ret = device_context_mapped(iommu,
                pdev->bus->number, pdev->devfn);
        if (!ret)
                return ret;
@@ -1409,17 +1616,17 @@ static int domain_context_mapped(struct dmar_domain *domain,
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
-               ret = device_context_mapped(domain->iommu, parent->bus->number,
+               ret = device_context_mapped(iommu, parent->bus->number,
                        parent->devfn);
                if (!ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (tmp->is_pcie)
-               return device_context_mapped(domain->iommu,
+               return device_context_mapped(iommu,
                        tmp->subordinate->number, 0);
        else
-               return device_context_mapped(domain->iommu,
+               return device_context_mapped(iommu,
                        tmp->bus->number, tmp->devfn);
 }
 
@@ -1450,19 +1657,22 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
                BUG_ON(dma_pte_addr(pte));
                dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
                dma_set_pte_prot(pte, prot);
-               __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+               domain_flush_cache(domain, pte, sizeof(*pte));
                start_pfn++;
                index++;
        }
        return 0;
 }
 
-static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-       clear_context_table(domain->iommu, bus, devfn);
-       domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+       if (!iommu)
+               return;
+
+       clear_context_table(iommu, bus, devfn);
+       iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL, 0);
-       domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
@@ -1470,6 +1680,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        unsigned long flags;
+       struct intel_iommu *iommu;
 
        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
@@ -1481,7 +1692,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);
 
-               detach_domain_for_dev(info->domain, info->bus, info->devfn);
+               iommu = device_to_iommu(info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);
 
                spin_lock_irqsave(&device_domain_lock, flags);
@@ -1574,7 +1786,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
                info->dev = NULL;
                info->domain = domain;
                /* This domain is shared by devices under p2p bridge */
-               domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;
+               domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
 
                /* pcie-to-pci bridge already has a domain, uses it */
                found = NULL;
@@ -1792,9 +2004,18 @@ static int __init init_dmars(void)
                 */
        }
 
+       g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
+                       GFP_KERNEL);
+       if (!g_iommus) {
+               printk(KERN_ERR "Allocating global iommu array failed\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
+               kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }
@@ -1804,6 +2025,7 @@ static int __init init_dmars(void)
                        continue;
 
                iommu = drhd->iommu;
+               g_iommus[iommu->seq_id] = iommu;
 
                ret = iommu_init_domains(iommu);
                if (ret)
@@ -1916,6 +2138,7 @@ error:
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
+       kfree(g_iommus);
        return ret;
 }
 
@@ -1984,7 +2207,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        }
 
        /* make sure context mapping is ok */
-       if (unlikely(!domain_context_mapped(domain, pdev))) {
+       if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev);
                if (ret) {
                        printk(KERN_ERR
@@ -2006,6 +2229,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
        struct iova *iova;
        int prot = 0;
        int ret;
+       struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2015,6 +2239,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
        if (!domain)
                return 0;
 
+       iommu = domain_get_iommu(domain);
        size = aligned_size((u64)paddr, size);
 
        iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
@@ -2028,7 +2253,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                       !cap_zlr(domain->iommu->cap))
+                       !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
@@ -2044,10 +2269,10 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                goto error;
 
        /* it's a non-present to present mapping */
-       ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
+       ret = iommu_flush_iotlb_psi(iommu, domain->id,
                        start_paddr, size >> VTD_PAGE_SHIFT, 1);
        if (ret)
-               iommu_flush_write_buffer(domain->iommu);
+               iommu_flush_write_buffer(iommu);
 
        return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
@@ -2074,10 +2299,11 @@ static void flush_unmaps(void)
 
        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
-               if (deferred_flush[i].next) {
-                       struct intel_iommu *iommu =
-                               deferred_flush[i].domain[0]->iommu;
+               struct intel_iommu *iommu = g_iommus[i];
+               if (!iommu)
+                       continue;
 
+               if (deferred_flush[i].next) {
                        iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                                 DMA_TLB_GLOBAL_FLUSH, 0);
                        for (j = 0; j < deferred_flush[i].next; j++) {
@@ -2104,12 +2330,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 {
        unsigned long flags;
        int next, iommu_id;
+       struct intel_iommu *iommu;
 
        spin_lock_irqsave(&async_umap_flush_lock, flags);
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();
 
-       iommu_id = dom->iommu->seq_id;
+       iommu = domain_get_iommu(dom);
+       iommu_id = iommu->seq_id;
 
        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
@@ -2131,12 +2359,15 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
        struct dmar_domain *domain;
        unsigned long start_addr;
        struct iova *iova;
+       struct intel_iommu *iommu;
 
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return;
        domain = find_domain(pdev);
        BUG_ON(!domain);
 
+       iommu = domain_get_iommu(domain);
+
        iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
        if (!iova)
                return;
@@ -2152,9 +2383,9 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
        if (intel_iommu_strict) {
-               if (iommu_flush_iotlb_psi(domain->iommu,
+               if (iommu_flush_iotlb_psi(iommu,
                        domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
-                       iommu_flush_write_buffer(domain->iommu);
+                       iommu_flush_write_buffer(iommu);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
@@ -2215,11 +2446,15 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        size_t size = 0;
        void *addr;
        struct scatterlist *sg;
+       struct intel_iommu *iommu;
 
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return;
 
        domain = find_domain(pdev);
+       BUG_ON(!domain);
+
+       iommu = domain_get_iommu(domain);
 
        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
        if (!iova)
@@ -2236,9 +2471,9 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
-       if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+       if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
                        size >> VTD_PAGE_SHIFT, 0))
-               iommu_flush_write_buffer(domain->iommu);
+               iommu_flush_write_buffer(iommu);
 
        /* free iova */
        __free_iova(&domain->iovad, iova);
@@ -2272,6 +2507,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
        int ret;
        struct scatterlist *sg;
        unsigned long start_addr;
+       struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2281,6 +2517,8 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
        if (!domain)
                return 0;
 
+       iommu = domain_get_iommu(domain);
+
        for_each_sg(sglist, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                addr = (void *)virt_to_phys(addr);
@@ -2298,7 +2536,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                       !cap_zlr(domain->iommu->cap))
+                       !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
@@ -2330,9 +2568,9 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
        }
 
        /* it's a non-present to present mapping */
-       if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
+       if (iommu_flush_iotlb_psi(iommu, domain->id,
                        start_addr, offset >> VTD_PAGE_SHIFT, 1))
-               iommu_flush_write_buffer(domain->iommu);
+               iommu_flush_write_buffer(iommu);
        return nelems;
 }
 
@@ -2504,10 +2742,220 @@ int __init intel_iommu_init(void)
        init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
+
+       register_iommu(&intel_iommu_ops);
+
+       return 0;
+}
+
+static int vm_domain_add_dev_info(struct dmar_domain *domain,
+                                 struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+
+       info = alloc_devinfo_mem();
+       if (!info)
+               return -ENOMEM;
+
+       info->bus = pdev->bus->number;
+       info->devfn = pdev->devfn;
+       info->dev = pdev;
+       info->domain = domain;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_add(&info->link, &domain->devices);
+       list_add(&info->global, &device_domain_list);
+       pdev->dev.archdata.iommu = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+}
+
+static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+                                         struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       int found = 0;
+       struct list_head *entry, *tmp;
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_safe(entry, tmp, &domain->devices) {
+               info = list_entry(entry, struct device_domain_info, link);
+               if (info->bus == pdev->bus->number &&
+                   info->devfn == pdev->devfn) {
+                       list_del(&info->link);
+                       list_del(&info->global);
+                       if (info->dev)
+                               info->dev->dev.archdata.iommu = NULL;
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+                       iommu_detach_dev(iommu, info->bus, info->devfn);
+                       free_devinfo_mem(info);
+
+                       spin_lock_irqsave(&device_domain_lock, flags);
+
+                       if (found)
+                               break;
+                       else
+                               continue;
+               }
+
+               /* if there are no other devices under the same iommu
+                * owned by this domain, clear this iommu from iommu_bmp
+                * and update the iommu count and coherency
+                */
+               if (device_to_iommu(info->bus, info->devfn) == iommu)
+                       found = 1;
+       }
+
+       if (found == 0) {
+               unsigned long tmp_flags;
+               spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
+               clear_bit(iommu->seq_id, &domain->iommu_bmp);
+               domain->iommu_count--;
+               domain_update_iommu_coherency(domain);
+               spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+       }
+
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags1, flags2;
+
+       spin_lock_irqsave(&device_domain_lock, flags1);
+       while (!list_empty(&domain->devices)) {
+               info = list_entry(domain->devices.next,
+                       struct device_domain_info, link);
+               list_del(&info->link);
+               list_del(&info->global);
+               if (info->dev)
+                       info->dev->dev.archdata.iommu = NULL;
+
+               spin_unlock_irqrestore(&device_domain_lock, flags1);
+
+               iommu = device_to_iommu(info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
+
+               /* clear this iommu in iommu_bmp, update iommu count
+                * and coherency
+                */
+               spin_lock_irqsave(&domain->iommu_lock, flags2);
+               if (test_and_clear_bit(iommu->seq_id,
+                                      &domain->iommu_bmp)) {
+                       domain->iommu_count--;
+                       domain_update_iommu_coherency(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+
+               free_devinfo_mem(info);
+               spin_lock_irqsave(&device_domain_lock, flags1);
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags1);
+}
+
+/* domain id for virtual machines; it is never written into context entries */
+static unsigned long vm_domid;
+
+static int vm_domain_min_agaw(struct dmar_domain *domain)
+{
+       int i;
+       int min_agaw = domain->agaw;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (min_agaw > g_iommus[i]->agaw)
+                       min_agaw = g_iommus[i]->agaw;
+
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+
+       return min_agaw;
+}
+
+static struct dmar_domain *iommu_alloc_vm_domain(void)
+{
+       struct dmar_domain *domain;
+
+       domain = alloc_domain_mem();
+       if (!domain)
+               return NULL;
+
+       domain->id = vm_domid++;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+
+       return domain;
+}
+
+static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+{
+       int adjust_width;
+
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+       spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
+
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       domain->agaw = width_to_agaw(adjust_width);
+
+       INIT_LIST_HEAD(&domain->devices);
+
+       domain->iommu_count = 0;
+       domain->iommu_coherency = 0;
+       domain->max_addr = 0;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       if (!domain->pgd)
+               return -ENOMEM;
+       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
        return 0;
 }
 
-void intel_iommu_domain_exit(struct dmar_domain *domain)
+static void iommu_free_vm_domain(struct dmar_domain *domain)
+{
+       unsigned long flags;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       unsigned long i;
+       unsigned long ndomains;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = drhd->iommu;
+
+               ndomains = cap_ndoms(iommu->cap);
+               i = find_first_bit(iommu->domain_ids, ndomains);
+               for (; i < ndomains; ) {
+                       if (iommu->domains[i] == domain) {
+                               spin_lock_irqsave(&iommu->lock, flags);
+                               clear_bit(i, iommu->domain_ids);
+                               iommu->domains[i] = NULL;
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               break;
+                       }
+                       i = find_next_bit(iommu->domain_ids, ndomains, i+1);
+               }
+       }
+}
+
+static void vm_domain_exit(struct dmar_domain *domain)
 {
        u64 end;
 
@@ -2515,6 +2963,9 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        if (!domain)
                return;
 
+       vm_domain_remove_all_dev_info(domain);
+       /* destroy iovas */
+       put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
        end = end & (~VTD_PAGE_MASK);
 
@@ -2524,94 +2975,179 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        /* free page tables */
        dma_pte_free_pagetable(domain, 0, end);
 
-       iommu_free_domain(domain);
+       iommu_free_vm_domain(domain);
        free_domain_mem(domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
 
-struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+static int intel_iommu_domain_init(struct iommu_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct dmar_domain *domain;
-       struct intel_iommu *iommu;
-
-       drhd = dmar_find_matched_drhd_unit(pdev);
-       if (!drhd) {
-               printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
-               return NULL;
-       }
+       struct dmar_domain *dmar_domain;
 
-       iommu = drhd->iommu;
-       if (!iommu) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_alloc: iommu == NULL\n");
-               return NULL;
-       }
-       domain = iommu_alloc_domain(iommu);
-       if (!domain) {
+       dmar_domain = iommu_alloc_vm_domain();
+       if (!dmar_domain) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain == NULL\n");
-               return NULL;
+                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               return -ENOMEM;
        }
-       if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+       if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain_init() failed\n");
-               intel_iommu_domain_exit(domain);
-               return NULL;
+                       "intel_iommu_domain_init() failed\n");
+               vm_domain_exit(dmar_domain);
+               return -ENOMEM;
        }
-       return domain;
+       domain->priv = dmar_domain;
+
+       return 0;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
 
-int intel_iommu_context_mapping(
-       struct dmar_domain *domain, struct pci_dev *pdev)
+static void intel_iommu_domain_destroy(struct iommu_domain *domain)
 {
-       int rc;
-       rc = domain_context_mapping(domain, pdev);
-       return rc;
+       struct dmar_domain *dmar_domain = domain->priv;
+
+       domain->priv = NULL;
+       vm_domain_exit(dmar_domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
 
-int intel_iommu_page_mapping(
-       struct dmar_domain *domain, dma_addr_t iova,
-       u64 hpa, size_t size, int prot)
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
 {
-       int rc;
-       rc = domain_page_mapping(domain, iova, hpa, size, prot);
-       return rc;
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct intel_iommu *iommu;
+       int addr_width;
+       u64 end;
+       int ret;
+
+       /* normally pdev is not mapped */
+       if (unlikely(domain_context_mapped(pdev))) {
+               struct dmar_domain *old_domain;
+
+               old_domain = find_domain(pdev);
+               if (old_domain) {
+                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_remove_one_dev_info(old_domain, pdev);
+                       else
+                               domain_remove_dev_info(old_domain);
+               }
+       }
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+       end = DOMAIN_MAX_ADDR(addr_width);
+       end = end & VTD_PAGE_MASK;
+       if (end < dmar_domain->max_addr) {
+               printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                      "sufficient for the mapped address (%llx)\n",
+                      __func__, iommu->agaw, dmar_domain->max_addr);
+               return -EFAULT;
+       }
+
+       ret = domain_context_mapping(dmar_domain, pdev);
+       if (ret)
+               return ret;
+
+       ret = vm_domain_add_dev_info(dmar_domain, pdev);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
 
-void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
 {
-       detach_domain_for_dev(domain, bus, devfn);
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
 
-struct dmar_domain *
-intel_iommu_find_domain(struct pci_dev *pdev)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+                                unsigned long iova, phys_addr_t hpa,
+                                size_t size, int iommu_prot)
 {
-       return find_domain(pdev);
+       struct dmar_domain *dmar_domain = domain->priv;
+       u64 max_addr;
+       int addr_width;
+       int prot = 0;
+       int ret;
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= DMA_PTE_READ;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= DMA_PTE_WRITE;
+
+       max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+       if (dmar_domain->max_addr < max_addr) {
+               int min_agaw;
+               u64 end;
+
+               /* check if minimum agaw is sufficient for mapped address */
+               min_agaw = vm_domain_min_agaw(dmar_domain);
+               addr_width = agaw_to_width(min_agaw);
+               end = DOMAIN_MAX_ADDR(addr_width);
+               end = end & VTD_PAGE_MASK;
+               if (end < max_addr) {
+                       printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                              "sufficient for the mapped address (%llx)\n",
+                              __func__, min_agaw, max_addr);
+                       return -EFAULT;
+               }
+               dmar_domain->max_addr = max_addr;
+       }
+
+       ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
 
-int intel_iommu_found(void)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+                                   unsigned long iova, size_t size)
 {
-       return g_num_of_iommus;
+       struct dmar_domain *dmar_domain = domain->priv;
+       dma_addr_t base;
+
+       /* The address might not be aligned */
+       base = iova & VTD_PAGE_MASK;
+       size = VTD_PAGE_ALIGN(size);
+       dma_pte_clear_range(dmar_domain, base, base + size);
+
+       if (dmar_domain->max_addr == base + size)
+               dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_found);
 
-u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           unsigned long iova)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
-       u64 pfn;
-
-       pfn = 0;
-       pte = addr_to_dma_pte(domain, iova);
+       u64 phys = 0;
 
+       pte = addr_to_dma_pte(dmar_domain, iova);
        if (pte)
-               pfn = dma_pte_addr(pte);
+               phys = dma_pte_addr(pte);
 
-       return pfn >> VTD_PAGE_SHIFT;
+       return phys;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
+
+static struct iommu_ops intel_iommu_ops = {
+       .domain_init    = intel_iommu_domain_init,
+       .domain_destroy = intel_iommu_domain_destroy,
+       .attach_dev     = intel_iommu_attach_device,
+       .detach_dev     = intel_iommu_detach_device,
+       .map            = intel_iommu_map_range,
+       .unmap          = intel_iommu_unmap_range,
+       .iova_to_phys   = intel_iommu_iova_to_phys,
+};
+
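
register_iommu(), called from intel_iommu_init() above, publishes this ops table through the generic IOMMU API, so a consumer such as KVM device assignment never calls the intel_* functions directly. A minimal consumer sketch, assuming the generic wrappers that accompany this API (each one simply dispatches to the corresponding callback above); the function name and flow are illustrative only:

    #include <linux/iommu.h>
    #include <linux/pci.h>

    static int example_assign_page(struct pci_dev *pdev, phys_addr_t hpa)
    {
            struct iommu_domain *dom;
            int ret;

            dom = iommu_domain_alloc();                     /* .domain_init */
            if (!dom)
                    return -ENOMEM;

            ret = iommu_attach_device(dom, &pdev->dev);     /* .attach_dev */
            if (ret)
                    goto out_free;

            /* map one page at bus address 0, read/write */
            ret = iommu_map_range(dom, 0, hpa, PAGE_SIZE,
                                  IOMMU_READ | IOMMU_WRITE); /* .map */
            if (ret)
                    goto out_detach;
            return 0;

    out_detach:
            iommu_detach_device(dom, &pdev->dev);           /* .detach_dev */
    out_free:
            iommu_domain_free(dom);                         /* .domain_destroy */
            return ret;
    }
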
+static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+{
+       /*
+        * Mobile 4 Series Chipset neglects to set RWBF capability,
+        * but needs it:
+        */
+       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+       rwbf_quirk = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
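
Header-stage PCI fixups run during early device enumeration, so rwbf_quirk is already set by the time iommu_flush_write_buffer() consults it. If other chipsets turn out to under-report RWBF, extending the quirk is one line per device; the device ID below is hypothetical, for illustration only:

    /* hypothetical additional device ID, for illustration only */
    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a50, quirk_iommu_rwbf);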