diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 5094704..f3f6865 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
  * Author: Ashok Raj <ashok.raj@intel.com>
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
@@ -26,7 +27,6 @@
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
-#include <linux/sysdev.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/mempool.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
 
+#define ROOT_SIZE              VTD_PAGE_SIZE
+#define CONTEXT_SIZE           VTD_PAGE_SIZE
+
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
+#define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN          IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN          IOVA_PFN(DMA_64BIT_MASK)
+
+/* global iommu list, set NULL for ignored DMAR units */
+static struct intel_iommu **g_iommus;
+
+static int rwbf_quirk;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+       u64     val;
+       u64     rsvd1;
+};
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
+static inline bool root_present(struct root_entry *root)
+{
+       return (root->val & 1);
+}
+static inline void set_root_present(struct root_entry *root)
+{
+       root->val |= 1;
+}
+static inline void set_root_value(struct root_entry *root, unsigned long value)
+{
+       root->val |= value & VTD_PAGE_MASK;
+}
+
+static inline struct context_entry *
+get_context_addr_from_root(struct root_entry *root)
+{
+       return (struct context_entry *)
+               (root_present(root)?phys_to_virt(
+               root->val & VTD_PAGE_MASK) :
+               NULL);
+}
+
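/*
 * A minimal sketch of how the root-entry helpers above fit together (the
 * real work is done in device_to_context_entry() further down); it assumes
 * the context table comes from a zeroed alloc_pgtable_page():
 *
 *	struct root_entry *root = &iommu->root_entry[bus];
 *	struct context_entry *context;
 *
 *	if (!root_present(root)) {
 *		set_root_value(root, virt_to_phys(alloc_pgtable_page()));
 *		set_root_present(root);
 *	}
 *	context = get_context_addr_from_root(root) + devfn;
 */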
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: avail
+ * 8-23: domain id
+ */
+struct context_entry {
+       u64 lo;
+       u64 hi;
+};
+
+static inline bool context_present(struct context_entry *context)
+{
+       return (context->lo & 1);
+}
+static inline void context_set_present(struct context_entry *context)
+{
+       context->lo |= 1;
+}
+
+static inline void context_set_fault_enable(struct context_entry *context)
+{
+       context->lo &= (((u64)-1) << 2) | 1;
+}
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+
+static inline void context_set_translation_type(struct context_entry *context,
+                                               unsigned long value)
+{
+       context->lo &= (((u64)-1) << 4) | 3;
+       context->lo |= (value & 3) << 2;
+}
+
+static inline void context_set_address_root(struct context_entry *context,
+                                           unsigned long value)
+{
+       context->lo |= value & VTD_PAGE_MASK;
+}
+
+static inline void context_set_address_width(struct context_entry *context,
+                                            unsigned long value)
+{
+       context->hi |= value & 7;
+}
+
+static inline void context_set_domain_id(struct context_entry *context,
+                                        unsigned long value)
+{
+       context->hi |= (value & ((1 << 16) - 1)) << 8;
+}
+
+static inline void context_clear_entry(struct context_entry *context)
+{
+       context->lo = 0;
+       context->hi = 0;
+}
+
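/*
 * A small worked example of the context-entry helpers above, assuming
 * domain id 5, agaw 2 (a 4-level page table) and a pgd at physical address
 * 0x12340000: after context_set_address_root(), context_set_translation_type()
 * with CONTEXT_TT_MULTI_LEVEL, context_set_fault_enable(),
 * context_set_present(), context_set_address_width(, 2) and
 * context_set_domain_id(, 5) the entry reads:
 *
 *	context->lo == 0x12340001	(page-table root | present)
 *	context->hi == 0x0502		(domain 5 << 8 | address width 2)
 */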
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-11: available
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+       u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+       pte->val = 0;
+}
+
+static inline void dma_set_pte_readable(struct dma_pte *pte)
+{
+       pte->val |= DMA_PTE_READ;
+}
+
+static inline void dma_set_pte_writable(struct dma_pte *pte)
+{
+       pte->val |= DMA_PTE_WRITE;
+}
+
+static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
+{
+       pte->val = (pte->val & ~3) | (prot & 3);
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+       return (pte->val & VTD_PAGE_MASK);
+}
+
+static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+{
+       pte->val |= (addr & VTD_PAGE_MASK);
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+       return (pte->val & 3) != 0;
+}
+
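/*
 * A minimal sketch of filling a leaf PTE with the helpers above
 * (domain_page_mapping() below does this for real); assuming DMA_PTE_READ
 * and DMA_PTE_WRITE are bits 0 and 1, mapping host page 0x5678000
 * read/write leaves pte->val == 0x5678003:
 *
 *	dma_set_pte_addr(pte, 0x5678000);
 *	dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
 *	domain_flush_cache(domain, pte, sizeof(*pte));
 */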
+/* devices under the same p2p bridge are owned in one domain */
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
+
+/* domain represents a virtual machine; more than one device
+ * across iommus may be owned by one domain, e.g. a kvm guest.
+ */
+#define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 1)
+
+struct dmar_domain {
+       int     id;                     /* domain id */
+       unsigned long iommu_bmp;        /* bitmap of iommus this domain uses*/
+
+       struct list_head devices;       /* all devices' list */
+       struct iova_domain iovad;       /* iova's that belong to this domain */
+
+       struct dma_pte  *pgd;           /* virtual address */
+       spinlock_t      mapping_lock;   /* page table lock */
+       int             gaw;            /* max guest address width */
+
+       /* adjusted guest address width, 0 is level 2 30-bit */
+       int             agaw;
+
+       int             flags;          /* flags to find out type of domain */
+
+       int             iommu_coherency;/* indicate coherency of iommu access */
+       int             iommu_count;    /* reference count of iommu */
+       spinlock_t      iommu_lock;     /* protect iommu set in domain */
+       u64             max_addr;       /* maximum mapped address */
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+       struct list_head link;  /* link to domain siblings */
+       struct list_head global; /* link to global list */
+       u8 bus;                 /* PCI bus number */
+       u8 devfn;               /* PCI devfn number */
+       struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+       struct dmar_domain *domain; /* pointer to domain */
+};
 
 static void flush_unmaps_timeout(unsigned long data);
 
@@ -76,7 +270,12 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-int dmar_disabled;
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /*CONFIG_DMAR_DEFAULT_ON*/
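/*
 * A usage note, assuming this variable is toggled by the intel_iommu=
 * boot option parsed in intel_iommu_setup() below (its __setup() hook is
 * outside this hunk): with CONFIG_DMAR_DEFAULT_ON translation is enabled
 * unless the user boots with intel_iommu=off; without it, translation
 * stays off unless intel_iommu=on is passed.
 */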
+
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -85,14 +284,19 @@ static int intel_iommu_strict;
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+static struct iommu_ops intel_iommu_ops;
+
 static int __init intel_iommu_setup(char *str)
 {
        if (!str)
                return -EINVAL;
        while (*str) {
-               if (!strncmp(str, "off", 3)) {
+               if (!strncmp(str, "on", 2)) {
+                       dmar_disabled = 0;
+                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+               } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
-                       printk(KERN_INFO"Intel-IOMMU: disabled\n");
+                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
@@ -181,6 +385,88 @@ void free_iova_mem(struct iova *iova)
        kmem_cache_free(iommu_iova_cache, iova);
 }
 
+
+static inline int width_to_agaw(int width);
+
+/* calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus; use a default agaw, and
+ * fall back to a smaller supported agaw for iommus that don't support
+ * the default agaw.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+       unsigned long sagaw;
+       int agaw = -1;
+
+       sagaw = cap_sagaw(iommu->cap);
+       for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+            agaw >= 0; agaw--) {
+               if (test_bit(agaw, &sagaw))
+                       break;
+       }
+
+       return agaw;
+}
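/*
 * A small worked example, assuming DEFAULT_DOMAIN_ADDRESS_WIDTH is 48 and
 * width_to_agaw(48) yields 2 (4-level tables): if cap_sagaw() only has
 * bit 1 set (3-level support), the loop walks agaw 2 -> 1 and returns 1;
 * if no bit at or below the default agaw is set, it returns -1.
 */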
+
+/* in native case, each domain is related to only one iommu */
+static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+{
+       int iommu_id;
+
+       BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+
+       iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
+               return NULL;
+
+       return g_iommus[iommu_id];
+}
+
+/* "Coherency" capability may be different across iommus */
+static void domain_update_iommu_coherency(struct dmar_domain *domain)
+{
+       int i;
+
+       domain->iommu_coherency = 1;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (!ecap_coherent(g_iommus[i]->ecap)) {
+                       domain->iommu_coherency = 0;
+                       break;
+               }
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+}
+
+static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+{
+       struct dmar_drhd_unit *drhd = NULL;
+       int i;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               for (i = 0; i < drhd->devices_cnt; i++)
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->bus->number == bus &&
+                           drhd->devices[i]->devfn == devfn)
+                               return drhd->iommu;
+
+               if (drhd->include_all)
+                       return drhd->iommu;
+       }
+
+       return NULL;
+}
+
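/*
 * Page-table and context-entry updates only need an explicit clflush when
 * the IOMMU is not coherent with the CPU caches; with several IOMMUs per
 * domain that is tracked in domain->iommu_coherency, so callers use this
 * helper instead of flushing against a single iommu.
 */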
+static void domain_flush_cache(struct dmar_domain *domain,
+                              void *addr, int size)
+{
+       if (!domain->iommu_coherency)
+               clflush_cache_range(addr, size);
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
@@ -199,7 +485,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
-               __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+               __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
@@ -223,7 +509,7 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
                ret = 0;
                goto out;
        }
-       ret = context_present(context[devfn]);
+       ret = context_present(&context[devfn]);
 out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
@@ -239,7 +525,7 @@ static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
-               context_clear_entry(context[devfn]);
+               context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn], \
                        sizeof(*context));
        }
@@ -336,7 +622,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                if (level == 1)
                        break;
 
-               if (!dma_pte_present(*pte)) {
+               if (!dma_pte_present(pte)) {
                        tmp_page = alloc_pgtable_page();
 
                        if (!tmp_page) {
@@ -344,18 +630,17 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                                        flags);
                                return NULL;
                        }
-                       __iommu_flush_cache(domain->iommu, tmp_page,
-                                       PAGE_SIZE_4K);
-                       dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
+                       domain_flush_cache(domain, tmp_page, PAGE_SIZE);
+                       dma_set_pte_addr(pte, virt_to_phys(tmp_page));
                        /*
                         * high level table always sets r/w, last level page
                         * table control read/write
                         */
-                       dma_set_pte_readable(*pte);
-                       dma_set_pte_writable(*pte);
-                       __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+                       dma_set_pte_readable(pte);
+                       dma_set_pte_writable(pte);
+                       domain_flush_cache(domain, pte, sizeof(*pte));
                }
-               parent = phys_to_virt(dma_pte_addr(*pte));
+               parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }
 
@@ -378,9 +663,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
                if (level == total)
                        return pte;
 
-               if (!dma_pte_present(*pte))
+               if (!dma_pte_present(pte))
                        break;
-               parent = phys_to_virt(dma_pte_addr(*pte));
+               parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
@@ -395,8 +680,8 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
        pte = dma_addr_level_pte(domain, addr, 1);
 
        if (pte) {
-               dma_clear_pte(*pte);
-               __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+               dma_clear_pte(pte);
+               domain_flush_cache(domain, pte, sizeof(*pte));
        }
 }
 
@@ -408,13 +693,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
        start &= (((u64)1) << addr_width) - 1;
        end &= (((u64)1) << addr_width) - 1;
        /* in case it's partial page */
-       start = PAGE_ALIGN_4K(start);
-       end &= PAGE_MASK_4K;
+       start = PAGE_ALIGN(start);
+       end &= PAGE_MASK;
 
        /* we don't need lock here, nobody else touches the iova range */
        while (start < end) {
                dma_pte_clear_one(domain, start);
-               start += PAGE_SIZE_4K;
+               start += VTD_PAGE_SIZE;
        }
 }
 
@@ -442,10 +727,9 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                        pte = dma_addr_level_pte(domain, tmp, level);
                        if (pte) {
                                free_pgtable_page(
-                                       phys_to_virt(dma_pte_addr(*pte)));
-                               dma_clear_pte(*pte);
-                               __iommu_flush_cache(domain->iommu,
-                                               pte, sizeof(*pte));
+                                       phys_to_virt(dma_pte_addr(pte)));
+                               dma_clear_pte(pte);
+                               domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                        tmp += level_size(level);
                }
@@ -468,7 +752,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        if (!root)
                return -ENOMEM;
 
-       __iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, root, ROOT_SIZE);
 
        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
@@ -503,7 +787,7 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
        u32 val;
        unsigned long flag;
 
-       if (!cap_rwbf(iommu->cap))
+       if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;
        val = iommu->gcmd | DMA_GCMD_WBF;
 
@@ -563,7 +847,7 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 
        spin_unlock_irqrestore(&iommu->register_lock, flag);
 
-       /* flush context entry will implictly flush write buffer */
+       /* flush context entry will implicitly flush write buffer */
        return 0;
 }
 
@@ -634,8 +918,9 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-                       DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
-       /* flush context entry will implictly flush write buffer */
+                       (unsigned long long)DMA_TLB_IIRG(type),
+                       (unsigned long long)DMA_TLB_IAIG(val));
+       /* flush iotlb entry will implicitly flush write buffer */
        return 0;
 }
 
@@ -644,7 +929,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 {
        unsigned int mask;
 
-       BUG_ON(addr & (~PAGE_MASK_4K));
+       BUG_ON(addr & (~VTD_PAGE_MASK));
        BUG_ON(pages == 0);
 
        /* Fallback to domain selective flush if no PSI support */
@@ -798,7 +1083,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-               u8 fault_reason, u16 source_id, u64 addr)
+               u8 fault_reason, u16 source_id, unsigned long long addr)
 {
        const char *reason;
 
@@ -946,17 +1231,28 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 
 
 static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
 
 void free_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
        int i;
+       unsigned long flags;
 
        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);
-               domain_exit(domain);
+
+               spin_lock_irqsave(&domain->iommu_lock, flags);
+               if (--domain->iommu_count == 0) {
+                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_exit(domain);
+                       else
+                               domain_exit(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
                i = find_next_bit(iommu->domain_ids,
                        cap_ndoms(iommu->cap), i+1);
        }
@@ -974,6 +1270,17 @@ void free_dmar_iommu(struct intel_iommu *iommu)
        kfree(iommu->domains);
        kfree(iommu->domain_ids);
 
+       g_iommus[iommu->seq_id] = NULL;
+
+       /* if all iommus are freed, free g_iommus */
+       for (i = 0; i < g_num_of_iommus; i++) {
+               if (g_iommus[i])
+                       break;
+       }
+
+       if (i == g_num_of_iommus)
+               kfree(g_iommus);
+
        /* free context mapping */
        free_context_table(iommu);
 }
@@ -1002,7 +1309,9 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 
        set_bit(num, iommu->domain_ids);
        domain->id = num;
-       domain->iommu = iommu;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       set_bit(iommu->seq_id, &domain->iommu_bmp);
+       domain->flags = 0;
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -1012,10 +1321,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 static void iommu_free_domain(struct dmar_domain *domain)
 {
        unsigned long flags;
+       struct intel_iommu *iommu;
+
+       iommu = domain_get_iommu(domain);
 
-       spin_lock_irqsave(&domain->iommu->lock, flags);
-       clear_bit(domain->id, domain->iommu->domain_ids);
-       spin_unlock_irqrestore(&domain->iommu->lock, flags);
+       spin_lock_irqsave(&iommu->lock, flags);
+       clear_bit(domain->id, iommu->domain_ids);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct iova_domain reserved_iova_list;
@@ -1051,9 +1363,9 @@ static void dmar_init_reserved_ranges(void)
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        addr = r->start;
-                       addr &= PAGE_MASK_4K;
+                       addr &= PAGE_MASK;
                        size = r->end - addr;
-                       size = PAGE_ALIGN_4K(size);
+                       size = PAGE_ALIGN(size);
                        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
                                IOVA_PFN(size + addr) - 1);
                        if (!iova)
@@ -1090,11 +1402,12 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
 
        domain_reserve_special_ranges(domain);
 
        /* calculate AGAW */
-       iommu = domain->iommu;
+       iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
@@ -1111,11 +1424,18 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);
 
+       if (ecap_coherent(iommu->ecap))
+               domain->iommu_coherency = 1;
+       else
+               domain->iommu_coherency = 0;
+
+       domain->iommu_count = 1;
+
        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
                return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
 }
 
@@ -1131,7 +1451,7 @@ static void domain_exit(struct dmar_domain *domain)
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
-       end = end & (~PAGE_MASK_4K);
+       end = end & (~PAGE_MASK);
 
        /* clear ptes */
        dma_pte_clear_range(domain, 0, end);
@@ -1147,28 +1467,82 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                u8 bus, u8 devfn)
 {
        struct context_entry *context;
-       struct intel_iommu *iommu = domain->iommu;
        unsigned long flags;
+       struct intel_iommu *iommu;
+       struct dma_pte *pgd;
+       unsigned long num;
+       unsigned long ndomains;
+       int id;
+       int agaw;
 
        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        BUG_ON(!domain->pgd);
+
+       iommu = device_to_iommu(bus, devfn);
+       if (!iommu)
+               return -ENODEV;
+
        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
-       if (context_present(*context)) {
+       if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }
 
-       context_set_domain_id(*context, domain->id);
-       context_set_address_width(*context, domain->agaw);
-       context_set_address_root(*context, virt_to_phys(domain->pgd));
-       context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
-       context_set_fault_enable(*context);
-       context_set_present(*context);
-       __iommu_flush_cache(iommu, context, sizeof(*context));
+       id = domain->id;
+       pgd = domain->pgd;
+
+       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+               int found = 0;
+
+               /* find an available domain id for this device in iommu */
+               ndomains = cap_ndoms(iommu->cap);
+               num = find_first_bit(iommu->domain_ids, ndomains);
+               for (; num < ndomains; ) {
+                       if (iommu->domains[num] == domain) {
+                               id = num;
+                               found = 1;
+                               break;
+                       }
+                       num = find_next_bit(iommu->domain_ids,
+                                           cap_ndoms(iommu->cap), num+1);
+               }
+
+               if (found == 0) {
+                       num = find_first_zero_bit(iommu->domain_ids, ndomains);
+                       if (num >= ndomains) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               printk(KERN_ERR "IOMMU: no free domain ids\n");
+                               return -EFAULT;
+                       }
+
+                       set_bit(num, iommu->domain_ids);
+                       iommu->domains[num] = domain;
+                       id = num;
+               }
+
+               /* Skip top levels of page tables for
+                * iommu which has less agaw than default.
+                */
+               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd)) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       context_set_domain_id(context, id);
+       context_set_address_width(context, iommu->agaw);
+       context_set_address_root(context, virt_to_phys(pgd));
+       context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+       context_set_fault_enable(context);
+       context_set_present(context);
+       domain_flush_cache(domain, context, sizeof(*context));
 
        /* it's a non-present to present mapping */
        if (iommu->flush.flush_context(iommu, domain->id,
@@ -1179,6 +1553,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 
        spin_unlock_irqrestore(&iommu->lock, flags);
+
+       spin_lock_irqsave(&domain->iommu_lock, flags);
+       if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+               domain->iommu_count++;
+               domain_update_iommu_coherency(domain);
+       }
+       spin_unlock_irqrestore(&domain->iommu_lock, flags);
        return 0;
 }
 
@@ -1214,13 +1595,17 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
                        tmp->bus->number, tmp->devfn);
 }
 
-static int domain_context_mapped(struct dmar_domain *domain,
-       struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
 {
        int ret;
        struct pci_dev *tmp, *parent;
+       struct intel_iommu *iommu;
 
-       ret = device_context_mapped(domain->iommu,
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       ret = device_context_mapped(iommu,
                pdev->bus->number, pdev->devfn);
        if (!ret)
                return ret;
@@ -1231,17 +1616,17 @@ static int domain_context_mapped(struct dmar_domain *domain,
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
-               ret = device_context_mapped(domain->iommu, parent->bus->number,
+               ret = device_context_mapped(iommu, parent->bus->number,
                        parent->devfn);
                if (!ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (tmp->is_pcie)
-               return device_context_mapped(domain->iommu,
+               return device_context_mapped(iommu,
                        tmp->subordinate->number, 0);
        else
-               return device_context_mapped(domain->iommu,
+               return device_context_mapped(iommu,
                        tmp->bus->number, tmp->devfn);
 }
 
@@ -1252,36 +1637,42 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
        u64 start_pfn, end_pfn;
        struct dma_pte *pte;
        int index;
+       int addr_width = agaw_to_width(domain->agaw);
+
+       hpa &= (((u64)1) << addr_width) - 1;
 
        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;
-       iova &= PAGE_MASK_4K;
-       start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-       end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+       iova &= PAGE_MASK;
+       start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+       end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
        index = 0;
        while (start_pfn < end_pfn) {
-               pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+               pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
                if (!pte)
                        return -ENOMEM;
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
-               BUG_ON(dma_pte_addr(*pte));
-               dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
-               dma_set_pte_prot(*pte, prot);
-               __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+               BUG_ON(dma_pte_addr(pte));
+               dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
+               dma_set_pte_prot(pte, prot);
+               domain_flush_cache(domain, pte, sizeof(*pte));
                start_pfn++;
                index++;
        }
        return 0;
 }
 
-static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-       clear_context_table(domain->iommu, bus, devfn);
-       domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+       if (!iommu)
+               return;
+
+       clear_context_table(iommu, bus, devfn);
+       iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL, 0);
-       domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
@@ -1289,6 +1680,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        unsigned long flags;
+       struct intel_iommu *iommu;
 
        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
@@ -1300,7 +1692,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);
 
-               detach_domain_for_dev(info->domain, info->bus, info->devfn);
+               iommu = device_to_iommu(info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);
 
                spin_lock_irqsave(&device_domain_lock, flags);
@@ -1393,7 +1786,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
                info->dev = NULL;
                info->domain = domain;
                /* This domain is shared by devices under p2p bridge */
-               domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;
+               domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
 
                /* pcie-to-pci bridge already has a domain, uses it */
                found = NULL;
@@ -1445,11 +1838,13 @@ error:
        return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+                                     unsigned long long start,
+                                     unsigned long long end)
 {
        struct dmar_domain *domain;
        unsigned long size;
-       u64 base;
+       unsigned long long base;
        int ret;
 
        printk(KERN_INFO
@@ -1461,9 +1856,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
                return -ENOMEM;
 
        /* The address might not be aligned */
-       base = start & PAGE_MASK_4K;
+       base = start & PAGE_MASK;
        size = end - base;
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
                        IOVA_PFN(base + size) - 1)) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1554,6 +1949,11 @@ static void __init iommu_prepare_gfx_mapping(void)
                        printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
        }
 }
+#else /* !CONFIG_DMAR_GFX_WA */
+static inline void iommu_prepare_gfx_mapping(void)
+{
+       return;
+}
 #endif
 
 #ifdef CONFIG_DMAR_FLOPPY_WA
@@ -1581,7 +1981,7 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-int __init init_dmars(void)
+static int __init init_dmars(void)
 {
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
@@ -1604,9 +2004,18 @@ int __init init_dmars(void)
                 */
        }
 
+       g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
+                       GFP_KERNEL);
+       if (!g_iommus) {
+               printk(KERN_ERR "Allocating global iommu array failed\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
+               kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }
@@ -1616,6 +2025,7 @@ int __init init_dmars(void)
                        continue;
 
                iommu = drhd->iommu;
+               g_iommus[iommu->seq_id] = iommu;
 
                ret = iommu_init_domains(iommu);
                if (ret)
@@ -1646,12 +2056,14 @@ int __init init_dmars(void)
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
                        printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
-                              "invalidation\n", drhd->reg_base_addr);
+                              "invalidation\n",
+                              (unsigned long long)drhd->reg_base_addr);
                } else {
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
                        printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
-                              "invalidation\n", drhd->reg_base_addr);
+                              "invalidation\n",
+                              (unsigned long long)drhd->reg_base_addr);
                }
        }
 
@@ -1726,14 +2138,15 @@ error:
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
+       kfree(g_iommus);
        return ret;
 }
 
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
        u64 addr;
-       addr = (host_addr & (~PAGE_MASK_4K)) + size;
-       return PAGE_ALIGN_4K(addr);
+       addr = (host_addr & (~PAGE_MASK)) + size;
+       return PAGE_ALIGN(addr);
 }
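/*
 * Worked example, assuming 4K pages: aligned_size(0x1234, 0x100) is
 * (0x234 + 0x100) rounded up, i.e. 0x1000 (one page), while
 * aligned_size(0x1ff0, 0x20) is (0xff0 + 0x20) rounded up, i.e. 0x2000,
 * because the 0x20-byte buffer straddles a page boundary.
 */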
 
 struct iova *
@@ -1747,20 +2160,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
                return NULL;
 
        piova = alloc_iova(&domain->iovad,
-                       size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+                       size >> PAGE_SHIFT, IOVA_PFN(end), 1);
        return piova;
 }
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-               size_t size)
+                  size_t size, u64 dma_mask)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;
 
-       if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-               iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-       } else  {
+       if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+               iova = iommu_alloc_iova(domain, size, dma_mask);
+       else {
                /*
                 * First try to allocate an io virtual address in
                 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1768,7 +2181,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
                 */
                iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
                if (!iova)
-                       iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+                       iova = iommu_alloc_iova(domain, size, dma_mask);
        }
 
        if (!iova) {
@@ -1794,7 +2207,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        }
 
        /* make sure context mapping is ok */
-       if (unlikely(!domain_context_mapped(domain, pdev))) {
+       if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev);
                if (ret) {
                        printk(KERN_ERR
@@ -1807,15 +2220,16 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        return domain;
 }
 
-static dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+                                    size_t size, int dir, u64 dma_mask)
 {
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
-       unsigned long start_paddr;
+       phys_addr_t start_paddr;
        struct iova *iova;
        int prot = 0;
        int ret;
+       struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -1825,20 +2239,21 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
        if (!domain)
                return 0;
 
+       iommu = domain_get_iommu(domain);
        size = aligned_size((u64)paddr, size);
 
-       iova = __intel_alloc_iova(hwdev, domain, size);
+       iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
        if (!iova)
                goto error;
 
-       start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
        /*
         * Check if DMAR supports zero-length reads on write only
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                       !cap_zlr(domain->iommu->cap))
+                       !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
@@ -1849,30 +2264,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
         * is not a big problem
         */
        ret = domain_page_mapping(domain, start_paddr,
-               ((u64)paddr) & PAGE_MASK_4K, size, prot);
+               ((u64)paddr) & PAGE_MASK, size, prot);
        if (ret)
                goto error;
 
-       pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-               pci_name(pdev), size, (u64)paddr,
-               size, (u64)start_paddr, dir);
-
        /* it's a non-present to present mapping */
-       ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-                       start_paddr, size >> PAGE_SHIFT_4K, 1);
+       ret = iommu_flush_iotlb_psi(iommu, domain->id,
+                       start_paddr, size >> VTD_PAGE_SHIFT, 1);
        if (ret)
-               iommu_flush_write_buffer(domain->iommu);
+               iommu_flush_write_buffer(iommu);
 
-       return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+       return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
        if (iova)
                __free_iova(&domain->iovad, iova);
        printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-               pci_name(pdev), size, (u64)paddr, dir);
+               pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
 }
 
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+                           size_t size, int dir)
+{
+       return __intel_map_single(hwdev, paddr, size, dir,
+                                 to_pci_dev(hwdev)->dma_mask);
+}
+
 static void flush_unmaps(void)
 {
        int i, j;
@@ -1881,10 +2299,11 @@ static void flush_unmaps(void)
 
        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
-               if (deferred_flush[i].next) {
-                       struct intel_iommu *iommu =
-                               deferred_flush[i].domain[0]->iommu;
+               struct intel_iommu *iommu = g_iommus[i];
+               if (!iommu)
+                       continue;
 
+               if (deferred_flush[i].next) {
                        iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                                 DMA_TLB_GLOBAL_FLUSH, 0);
                        for (j = 0; j < deferred_flush[i].next; j++) {
@@ -1911,12 +2330,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 {
        unsigned long flags;
        int next, iommu_id;
+       struct intel_iommu *iommu;
 
        spin_lock_irqsave(&async_umap_flush_lock, flags);
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();
 
-       iommu_id = dom->iommu->seq_id;
+       iommu = domain_get_iommu(dom);
+       iommu_id = iommu->seq_id;
 
        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
@@ -1931,37 +2352,40 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-       size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+                       int dir)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
        unsigned long start_addr;
        struct iova *iova;
+       struct intel_iommu *iommu;
 
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return;
        domain = find_domain(pdev);
        BUG_ON(!domain);
 
+       iommu = domain_get_iommu(domain);
+
        iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
        if (!iova)
                return;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
        size = aligned_size((u64)dev_addr, size);
 
        pr_debug("Device %s unmapping: %lx@%llx\n",
-               pci_name(pdev), size, (u64)start_addr);
+               pci_name(pdev), size, (unsigned long long)start_addr);
 
        /*  clear the whole page */
        dma_pte_clear_range(domain, start_addr, start_addr + size);
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
        if (intel_iommu_strict) {
-               if (iommu_flush_iotlb_psi(domain->iommu,
-                       domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
-                       iommu_flush_write_buffer(domain->iommu);
+               if (iommu_flush_iotlb_psi(iommu,
+                       domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
+                       iommu_flush_write_buffer(iommu);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
@@ -1973,13 +2397,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        }
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
-                      dma_addr_t *dma_handle, gfp_t flags)
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flags)
 {
        void *vaddr;
        int order;
 
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        order = get_order(size);
        flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1988,19 +2412,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
                return NULL;
        memset(vaddr, 0, size);
 
-       *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+       *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+                                        DMA_BIDIRECTIONAL,
+                                        hwdev->coherent_dma_mask);
        if (*dma_handle)
                return vaddr;
        free_pages((unsigned long)vaddr, order);
        return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-       void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+                        dma_addr_t dma_handle)
 {
        int order;
 
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        order = get_order(size);
 
        intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2008,8 +2434,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-       int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+                   int nelems, int dir)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2019,11 +2446,15 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        size_t size = 0;
        void *addr;
        struct scatterlist *sg;
+       struct intel_iommu *iommu;
 
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return;
 
        domain = find_domain(pdev);
+       BUG_ON(!domain);
+
+       iommu = domain_get_iommu(domain);
 
        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
        if (!iova)
@@ -2033,16 +2464,16 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                size += aligned_size((u64)addr, sg->length);
        }
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
 
        /*  clear the whole page */
        dma_pte_clear_range(domain, start_addr, start_addr + size);
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
-       if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-                       size >> PAGE_SHIFT_4K, 0))
-               iommu_flush_write_buffer(domain->iommu);
+       if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+                       size >> VTD_PAGE_SHIFT, 0))
+               iommu_flush_write_buffer(iommu);
 
        /* free iova */
        __free_iova(&domain->iovad, iova);
@@ -2062,8 +2493,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
        return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-                               int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+                int dir)
 {
        void *addr;
        int i;
@@ -2076,6 +2507,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
        int ret;
        struct scatterlist *sg;
        unsigned long start_addr;
+       struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2085,13 +2517,15 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
        if (!domain)
                return 0;
 
+       iommu = domain_get_iommu(domain);
+
        for_each_sg(sglist, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                addr = (void *)virt_to_phys(addr);
                size += aligned_size((u64)addr, sg->length);
        }
 
-       iova = __intel_alloc_iova(hwdev, domain, size);
+       iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;
@@ -2102,19 +2536,19 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                       !cap_zlr(domain->iommu->cap))
+                       !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
        offset = 0;
        for_each_sg(sglist, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                addr = (void *)virt_to_phys(addr);
                size = aligned_size((u64)addr, sg->length);
                ret = domain_page_mapping(domain, start_addr + offset,
-                       ((u64)addr) & PAGE_MASK_4K,
+                       ((u64)addr) & PAGE_MASK,
                        size, prot);
                if (ret) {
                        /*  clear the page */
@@ -2128,15 +2562,15 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
                        return 0;
                }
                sg->dma_address = start_addr + offset +
-                               ((u64)addr & (~PAGE_MASK_4K));
+                               ((u64)addr & (~PAGE_MASK));
                sg->dma_length = sg->length;
                offset += size;
        }
 
        /* it's a non-present to present mapping */
-       if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-                       start_addr, offset >> PAGE_SHIFT_4K, 1))
-               iommu_flush_write_buffer(domain->iommu);
+       if (iommu_flush_iotlb_psi(iommu, domain->id,
+                       start_addr, offset >> VTD_PAGE_SHIFT, 1))
+               iommu_flush_write_buffer(iommu);
        return nelems;
 }
 
@@ -2175,7 +2609,6 @@ static inline int iommu_devinfo_cache_init(void)
                                         sizeof(struct device_domain_info),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
-
                                         NULL);
        if (!iommu_devinfo_cache) {
                printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2193,7 +2626,6 @@ static inline int iommu_iova_cache_init(void)
                                         sizeof(struct iova),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
-
                                         NULL);
        if (!iommu_iova_cache) {
                printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2310,10 +2742,220 @@ int __init intel_iommu_init(void)
        init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
+
+       register_iommu(&intel_iommu_ops);
+
+       return 0;
+}
+
+static int vm_domain_add_dev_info(struct dmar_domain *domain,
+                                 struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+
+       info = alloc_devinfo_mem();
+       if (!info)
+               return -ENOMEM;
+
+       info->bus = pdev->bus->number;
+       info->devfn = pdev->devfn;
+       info->dev = pdev;
+       info->domain = domain;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_add(&info->link, &domain->devices);
+       list_add(&info->global, &device_domain_list);
+       pdev->dev.archdata.iommu = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+}
+
+static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+                                         struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       int found = 0;
+       struct list_head *entry, *tmp;
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_safe(entry, tmp, &domain->devices) {
+               info = list_entry(entry, struct device_domain_info, link);
+               if (info->bus == pdev->bus->number &&
+                   info->devfn == pdev->devfn) {
+                       list_del(&info->link);
+                       list_del(&info->global);
+                       if (info->dev)
+                               info->dev->dev.archdata.iommu = NULL;
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+                       iommu_detach_dev(iommu, info->bus, info->devfn);
+                       free_devinfo_mem(info);
+
+                       spin_lock_irqsave(&device_domain_lock, flags);
+
+                       if (found)
+                               break;
+                       else
+                               continue;
+               }
+
+               /* if no other device under the same iommu is owned by
+                * this domain, clear this iommu in iommu_bmp and update
+                * the iommu count and coherency
+                */
+               if (device_to_iommu(info->bus, info->devfn) == iommu)
+                       found = 1;
+       }
+
+       if (found == 0) {
+               unsigned long tmp_flags;
+               spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
+               clear_bit(iommu->seq_id, &domain->iommu_bmp);
+               domain->iommu_count--;
+               domain_update_iommu_coherency(domain);
+               spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+       }
+
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags1, flags2;
+
+       spin_lock_irqsave(&device_domain_lock, flags1);
+       while (!list_empty(&domain->devices)) {
+               info = list_entry(domain->devices.next,
+                       struct device_domain_info, link);
+               list_del(&info->link);
+               list_del(&info->global);
+               if (info->dev)
+                       info->dev->dev.archdata.iommu = NULL;
+
+               spin_unlock_irqrestore(&device_domain_lock, flags1);
+
+               iommu = device_to_iommu(info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
+
+               /* clear this iommu in iommu_bmp, update iommu count
+                * and coherency
+                */
+               spin_lock_irqsave(&domain->iommu_lock, flags2);
+               if (test_and_clear_bit(iommu->seq_id,
+                                      &domain->iommu_bmp)) {
+                       domain->iommu_count--;
+                       domain_update_iommu_coherency(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+
+               free_devinfo_mem(info);
+               spin_lock_irqsave(&device_domain_lock, flags1);
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags1);
+}
+
+/* domain id for virtual machine, it won't be set in context */
+static unsigned long vm_domid;
+
+static int vm_domain_min_agaw(struct dmar_domain *domain)
+{
+       int i;
+       int min_agaw = domain->agaw;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (min_agaw > g_iommus[i]->agaw)
+                       min_agaw = g_iommus[i]->agaw;
+
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+
+       return min_agaw;
+}
+
+static struct dmar_domain *iommu_alloc_vm_domain(void)
+{
+       struct dmar_domain *domain;
+
+       domain = alloc_domain_mem();
+       if (!domain)
+               return NULL;
+
+       domain->id = vm_domid++;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+
+       return domain;
+}
+
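+/*
+ * Set up a virtual machine domain: iova allocator, reserved ranges, address
+ * widths derived from guest_width, and the top level page directory.
+ */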
+static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+{
+       int adjust_width;
+
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+       spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
+
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       domain->agaw = width_to_agaw(adjust_width);
+
+       INIT_LIST_HEAD(&domain->devices);
+
+       domain->iommu_count = 0;
+       domain->iommu_coherency = 0;
+       domain->max_addr = 0;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       if (!domain->pgd)
+               return -ENOMEM;
+       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
        return 0;
 }
 
-void intel_iommu_domain_exit(struct dmar_domain *domain)
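+/* Release the domain ids this domain holds on every hardware iommu unit. */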
+static void iommu_free_vm_domain(struct dmar_domain *domain)
+{
+       unsigned long flags;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       unsigned long i;
+       unsigned long ndomains;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = drhd->iommu;
+
+               ndomains = cap_ndoms(iommu->cap);
+               i = find_first_bit(iommu->domain_ids, ndomains);
+               for (; i < ndomains; ) {
+                       if (iommu->domains[i] == domain) {
+                               spin_lock_irqsave(&iommu->lock, flags);
+                               clear_bit(i, iommu->domain_ids);
+                               iommu->domains[i] = NULL;
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               break;
+                       }
+                       i = find_next_bit(iommu->domain_ids, ndomains, i+1);
+               }
+       }
+}
+
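+/*
+ * Tear down a virtual machine domain: detach its devices, release its iova
+ * space and page tables, then free its domain ids and the domain itself.
+ */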
+static void vm_domain_exit(struct dmar_domain *domain)
 {
        u64 end;
 
@@ -2321,8 +2963,11 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        if (!domain)
                return;
 
+       vm_domain_remove_all_dev_info(domain);
+       /* destroy iovas */
+       put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
-       end = end & (~PAGE_MASK_4K);
+       end = end & (~VTD_PAGE_MASK);
 
        /* clear ptes */
        dma_pte_clear_range(domain, 0, end);
@@ -2330,94 +2975,179 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        /* free page tables */
        dma_pte_free_pagetable(domain, 0, end);
 
-       iommu_free_domain(domain);
+       iommu_free_vm_domain(domain);
        free_domain_mem(domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
 
-struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
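+/* iommu_ops callback: back a generic iommu_domain with a new VM dmar_domain. */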
+static int intel_iommu_domain_init(struct iommu_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct dmar_domain *domain;
-       struct intel_iommu *iommu;
-
-       drhd = dmar_find_matched_drhd_unit(pdev);
-       if (!drhd) {
-               printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
-               return NULL;
-       }
+       struct dmar_domain *dmar_domain;
 
-       iommu = drhd->iommu;
-       if (!iommu) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_alloc: iommu == NULL\n");
-               return NULL;
-       }
-       domain = iommu_alloc_domain(iommu);
-       if (!domain) {
+       dmar_domain = iommu_alloc_vm_domain();
+       if (!dmar_domain) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain == NULL\n");
-               return NULL;
+                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               return -ENOMEM;
        }
-       if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+       if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain_init() failed\n");
-               intel_iommu_domain_exit(domain);
-               return NULL;
+                       "intel_iommu_domain_init: vm_domain_init() failed\n");
+               vm_domain_exit(dmar_domain);
+               return -ENOMEM;
        }
-       return domain;
+       domain->priv = dmar_domain;
+
+       return 0;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
 
-int intel_iommu_context_mapping(
-       struct dmar_domain *domain, struct pci_dev *pdev)
+static void intel_iommu_domain_destroy(struct iommu_domain *domain)
 {
-       int rc;
-       rc = domain_context_mapping(domain, pdev);
-       return rc;
+       struct dmar_domain *dmar_domain = domain->priv;
+
+       domain->priv = NULL;
+       vm_domain_exit(dmar_domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
 
-int intel_iommu_page_mapping(
-       struct dmar_domain *domain, dma_addr_t iova,
-       u64 hpa, size_t size, int prot)
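+/*
+ * iommu_ops callback: move a device into a VM domain.  A device that is
+ * already context-mapped is detached from its old domain first, and the
+ * iommu behind the device must be able to address everything already
+ * mapped in the domain.
+ */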
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
 {
-       int rc;
-       rc = domain_page_mapping(domain, iova, hpa, size, prot);
-       return rc;
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct intel_iommu *iommu;
+       int addr_width;
+       u64 end;
+       int ret;
+
+       /* normally pdev is not mapped */
+       if (unlikely(domain_context_mapped(pdev))) {
+               struct dmar_domain *old_domain;
+
+               old_domain = find_domain(pdev);
+               if (old_domain) {
+                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_remove_one_dev_info(old_domain, pdev);
+                       else
+                               domain_remove_dev_info(old_domain);
+               }
+       }
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+       end = DOMAIN_MAX_ADDR(addr_width);
+       end = end & VTD_PAGE_MASK;
+       if (end < dmar_domain->max_addr) {
+               printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                      "sufficient for the mapped address (%llx)\n",
+                      __func__, iommu->agaw, dmar_domain->max_addr);
+               return -EFAULT;
+       }
+
+       ret = domain_context_mapping(dmar_domain, pdev);
+       if (ret)
+               return ret;
+
+       ret = vm_domain_add_dev_info(dmar_domain, pdev);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
 
-void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
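+/* iommu_ops callback: undo intel_iommu_attach_device for this device. */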
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
 {
-       detach_domain_for_dev(domain, bus, devfn);
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
 
-struct dmar_domain *
-intel_iommu_find_domain(struct pci_dev *pdev)
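+/*
+ * iommu_ops callback: map [iova, iova + size) to hpa.  Growing the mapping
+ * beyond dmar_domain->max_addr is only allowed if the smallest agaw among
+ * the backing iommus can still address the new end of the range.
+ */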
+static int intel_iommu_map_range(struct iommu_domain *domain,
+                                unsigned long iova, phys_addr_t hpa,
+                                size_t size, int iommu_prot)
 {
-       return find_domain(pdev);
+       struct dmar_domain *dmar_domain = domain->priv;
+       u64 max_addr;
+       int addr_width;
+       int prot = 0;
+       int ret;
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= DMA_PTE_READ;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= DMA_PTE_WRITE;
+
+       max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+       if (dmar_domain->max_addr < max_addr) {
+               int min_agaw;
+               u64 end;
+
+               /* check if minimum agaw is sufficient for mapped address */
+               min_agaw = vm_domain_min_agaw(dmar_domain);
+               addr_width = agaw_to_width(min_agaw);
+               end = DOMAIN_MAX_ADDR(addr_width);
+               end = end & VTD_PAGE_MASK;
+               if (end < max_addr) {
+                       printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                              "sufficient for the mapped address (%llx)\n",
+                              __func__, min_agaw, max_addr);
+                       return -EFAULT;
+               }
+               dmar_domain->max_addr = max_addr;
+       }
+
+       ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
 
-int intel_iommu_found(void)
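+/*
+ * iommu_ops callback: clear the ptes covering [iova, iova + size); the range
+ * is widened to VTD page boundaries before it is cleared.
+ */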
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+                                   unsigned long iova, size_t size)
 {
-       return g_num_of_iommus;
+       struct dmar_domain *dmar_domain = domain->priv;
+       dma_addr_t base;
+
+       /* The address might not be aligned */
+       base = iova & VTD_PAGE_MASK;
+       size = VTD_PAGE_ALIGN(size);
+       dma_pte_clear_range(dmar_domain, base, base + size);
+
+       if (dmar_domain->max_addr == base + size)
+               dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_found);
 
-u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
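+/*
+ * iommu_ops callback: look up the pte for iova and return the physical
+ * address it points to, or 0 when nothing is mapped there.
+ */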
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           unsigned long iova)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
-       u64 pfn;
-
-       pfn = 0;
-       pte = addr_to_dma_pte(domain, iova);
+       u64 phys = 0;
 
+       pte = addr_to_dma_pte(dmar_domain, iova);
        if (pte)
-               pfn = dma_pte_addr(*pte);
+               phys = dma_pte_addr(pte);
+
+       return phys;
+}
 
-       return pfn >> PAGE_SHIFT_4K;
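+/* Hooks for the generic IOMMU API declared in <linux/iommu.h>. */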
+static struct iommu_ops intel_iommu_ops = {
+       .domain_init    = intel_iommu_domain_init,
+       .domain_destroy = intel_iommu_domain_destroy,
+       .attach_dev     = intel_iommu_attach_device,
+       .detach_dev     = intel_iommu_detach_device,
+       .map            = intel_iommu_map_range,
+       .unmap          = intel_iommu_unmap_range,
+       .iova_to_phys   = intel_iommu_iova_to_phys,
+};
+
+static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+{
+       /*
+        * Mobile 4 Series Chipset neglects to set RWBF capability,
+        * but needs it:
+        */
+       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+       rwbf_quirk = 1;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
+
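+/* 0x2a40 is the Mobile 4 Series Chipset memory controller hub. */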
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);