intel-iommu: Limit DOMAIN_MAX_PFN to fit in an 'unsigned long'
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f807423..c9272a1 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -37,6 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/sysdev.h>
+#include <linux/dmi.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
 
 #define MAX_AGAW_WIDTH 64
 
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw)    ((unsigned long) min_t(uint64_t, \
+                               __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw)   (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 
 #define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
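
The clamp above is easiest to see with concrete numbers. A rough standalone sketch (not part of the patch; it assumes 4KiB VT-d pages and open-codes min_t): with a 48-bit guest address width the unclamped max PFN is 2^36 - 1, which overflows a 32-bit unsigned long, so on 32-bit builds DOMAIN_MAX_PFN() simply saturates at ULONG_MAX.

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12	/* assumed 4KiB VT-d pages */

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)

/* Open-coded stand-in for the kernel's min_t(uint64_t, ..., (unsigned long)-1) */
#define DOMAIN_MAX_PFN(gaw) \
	((unsigned long)(__DOMAIN_MAX_PFN(gaw) < (uint64_t)(unsigned long)-1 ? \
			 __DOMAIN_MAX_PFN(gaw) : (uint64_t)(unsigned long)-1))

int main(void)
{
	/* 48-bit GAW: unclamped max PFN is 2^36 - 1; on a 32-bit build the
	 * clamped value saturates at ULONG_MAX instead of being truncated. */
	printf("unclamped max pfn: %#llx\n",
	       (unsigned long long)__DOMAIN_MAX_PFN(48));
	printf("clamped max pfn:   %#lx\n", DOMAIN_MAX_PFN(48));
	return 0;
}

Saturating rather than truncating keeps every representable PFN reachable, which is what lets the rest of the driver use 'unsigned long' for PFNs "with impunity".
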
@@ -222,7 +229,12 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
-       return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+       return pte->val & VTD_PAGE_MASK;
+#else
+       /* Must have a full atomic 64-bit read */
+       return  __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
 }
 
 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
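
The 32-bit branch of dma_pte_addr() above leans on a property of compare-and-exchange: with old == new == 0 the operation never changes a non-zero value, yet always returns the current 64-bit contents in one atomic step, so it doubles as an atomic 64-bit load. A small userspace sketch of the same trick, using a GCC builtin as a stand-in for the kernel's __cmpxchg64() (illustrative only, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* A 64-bit CAS with old == new == 0: if the value is 0 it "writes" 0 (no
 * visible change); otherwise the compare fails and nothing is written.
 * Either way the current contents come back in one atomic operation. */
static uint64_t atomic_read64(uint64_t *p)
{
	return __sync_val_compare_and_swap(p, 0ULL, 0ULL);
}

int main(void)
{
	uint64_t pte = 0x12345000ULL | 3;	/* pretend PTE: addr | R | W */

	printf("%#llx\n", (unsigned long long)atomic_read64(&pte));
	return 0;
}
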
@@ -235,13 +247,19 @@ static inline bool dma_pte_present(struct dma_pte *pte)
        return (pte->val & 3) != 0;
 }
 
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+       return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
 /*
  * This domain is a static identity mapping domain.
  *     1. This domain creates a static 1:1 mapping to all usable memory.
  *     2. It maps to each iommu if successful.
  *     3. Each iommu maps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -262,7 +280,6 @@ struct dmar_domain {
        struct iova_domain iovad;       /* iova's that belong to this domain */
 
        struct dma_pte  *pgd;           /* virtual address */
-       spinlock_t      mapping_lock;   /* page table lock */
        int             gaw;            /* max guest address width */
 
        /* adjusted guest address width, 0 is level 2 30-bit */
@@ -696,13 +713,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;
-       unsigned long flags;
 
        BUG_ON(!domain->pgd);
        BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
        parent = domain->pgd;
 
-       spin_lock_irqsave(&domain->mapping_lock, flags);
        while (level > 0) {
                void *tmp_page;
 
@@ -712,28 +727,27 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                        break;
 
                if (!dma_pte_present(pte)) {
+                       uint64_t pteval;
+
                        tmp_page = alloc_pgtable_page();
 
-                       if (!tmp_page) {
-                               spin_unlock_irqrestore(&domain->mapping_lock,
-                                       flags);
+                       if (!tmp_page)
                                return NULL;
+
+                       domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+                       pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+                       if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+                               /* Someone else set it while we were thinking; use theirs. */
+                               free_pgtable_page(tmp_page);
+                       } else {
+                               dma_pte_addr(pte);
+                               domain_flush_cache(domain, pte, sizeof(*pte));
                        }
-                       domain_flush_cache(domain, tmp_page, PAGE_SIZE);
-                       dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
-                       /*
-                        * high level table always sets r/w, last level page
-                        * table control read/write
-                        */
-                       dma_set_pte_readable(pte);
-                       dma_set_pte_writable(pte);
-                       domain_flush_cache(domain, pte, sizeof(*pte));
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }
 
-       spin_unlock_irqrestore(&domain->mapping_lock, flags);
        return pte;
 }
 
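
With mapping_lock gone, two CPUs can race to populate the same intermediate page-table slot in pfn_to_dma_pte(). The hunk above settles the race by allocating speculatively and publishing the page with cmpxchg64(), freeing it again if another CPU got there first. A minimal userspace sketch of that publish-or-free pattern (the names and the plain pointer CAS are stand-ins, not the kernel API):

#include <stdlib.h>
#include <stdio.h>

/* Speculatively allocate, then try to install the result with a CAS. */
static void *install_or_reuse(void **slot)
{
	void *new_page = calloc(1, 4096);		/* speculative allocation */
	void *old = __sync_val_compare_and_swap(slot, (void *)0, new_page);

	if (old) {			/* lost the race: take theirs, drop ours */
		free(new_page);
		return old;
	}
	return new_page;		/* won the race: our page is now visible */
}

int main(void)
{
	void *slot = NULL;

	printf("first  install: %p\n", install_or_reuse(&slot));
	printf("second install: %p\n", install_or_reuse(&slot));	/* reuses the first */
	return 0;
}
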
@@ -779,13 +793,12 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
                        start_pfn = align_to_level(start_pfn + 1, 2);
                        continue;
                }
-               while (start_pfn <= last_pfn &&
-                      (unsigned long)pte >> VTD_PAGE_SHIFT ==
-                      (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+               do {
                        dma_clear_pte(pte);
                        start_pfn++;
                        pte++;
-               }
+               } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
+
                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);
        }
@@ -797,7 +810,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long last_pfn)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       struct dma_pte *pte;
+       struct dma_pte *first_pte, *pte;
        int total = agaw_to_level(domain->agaw);
        int level;
        unsigned long tmp;
@@ -805,25 +818,34 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
 
-       /* we don't need lock here, nobody else touches the iova range */
+       /* We don't need lock here; nobody else touches the iova range */
        level = 2;
        while (level <= total) {
                tmp = align_to_level(start_pfn, level);
 
-               /* Only clear this pte/pmd if we're asked to clear its
-                  _whole_ range */
+               /* If we can't even clear one PTE at this level, we're done */
                if (tmp + level_size(level) - 1 > last_pfn)
                        return;
 
-               while (tmp <= last_pfn) {
-                       pte = dma_pfn_level_pte(domain, tmp, level);
-                       if (pte) {
-                               free_pgtable_page(
-                                       phys_to_virt(dma_pte_addr(pte)));
-                               dma_clear_pte(pte);
-                               domain_flush_cache(domain, pte, sizeof(*pte));
+               while (tmp + level_size(level) - 1 <= last_pfn) {
+                       first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+                       if (!pte) {
+                               tmp = align_to_level(tmp + 1, level + 1);
+                               continue;
                        }
-                       tmp += level_size(level);
+                       do {
+                               if (dma_pte_present(pte)) {
+                                       free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+                                       dma_clear_pte(pte);
+                               }
+                               pte++;
+                               tmp += level_size(level);
+                       } while (!first_pte_in_page(pte) &&
+                                tmp + level_size(level) - 1 <= last_pfn);
+
+                       domain_flush_cache(domain, first_pte,
+                                          (void *)pte - (void *)first_pte);
+
                }
                level++;
        }
@@ -1143,6 +1165,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        pr_debug("Number of Domains supported <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
+       spin_lock_init(&iommu->lock);
+
        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
@@ -1155,12 +1179,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                        GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
-               kfree(iommu->domain_ids);
                return -ENOMEM;
        }
 
-       spin_lock_init(&iommu->lock);
-
        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domainid 0. Hence we need to pre-allocate it.
@@ -1180,22 +1201,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
        int i;
        unsigned long flags;
 
-       i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-       for (; i < cap_ndoms(iommu->cap); ) {
-               domain = iommu->domains[i];
-               clear_bit(i, iommu->domain_ids);
+       if ((iommu->domains) && (iommu->domain_ids)) {
+               i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+               for (; i < cap_ndoms(iommu->cap); ) {
+                       domain = iommu->domains[i];
+                       clear_bit(i, iommu->domain_ids);
+
+                       spin_lock_irqsave(&domain->iommu_lock, flags);
+                       if (--domain->iommu_count == 0) {
+                               if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                                       vm_domain_exit(domain);
+                               else
+                                       domain_exit(domain);
+                       }
+                       spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
-               spin_lock_irqsave(&domain->iommu_lock, flags);
-               if (--domain->iommu_count == 0) {
-                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-                               vm_domain_exit(domain);
-                       else
-                               domain_exit(domain);
+                       i = find_next_bit(iommu->domain_ids,
+                               cap_ndoms(iommu->cap), i+1);
                }
-               spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
-               i = find_next_bit(iommu->domain_ids,
-                       cap_ndoms(iommu->cap), i+1);
        }
 
        if (iommu->gcmd & DMA_GCMD_TE)
@@ -1295,7 +1318,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
@@ -1306,8 +1328,6 @@ static void dmar_init_reserved_ranges(void)
 
        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-       lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-               &reserved_alloc_key);
        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                &reserved_rbtree_key);
 
@@ -1361,7 +1381,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        unsigned long sagaw;
 
        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-       spin_lock_init(&domain->mapping_lock);
        spin_lock_init(&domain->iommu_lock);
 
        domain_reserve_special_ranges(domain);
@@ -1492,7 +1511,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                        }
 
                        set_bit(num, iommu->domain_ids);
-                       set_bit(iommu->seq_id, &domain->iommu_bmp);
                        iommu->domains[num] = domain;
                        id = num;
                }
@@ -1635,12 +1653,22 @@ static int domain_context_mapped(struct pci_dev *pdev)
                                             tmp->devfn);
 }
 
-static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-                             unsigned long phys_pfn, unsigned long nr_pages,
-                             int prot)
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+                                           size_t size)
+{
+       host_addr &= ~PAGE_MASK;
+       return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
+static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+                           struct scatterlist *sg, unsigned long phys_pfn,
+                           unsigned long nr_pages, int prot)
 {
        struct dma_pte *first_pte = NULL, *pte = NULL;
+       phys_addr_t uninitialized_var(pteval);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+       unsigned long sg_res;
 
        BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
 
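
aligned_nrpages() above counts VT-d pages but rounds by the MM page size, so a buffer's sub-page offset is included before rounding up. A quick standalone check of that behaviour (assuming 4KiB for both the MM and VT-d page sizes, as on x86):

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT	12			/* assumed 4KiB MM pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT	12			/* assumed 4KiB VT-d pages */

static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;	/* keep only the offset within the page */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* 0x2000 bytes starting at offset 0x234 straddle 3 pages, not 2. */
	assert(aligned_nrpages(0x1234, 0x2000) == 3);
	/* A page-aligned, page-sized buffer is exactly 1 page. */
	assert(aligned_nrpages(0x5000, 0x1000) == 1);
	return 0;
}
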
@@ -1649,7 +1677,22 @@ static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
+       if (sg)
+               sg_res = 0;
+       else {
+               sg_res = nr_pages + 1;
+               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+       }
+
        while (nr_pages--) {
+               uint64_t tmp;
+
+               if (!sg_res) {
+                       sg_res = aligned_nrpages(sg->offset, sg->length);
+                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+                       sg->dma_length = sg->length;
+                       pteval = page_to_phys(sg_page(sg)) | prot;
+               }
                if (!pte) {
                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
                        if (!pte)
@@ -1658,22 +1701,46 @@ static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
-               BUG_ON(dma_pte_addr(pte));
-               pte->val = (phys_pfn << VTD_PAGE_SHIFT) | prot;
+               tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
+               if (tmp) {
+                       static int dumps = 5;
+                       printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+                              iov_pfn, tmp, (unsigned long long)pteval);
+                       if (dumps) {
+                               dumps--;
+                               debug_dma_dump_mappings(NULL);
+                       }
+                       WARN_ON(1);
+               }
                pte++;
-               if (!nr_pages ||
-                   (unsigned long)pte >> VTD_PAGE_SHIFT !=
-                   (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+               if (!nr_pages || first_pte_in_page(pte)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }
                iov_pfn++;
-               phys_pfn++;
+               pteval += VTD_PAGE_SIZE;
+               sg_res--;
+               if (!sg_res)
+                       sg = sg_next(sg);
        }
        return 0;
 }
 
+static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+                                   struct scatterlist *sg, unsigned long nr_pages,
+                                   int prot)
+{
+       return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+}
+
+static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+                                    unsigned long phys_pfn, unsigned long nr_pages,
+                                    int prot)
+{
+       return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+}
+
 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
        if (!iommu)
@@ -1897,14 +1964,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
        struct dmar_domain *domain;
        int ret;
 
-       printk(KERN_INFO
-              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-              pci_name(pdev), start, end);
-
        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;
 
+       /* For _hardware_ passthrough, don't bother. But for software
+          passthrough, we do it anyway -- it may indicate a memory
+          range which is reserved in E820 and so didn't get set
+          up in si_domain to start with */
+       if (domain == si_domain && hw_pass_through) {
+               printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+                      pci_name(pdev), start, end);
+               return 0;
+       }
+
+       printk(KERN_INFO
+              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+              pci_name(pdev), start, end);
+
+       if (end >> agaw_to_width(domain->agaw)) {
+               WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    agaw_to_width(domain->agaw),
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               ret = -EIO;
+               goto error;
+       }
+
        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;
@@ -1955,23 +2043,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-       struct pci_dev *pdev = NULL;
-       struct dmar_domain *domain;
-       int ret;
-
-       for_each_pci_dev(pdev) {
-               domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-               ret = domain_context_mapping(domain, pdev,
-                                            CONTEXT_TT_PASS_THROUGH);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -1986,7 +2057,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 
 }
 
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
@@ -2013,6 +2084,9 @@ static int si_domain_init(void)
 
        si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
+       if (hw)
+               return 0;
+
        for_each_online_node(nid) {
                work_with_active_regions(nid, si_domain_work_fn, &ret);
                if (ret)
@@ -2039,15 +2113,23 @@ static int identity_mapping(struct pci_dev *pdev)
 }
 
 static int domain_add_dev_info(struct dmar_domain *domain,
-                                 struct pci_dev *pdev)
+                              struct pci_dev *pdev,
+                              int translation)
 {
        struct device_domain_info *info;
        unsigned long flags;
+       int ret;
 
        info = alloc_devinfo_mem();
        if (!info)
                return -ENOMEM;
 
+       ret = domain_context_mapping(domain, pdev, translation);
+       if (ret) {
+               free_devinfo_mem(info);
+               return ret;
+       }
+
        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
@@ -2063,26 +2145,67 @@ static int domain_add_dev_info(struct dmar_domain *domain,
        return 0;
 }
 
-static int iommu_prepare_static_identity_mapping(void)
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+       if (iommu_identity_mapping == 2)
+               return IS_GFX_DEVICE(pdev);
+
+       /*
+        * We want to start off with all devices in the 1:1 domain, and
+        * take them out later if we find they can't access all of memory.
+        *
+        * However, we can't do this for PCI devices behind bridges,
+        * because all PCI devices behind the same bridge will end up
+        * with the same source-id on their transactions.
+        *
+        * Practically speaking, we can't change things around for these
+        * devices at run-time, because we can't be sure there'll be no
+        * DMA transactions in flight for any of their siblings.
+        *
+        * So PCI devices (unless they're on the root bus) as well as
+        * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+        * the 1:1 domain, just in _case_ one of their siblings turns out
+        * not to be able to map all of memory.
+        */
+       if (!pdev->is_pcie) {
+               if (!pci_is_root_bus(pdev->bus))
+                       return 0;
+               if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+                       return 0;
+       } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+               return 0;
+
+       /*
+        * At boot time, we don't yet know if devices will be 64-bit capable.
+        * Assume that they will -- if they turn out not to be, then we can
+        * take them out of the 1:1 domain later.
+        */
+       if (!startup)
+               return pdev->dma_mask > DMA_BIT_MASK(32);
+
+       return 1;
+}
+
+static int __init iommu_prepare_static_identity_mapping(int hw)
 {
        struct pci_dev *pdev = NULL;
        int ret;
 
-       ret = si_domain_init();
+       ret = si_domain_init(hw);
        if (ret)
                return -EFAULT;
 
        for_each_pci_dev(pdev) {
-               printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-                      pci_name(pdev));
-
-               ret = domain_context_mapping(si_domain, pdev,
-                                            CONTEXT_TT_MULTI_LEVEL);
-               if (ret)
-                       return ret;
-               ret = domain_add_dev_info(si_domain, pdev);
-               if (ret)
-                       return ret;
+               if (iommu_should_identity_map(pdev, 1)) {
+                       printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+                              hw ? "hardware" : "software", pci_name(pdev));
+
+                       ret = domain_add_dev_info(si_domain, pdev,
+                                                    hw ? CONTEXT_TT_PASS_THROUGH :
+                                                    CONTEXT_TT_MULTI_LEVEL);
+                       if (ret)
+                               return ret;
+               }
        }
 
        return 0;
@@ -2095,14 +2218,6 @@ int __init init_dmars(void)
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int i, ret;
-       int pass_through = 1;
-
-       /*
-        * In case pass through can not be enabled, iommu tries to use identity
-        * mapping.
-        */
-       if (iommu_pass_through)
-               iommu_identity_mapping = 1;
 
        /*
         * for each drhd
@@ -2130,7 +2245,6 @@ int __init init_dmars(void)
        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
-               kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }
@@ -2157,14 +2271,8 @@ int __init init_dmars(void)
                        goto error;
                }
                if (!ecap_pass_through(iommu->ecap))
-                       pass_through = 0;
+                       hw_pass_through = 0;
        }
-       if (iommu_pass_through)
-               if (!pass_through) {
-                       printk(KERN_INFO
-                              "Pass Through is not supported by hardware.\n");
-                       iommu_pass_through = 0;
-               }
 
        /*
         * Start from the sane iommu hardware state.
@@ -2219,60 +2327,57 @@ int __init init_dmars(void)
                }
        }
 
+       if (iommu_pass_through)
+               iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+       else
+               iommu_identity_mapping = 2;
+#endif
        /*
-        * If pass through is set and enabled, context entries of all pci
-        * devices are intialized by pass through translation type.
+        * If pass through is not set or not enabled, setup context entries for
+        * identity mappings for rmrr, gfx, and isa and may fall back to static
+        * identity mapping if iommu_identity_mapping is set.
         */
-       if (iommu_pass_through) {
-               ret = init_context_pass_through();
+       if (iommu_identity_mapping) {
+               ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
-                       printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-                       iommu_pass_through = 0;
+                       printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+                       goto error;
                }
        }
-
        /*
-        * If pass through is not set or not enabled, setup context entries for
-        * identity mappings for rmrr, gfx, and isa and may fall back to static
-        * identity mapping if iommu_identity_mapping is set.
+        * For each rmrr
+        *   for each dev attached to rmrr
+        *   do
+        *     locate drhd for dev, alloc domain for dev
+        *     allocate free domain
+        *     allocate page table entries for rmrr
+        *     if context not allocated for bus
+        *           allocate and init context
+        *           set present in root table for this bus
+        *     init context with domain, translation etc
+        *    endfor
+        * endfor
         */
-       if (!iommu_pass_through) {
-               if (iommu_identity_mapping)
-                       iommu_prepare_static_identity_mapping();
-               /*
-                * For each rmrr
-                *   for each dev attached to rmrr
-                *   do
-                *     locate drhd for dev, alloc domain for dev
-                *     allocate free domain
-                *     allocate page table entries for rmrr
-                *     if context not allocated for bus
-                *           allocate and init context
-                *           set present in root table for this bus
-                *     init context with domain, translation etc
-                *    endfor
-                * endfor
-                */
-               printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-               for_each_rmrr_units(rmrr) {
-                       for (i = 0; i < rmrr->devices_cnt; i++) {
-                               pdev = rmrr->devices[i];
-                               /*
-                                * some BIOS lists non-exist devices in DMAR
-                                * table.
-                                */
-                               if (!pdev)
-                                       continue;
-                               ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-                               if (ret)
-                                       printk(KERN_ERR
-                                "IOMMU: mapping reserved region failed\n");
-                       }
+       printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+       for_each_rmrr_units(rmrr) {
+               for (i = 0; i < rmrr->devices_cnt; i++) {
+                       pdev = rmrr->devices[i];
+                       /*
+                        * Some BIOSes list non-existent devices in the
+                        * DMAR table.
+                        */
+                       if (!pdev)
+                               continue;
+                       ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+                       if (ret)
+                               printk(KERN_ERR
+                                      "IOMMU: mapping reserved region failed\n");
                }
-
-               iommu_prepare_isa();
        }
 
+       iommu_prepare_isa();
+
        /*
         * for each drhd
         *   enable fault log
@@ -2314,60 +2419,39 @@ error:
        return ret;
 }
 
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-                                           size_t size)
-{
-       host_addr &= ~PAGE_MASK;
-       host_addr += size + PAGE_SIZE - 1;
-
-       return host_addr >> VTD_PAGE_SHIFT;
-}
-
-struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
-{
-       struct iova *piova;
-
-       /* Make sure it's in range */
-       end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
-       if (!size || (IOVA_START_ADDR + size > end))
-               return NULL;
-
-       piova = alloc_iova(&domain->iovad,
-                       size >> PAGE_SHIFT, IOVA_PFN(end), 1);
-       return piova;
-}
-
-static struct iova *
-__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-                  size_t size, u64 dma_mask)
+/* This takes a number of _MM_ pages, not VTD pages */
+static struct iova *intel_alloc_iova(struct device *dev,
+                                    struct dmar_domain *domain,
+                                    unsigned long nrpages, uint64_t dma_mask)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;
 
-       if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
-               iova = iommu_alloc_iova(domain, size, dma_mask);
-       else {
+       /* Restrict dma_mask to the width that the iommu can handle */
+       dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+       if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
                /*
                 * First try to allocate an io virtual address in
                 * DMA_BIT_MASK(32) and if that fails then try allocating
                 * from higher range
                 */
-               iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
-               if (!iova)
-                       iova = iommu_alloc_iova(domain, size, dma_mask);
-       }
-
-       if (!iova) {
-               printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+               iova = alloc_iova(&domain->iovad, nrpages,
+                                 IOVA_PFN(DMA_BIT_MASK(32)), 1);
+               if (iova)
+                       return iova;
+       }
+       iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+       if (unlikely(!iova)) {
+               printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+                      nrpages, pci_name(pdev));
                return NULL;
        }
 
        return iova;
 }
 
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
        struct dmar_domain *domain;
        int ret;
@@ -2395,22 +2479,42 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        return domain;
 }
 
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+       struct device_domain_info *info;
+
+       /* No lock here, assumes no domain exit in normal case */
+       info = dev->dev.archdata.iommu;
+       if (likely(info))
+               return info->domain;
+
+       return __get_valid_domain_for_dev(dev);
+}
+
 static int iommu_dummy(struct pci_dev *pdev)
 {
        return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+       struct pci_dev *pdev;
        int found;
 
+       if (unlikely(dev->bus != &pci_bus_type))
+               return 1;
+
+       pdev = to_pci_dev(dev);
+       if (iommu_dummy(pdev))
+               return 1;
+
        if (!iommu_identity_mapping)
-               return iommu_dummy(pdev);
+               return 0;
 
        found = identity_mapping(pdev);
        if (found) {
-               if (pdev->dma_mask > DMA_BIT_MASK(32))
+               if (iommu_should_identity_map(pdev, 0))
                        return 1;
                else {
                        /*
@@ -2427,9 +2531,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
                 * In case of a detached 64 bit DMA device from vm, the device
                 * is put into si_domain for identity mapping.
                 */
-               if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+               if (iommu_should_identity_map(pdev, 0)) {
                        int ret;
-                       ret = domain_add_dev_info(si_domain, pdev);
+                       ret = domain_add_dev_info(si_domain, pdev,
+                                                 hw_pass_through ?
+                                                 CONTEXT_TT_PASS_THROUGH :
+                                                 CONTEXT_TT_MULTI_LEVEL);
                        if (!ret) {
                                printk(KERN_INFO "64bit %s uses identity mapping\n",
                                       pci_name(pdev));
@@ -2438,7 +2545,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
                }
        }
 
-       return iommu_dummy(pdev);
+       return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2451,10 +2558,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
        int prot = 0;
        int ret;
        struct intel_iommu *iommu;
+       unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
        BUG_ON(dir == DMA_NONE);
 
-       if (iommu_no_mapping(pdev))
+       if (iommu_no_mapping(hwdev))
                return paddr;
 
        domain = get_valid_domain_for_dev(pdev);
@@ -2464,7 +2572,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);
 
-       iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
+       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                               pdev->dma_mask);
        if (!iova)
                goto error;
 
@@ -2484,7 +2593,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         * is not a big problem
         */
        ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-                                paddr >> VTD_PAGE_SHIFT, size, prot);
+                                mm_to_dma_pfn(paddr_pfn), size, prot);
        if (ret)
                goto error;
 
@@ -2593,7 +2702,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
        struct iova *iova;
        struct intel_iommu *iommu;
 
-       if (iommu_no_mapping(pdev))
+       if (iommu_no_mapping(dev))
                return;
 
        domain = find_domain(pdev);
@@ -2602,7 +2711,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
        iommu = domain_get_iommu(domain);
 
        iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
-       if (!iova)
+       if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
+                     (unsigned long long)dev_addr))
                return;
 
        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
@@ -2631,12 +2741,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
        }
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-                              int dir)
-{
-       intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -2669,7 +2773,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
-       intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+       intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, order);
 }
 
@@ -2683,7 +2787,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        struct iova *iova;
        struct intel_iommu *iommu;
 
-       if (iommu_no_mapping(pdev))
+       if (iommu_no_mapping(hwdev))
                return;
 
        domain = find_domain(pdev);
@@ -2692,7 +2796,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        iommu = domain_get_iommu(domain);
 
        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
-       if (!iova)
+       if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
+                     (unsigned long long)sglist[0].dma_address))
                return;
 
        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
@@ -2704,11 +2809,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-       iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                             (last_pfn - start_pfn + 1));
-
-       /* free iova */
-       __free_iova(&domain->iovad, iova);
+       if (intel_iommu_strict) {
+               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+                                     last_pfn - start_pfn + 1);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap to save the 1/6th of the
+                * cpu used up by the iotlb flush operation...
+                */
+       }
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
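
In the non-strict path above the IOVA is only queued, so one later IOTLB flush can retire many unmaps; that is where the quoted 1/6th of flush CPU time is saved. A loose userspace sketch of the batching idea (the kernel's add_unmap() keeps per-IOMMU deferred-flush tables and a timer, none of which is shown; all names here are illustrative):

#include <stdio.h>
#include <stddef.h>

#define BATCH_MAX 4	/* tiny batch, just for demonstration */

/* Stand-ins for iommu_flush_iotlb_psi() and __free_iova(); illustrative only. */
static void flush_iotlb(void)               { printf("flush IOTLB\n"); }
static void release_iova(unsigned long pfn) { printf("free iova pfn %lu\n", pfn); }

struct unmap_batch {
	unsigned long iova_pfn[BATCH_MAX];
	size_t count;
};

/* Queue an unmapped IOVA; one flush then covers the whole batch. */
static void queue_unmap(struct unmap_batch *b, unsigned long pfn)
{
	b->iova_pfn[b->count++] = pfn;
	if (b->count == BATCH_MAX) {
		flush_iotlb();
		for (size_t i = 0; i < b->count; i++)
			release_iova(b->iova_pfn[i]);
		b->count = 0;
	}
}

int main(void)
{
	struct unmap_batch b = { .count = 0 };

	for (unsigned long pfn = 0; pfn < 8; pfn++)
		queue_unmap(&b, pfn);	/* two flushes instead of eight */
	return 0;
}
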
@@ -2741,7 +2853,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
        struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
-       if (iommu_no_mapping(pdev))
+       if (iommu_no_mapping(hwdev))
                return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
        domain = get_valid_domain_for_dev(pdev);
@@ -2753,8 +2865,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
        for_each_sg(sglist, sg, nelems, i)
                size += aligned_nrpages(sg->offset, sg->length);
 
-       iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
-                                 pdev->dma_mask);
+       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                               pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;
@@ -2771,27 +2883,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
                prot |= DMA_PTE_WRITE;
 
        start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
-       offset_pfn = 0;
-       for_each_sg(sglist, sg, nelems, i) {
-               int nr_pages = aligned_nrpages(sg->offset, sg->length);
-               ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
-                                        page_to_dma_pfn(sg_page(sg)),
-                                        nr_pages, prot);
-               if (ret) {
-                       /*  clear the page */
-                       dma_pte_clear_range(domain, start_vpfn,
-                                           start_vpfn + offset_pfn);
-                       /* free page tables */
-                       dma_pte_free_pagetable(domain, start_vpfn,
-                                              start_vpfn + offset_pfn);
-                       /* free iova */
-                       __free_iova(&domain->iovad, iova);
-                       return 0;
-               }
-               sg->dma_address = ((dma_addr_t)(start_vpfn + offset_pfn)
-                                  << VTD_PAGE_SHIFT) + sg->offset;
-               sg->dma_length = sg->length;
-               offset_pfn += nr_pages;
+
+       ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
+       if (unlikely(ret)) {
+               /*  clear the page */
+               dma_pte_clear_range(domain, start_vpfn,
+                                   start_vpfn + size - 1);
+               /* free page tables */
+               dma_pte_free_pagetable(domain, start_vpfn,
+                                      start_vpfn + size - 1);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+               return 0;
        }
 
        /* it's a non-present to present mapping. Only flush if caching mode */
@@ -3100,7 +3203,7 @@ int __init intel_iommu_init(void)
         * Check the need for DMA-remapping initialization now.
         * Above initialization will also be used by Interrupt-remapping.
         */
-       if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+       if (no_iommu || swiotlb || dmar_disabled)
                return -ENODEV;
 
        iommu_init_mempool();
@@ -3120,14 +3223,7 @@ int __init intel_iommu_init(void)
 
        init_timer(&unmap_timer);
        force_iommu = 1;
-
-       if (!iommu_pass_through) {
-               printk(KERN_INFO
-                      "Multi-level page-table translation for DMAR.\n");
-               dma_ops = &intel_dma_ops;
-       } else
-               printk(KERN_INFO
-                      "DMAR: Pass through translation for DMAR.\n");
+       dma_ops = &intel_dma_ops;
 
        init_iommu_sysfs();
 
@@ -3301,7 +3397,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
        int adjust_width;
 
        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-       spin_lock_init(&domain->mapping_lock);
        spin_lock_init(&domain->iommu_lock);
 
        domain_reserve_special_ranges(domain);
@@ -3315,6 +3410,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
        domain->iommu_count = 0;
        domain->iommu_coherency = 0;
+       domain->iommu_snooping = 0;
        domain->max_addr = 0;
 
        /* always allocate the top pgd */
@@ -3410,7 +3506,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
        struct intel_iommu *iommu;
        int addr_width;
        u64 end;
-       int ret;
 
        /* normally pdev is not mapped */
        if (unlikely(domain_context_mapped(pdev))) {
@@ -3442,12 +3537,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                return -EFAULT;
        }
 
-       ret = domain_add_dev_info(dmar_domain, pdev);
-       if (ret)
-               return ret;
-
-       ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-       return ret;
+       return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
@@ -3507,6 +3597,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 {
        struct dmar_domain *dmar_domain = domain->priv;
 
+       if (!size)
+               return;
+
        dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                            (iova + size - 1) >> VTD_PAGE_SHIFT);