diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index b8802ae..796828f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
 
 struct dmar_domain {
        int     id;                     /* domain id */
+       int     nid;                    /* node id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses*/
 
        struct list_head devices;       /* all devices' list */
@@ -304,7 +305,7 @@ struct device_domain_info {
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
-       struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+       struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
 };
@@ -386,30 +387,14 @@ static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static struct kmem_cache *iommu_iova_cache;
 
-static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+static inline void *alloc_pgtable_page(int node)
 {
-       unsigned int flags;
-       void *vaddr;
-
-       /* trying to avoid low memory issues */
-       flags = current->flags & PF_MEMALLOC;
-       current->flags |= PF_MEMALLOC;
-       vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
-       current->flags &= (~PF_MEMALLOC | flags);
-       return vaddr;
-}
-
-
-static inline void *alloc_pgtable_page(void)
-{
-       unsigned int flags;
-       void *vaddr;
+       struct page *page;
+       void *vaddr = NULL;
 
-       /* trying to avoid low memory issues */
-       flags = current->flags & PF_MEMALLOC;
-       current->flags |= PF_MEMALLOC;
-       vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
-       current->flags &= (~PF_MEMALLOC | flags);
+       page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (page)
+               vaddr = page_address(page);
        return vaddr;
 }
 
@@ -420,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr)
 
 static inline void *alloc_domain_mem(void)
 {
-       return iommu_kmem_cache_alloc(iommu_domain_cache);
+       return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
 }
 
 static void free_domain_mem(void *vaddr)
@@ -430,7 +415,7 @@ static void free_domain_mem(void *vaddr)
 
 static inline void * alloc_devinfo_mem(void)
 {
-       return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+       return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
 }
 
 static inline void free_devinfo_mem(void *vaddr)
@@ -440,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr)
 
 struct iova *alloc_iova_mem(void)
 {
-       return iommu_kmem_cache_alloc(iommu_iova_cache);
+       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
 }
 
 void free_iova_mem(struct iova *iova)
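The allocator hunks above do two things: they drop the PF_MEMALLOC juggling in favour of plain GFP_ATOMIC cache allocations, and they make page-table pages come from a specific NUMA node. The nid == -1 default used elsewhere in this patch is safe because alloc_pages_node() falls back to the local node; its definition in include/linux/gfp.h of this kernel generation is roughly:

    static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
    {
            /* Unknown node is current node */
            if (nid < 0)
                    nid = numa_node_id();

            return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
    }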
@@ -506,13 +491,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
        domain->iommu_coherency = 1;
 
-       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-       for (; i < g_num_of_iommus; ) {
+       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
-               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
 }
 
@@ -522,13 +505,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 
        domain->iommu_snooping = 1;
 
-       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-       for (; i < g_num_of_iommus; ) {
+       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                        break;
                }
-               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
 }
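The two loops above (and several more below) are straight conversions to the for_each_set_bit() iterator. The macro is shorthand for the removed find_first_bit()/find_next_bit() pattern; its definition in linux/bitops.h of this era is roughly:

    #define for_each_set_bit(bit, addr, size)                         \
            for ((bit) = find_first_bit((addr), (size));              \
                 (bit) < (size);                                      \
                 (bit) = find_next_bit((addr), (size), (bit) + 1))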
 
@@ -589,7 +570,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
-               context = (struct context_entry *)alloc_pgtable_page();
+               context = (struct context_entry *)
+                               alloc_pgtable_page(iommu->node);
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
@@ -732,7 +714,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                if (!dma_pte_present(pte)) {
                        uint64_t pteval;
 
-                       tmp_page = alloc_pgtable_page();
+                       tmp_page = alloc_pgtable_page(domain->nid);
 
                        if (!tmp_page)
                                return NULL;
@@ -868,7 +850,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        struct root_entry *root;
        unsigned long flags;
 
-       root = (struct root_entry *)alloc_pgtable_page();
+       root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root)
                return -ENOMEM;
 
@@ -1082,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-                                 unsigned long pfn, unsigned int pages)
+                                 unsigned long pfn, unsigned int pages, int map)
 {
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1103,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                                DMA_TLB_PSI_FLUSH);
 
        /*
-        * In caching mode, domain ID 0 is reserved for non-present to present
-        * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+        * In caching mode, changing a page from non-present to present
+        * requires a flush; the device IOTLB, however, need not be flushed.
         */
-       if (!cap_caching_mode(iommu->cap) || did)
+       if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
 }
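With caching mode no longer reserving domain ID 0 for non-present-to-present flushes, the new map argument carries that intent explicitly: callers now pass the real domain ID in every case and say whether the flush follows a map or an unmap. A caller-side sketch (start_pfn and npages are placeholder names for the caller's range; the argument values match the callers changed later in this patch):

    /* after establishing a mapping: the device IOTLB flush can be
     * skipped under caching mode, so pass map = 1 */
    iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, npages, 1);

    /* after tearing a mapping down: the device IOTLB must be
     * flushed, so pass map = 0 */
    iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, npages, 0);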
 
@@ -1168,7 +1150,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        unsigned long nlongs;
 
        ndomains = cap_ndoms(iommu->cap);
-       pr_debug("Number of Domains supportd <%ld>\n", ndomains);
+       pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
+                       ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
        spin_lock_init(&iommu->lock);
@@ -1208,8 +1191,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
        unsigned long flags;
 
        if ((iommu->domains) && (iommu->domain_ids)) {
-               i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-               for (; i < cap_ndoms(iommu->cap); ) {
+               for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);
 
@@ -1221,9 +1203,6 @@ void free_dmar_iommu(struct intel_iommu *iommu)
                                        domain_exit(domain);
                        }
                        spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
-                       i = find_next_bit(iommu->domain_ids,
-                               cap_ndoms(iommu->cap), i+1);
                }
        }
 
@@ -1263,6 +1242,7 @@ static struct dmar_domain *alloc_domain(void)
        if (!domain)
                return NULL;
 
+       domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;
 
@@ -1305,14 +1285,11 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 
        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
-       num = find_first_bit(iommu->domain_ids, ndomains);
-       for (; num < ndomains; ) {
+       for_each_set_bit(num, iommu->domain_ids, ndomains) {
                if (iommu->domains[num] == domain) {
                        found = 1;
                        break;
                }
-               num = find_next_bit(iommu->domain_ids,
-                                   cap_ndoms(iommu->cap), num+1);
        }
 
        if (found) {
@@ -1420,9 +1397,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
                domain->iommu_snooping = 0;
 
        domain->iommu_count = 1;
+       domain->nid = iommu->node;
 
        /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1497,15 +1475,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 
                /* find an available domain id for this device in iommu */
                ndomains = cap_ndoms(iommu->cap);
-               num = find_first_bit(iommu->domain_ids, ndomains);
-               for (; num < ndomains; ) {
+               for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == domain) {
                                id = num;
                                found = 1;
                                break;
                        }
-                       num = find_next_bit(iommu->domain_ids,
-                                           cap_ndoms(iommu->cap), num+1);
                }
 
                if (found == 0) {
@@ -1523,12 +1498,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 
                /* Skip top levels of page tables for
                 * iommu which has less agaw than default.
+                * Unnecessary for PT mode.
                 */
-               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-                       pgd = phys_to_virt(dma_pte_addr(pgd));
-                       if (!dma_pte_present(pgd)) {
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               return -ENOMEM;
+               if (translation != CONTEXT_TT_PASS_THROUGH) {
+                       for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                               pgd = phys_to_virt(dma_pte_addr(pgd));
+                               if (!dma_pte_present(pgd)) {
+                                       spin_unlock_irqrestore(&iommu->lock, flags);
+                                       return -ENOMEM;
+                               }
                        }
                }
        }
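In pass-through mode the context entry tells the hardware to leave DMA untranslated, so the second-level page-table pointer is never consulted and there is nothing to walk down (or to fail with -ENOMEM over). For the translated case, each loop iteration steps one level closer to the depth this IOMMU can walk; a worked sketch (locking elided) with domain->agaw == 2 (48-bit, 4 levels) and iommu->agaw == 1 (39-bit, 3 levels):

    /* one iteration: replace the 4-level root with its level-3 child */
    pgd = phys_to_virt(dma_pte_addr(pgd));  /* follow entry 0 downwards */
    if (!dma_pte_present(pgd))
            return -ENOMEM;                 /* nothing mapped that low yet */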
@@ -1567,7 +1545,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+               iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
@@ -1577,6 +1555,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
+               if (domain->iommu_count == 1)
+                       domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@ -1611,7 +1591,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
                        return ret;
                parent = parent->bus->self;
        }
-       if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+       if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
@@ -1991,6 +1971,16 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
               pci_name(pdev), start, end);
        
+       if (end < start) {
+               WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
+                       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                       dmi_get_system_info(DMI_BIOS_VENDOR),
+                       dmi_get_system_info(DMI_BIOS_VERSION),
+                       dmi_get_system_info(DMI_PRODUCT_VERSION));
+               ret = -EIO;
+               goto error;
+       }
+
        if (end >> agaw_to_width(domain->agaw)) {
                WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -2330,14 +2320,16 @@ int __init init_dmars(void)
                         */
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-                       printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
                               "invalidation\n",
+                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                } else {
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
-                       printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
                               "invalidation\n",
+                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                }
        }
@@ -2618,7 +2610,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+               iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
        else
                iommu_flush_write_buffer(iommu);
 
@@ -2658,15 +2650,24 @@ static void flush_unmaps(void)
                if (!deferred_flush[i].next)
                        continue;
 
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+               /* In caching mode, global flushes make emulation expensive */
+               if (!cap_caching_mode(iommu->cap))
+                       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
                for (j = 0; j < deferred_flush[i].next; j++) {
                        unsigned long mask;
                        struct iova *iova = deferred_flush[i].iova[j];
-
-                       mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
-                       iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-                                       (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+                       struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+                       /* On real hardware multiple invalidations are expensive */
+                       if (cap_caching_mode(iommu->cap))
+                               iommu_flush_iotlb_psi(iommu, domain->id,
+                               iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+                       else {
+                               mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+                               iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+                                               (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+                       }
                        __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
                }
                deferred_flush[i].next = 0;
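The rewritten drain loop picks a flush strategy per IOMMU. Under caching mode, i.e. when running as a guest, every global flush forces the emulator to rescan the whole address space, so the code issues page-selective (PSI) flushes per deferred IOVA instead; on bare metal, many selective invalidations are the expensive path, so the single global flush is kept and only device-IOTLB invalidations remain per entry. The decision, condensed (placement differs in the real loop: the global flush happens once, before iterating):

    if (cap_caching_mode(iommu->cap))
            /* guest: one PSI flush per deferred range, no global flush */
            iommu_flush_iotlb_psi(iommu, domain->id, iova->pfn_lo,
                                  iova->pfn_hi - iova->pfn_lo + 1, 0);
    else
            /* bare metal: a single global flush amortizes the cost */
            iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);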
@@ -2747,7 +2748,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 
        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                                     last_pfn - start_pfn + 1);
+                                     last_pfn - start_pfn + 1, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
@@ -2767,7 +2768,15 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
-       flags &= ~(GFP_DMA | GFP_DMA32);
+
+       if (!iommu_no_mapping(hwdev))
+               flags &= ~(GFP_DMA | GFP_DMA32);
+       else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
+               if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+                       flags |= GFP_DMA;
+               else
+                       flags |= GFP_DMA32;
+       }
 
        vaddr = (void *)__get_free_pages(flags, order);
        if (!vaddr)
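When iommu_no_mapping() is true the device sees physical addresses directly, so the buffer itself must satisfy the device's coherent mask; the zone flags are now chosen from the mask instead of being stripped unconditionally. A worked sketch of the selection (hwdev stands for the device being allocated for):

    /* e.g. coherent_dma_mask = DMA_BIT_MASK(24): below 32 bits,
     * so the allocation must come from ZONE_DMA (first 16MB)   */
    if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
            flags |= GFP_DMA;
    /* e.g. coherent_dma_mask = DMA_BIT_MASK(32) on a >4GB box:
     * ZONE_DMA32 keeps the buffer below 4GB                    */
    else
            flags |= GFP_DMA32;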
@@ -2829,7 +2838,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 
        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                                     last_pfn - start_pfn + 1);
+                                     last_pfn - start_pfn + 1, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
@@ -2863,7 +2872,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
        struct dmar_domain *domain;
        size_t size = 0;
        int prot = 0;
-       size_t offset_pfn = 0;
        struct iova *iova = NULL;
        int ret;
        struct scatterlist *sg;
@@ -2917,7 +2925,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
+               iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
        else
                iommu_flush_write_buffer(iommu);
 
@@ -3207,6 +3215,36 @@ static int __init init_iommu_sysfs(void)
 }
 #endif /* CONFIG_PM */
 
+/*
+ * Here we only respond to a device being unbound from its driver.
+ *
+ * A newly added device is not attached to its DMAR domain here yet; that
+ * happens when the device is first mapped to an iova.
+ */
+static int device_notifier(struct notifier_block *nb,
+                                 unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct dmar_domain *domain;
+
+       if (iommu_no_mapping(dev))
+               return 0;
+
+       domain = find_domain(pdev);
+       if (!domain)
+               return 0;
+
+       if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+               domain_remove_one_dev_info(domain, pdev);
+
+       return 0;
+}
+
+static struct notifier_block device_nb = {
+       .notifier_call = device_notifier,
+};
+
 int __init intel_iommu_init(void)
 {
        int ret = 0;
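The notifier closes a hole: without it, a device unbound from its driver stayed attached to its dmar_domain, keeping stale mappings live across a rebind. The flow, assuming the driver core of this era: device_release_driver() fires BUS_NOTIFY_UNBOUND_DRIVER on the bus notifier chain, which lands in device_notifier() above, which calls domain_remove_one_dev_info(). Registration (done below in intel_iommu_init()) is the standard one-liner:

    bus_register_notifier(&pci_bus_type, &device_nb);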
@@ -3231,7 +3269,7 @@ int __init intel_iommu_init(void)
         * Check the need for DMA-remapping initialization now.
         * Above initialization will also be used by Interrupt-remapping.
         */
-       if (no_iommu || swiotlb || dmar_disabled)
+       if (no_iommu || dmar_disabled)
                return -ENODEV;
 
        iommu_init_mempool();
@@ -3252,13 +3290,17 @@ int __init intel_iommu_init(void)
        "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
        init_timer(&unmap_timer);
-       force_iommu = 1;
+#ifdef CONFIG_SWIOTLB
+       swiotlb = 0;
+#endif
        dma_ops = &intel_dma_ops;
 
        init_iommu_sysfs();
 
        register_iommu(&intel_iommu_ops);
 
+       bus_register_notifier(&pci_bus_type, &device_nb);
+
        return 0;
 }
 
@@ -3280,7 +3322,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                         parent->devfn);
                        parent = parent->bus->self;
                }
-               if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+               if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                        iommu_detach_dev(iommu,
                                tmp->subordinate->number, 0);
                else /* this is a legacy PCI bridge */
@@ -3391,22 +3433,6 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 /* domain id for virtual machine, it won't be set in context */
 static unsigned long vm_domid;
 
-static int vm_domain_min_agaw(struct dmar_domain *domain)
-{
-       int i;
-       int min_agaw = domain->agaw;
-
-       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-       for (; i < g_num_of_iommus; ) {
-               if (min_agaw > g_iommus[i]->agaw)
-                       min_agaw = g_iommus[i]->agaw;
-
-               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
-       }
-
-       return min_agaw;
-}
-
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
        struct dmar_domain *domain;
@@ -3416,6 +3442,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
                return NULL;
 
        domain->id = vm_domid++;
+       domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
 
@@ -3442,9 +3469,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->max_addr = 0;
+       domain->nid = -1;
 
        /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -3465,8 +3493,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
                iommu = drhd->iommu;
 
                ndomains = cap_ndoms(iommu->cap);
-               i = find_first_bit(iommu->domain_ids, ndomains);
-               for (; i < ndomains; ) {
+               for_each_set_bit(i, iommu->domain_ids, ndomains) {
                        if (iommu->domains[i] == domain) {
                                spin_lock_irqsave(&iommu->lock, flags);
                                clear_bit(i, iommu->domain_ids);
@@ -3474,7 +3501,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                break;
                        }
-                       i = find_next_bit(iommu->domain_ids, ndomains, i+1);
                }
        }
 }
@@ -3535,7 +3561,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
        struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_iommu *iommu;
        int addr_width;
-       u64 end;
 
        /* normally pdev is not mapped */
        if (unlikely(domain_context_mapped(pdev))) {
@@ -3558,14 +3583,30 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
-       end = DOMAIN_MAX_ADDR(addr_width);
-       end = end & VTD_PAGE_MASK;
-       if (end < dmar_domain->max_addr) {
-               printk(KERN_ERR "%s: iommu agaw (%d) is not "
+       if (addr_width > cap_mgaw(iommu->cap))
+               addr_width = cap_mgaw(iommu->cap);
+
+       if (dmar_domain->max_addr > (1LL << addr_width)) {
+               printk(KERN_ERR "%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
-                      __func__, iommu->agaw, dmar_domain->max_addr);
+                      __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
+       dmar_domain->gaw = addr_width;
+
+       /*
+        * Knock out extra levels of page tables if necessary
+        */
+       while (iommu->agaw < dmar_domain->agaw) {
+               struct dma_pte *pte;
+
+               pte = dmar_domain->pgd;
+               if (dma_pte_present(pte)) {
+                       dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
+                       free_pgtable_page(pte);
+               }
+               dmar_domain->agaw--;
+       }
 
        return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
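The knock-out loop is the mirror image of the walk in domain_context_mapping_one(): instead of descending at context-setup time, it permanently shrinks the domain's page-table tree to the attached IOMMU's depth. That is safe because the max_addr check just rejected any mapping the shallower tree could not express, so at most entry 0 of each discarded top level can be present. One iteration, worked for agaw 2 -> 1 (4 -> 3 levels):

    struct dma_pte *old_top = dmar_domain->pgd;              /* level-4 table */
    dmar_domain->pgd = phys_to_virt(dma_pte_addr(old_top));  /* its child     */
    free_pgtable_page(old_top);                              /* free old top  */
    dmar_domain->agaw--;                                     /* now 3 levels  */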
@@ -3579,14 +3620,14 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
        domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-static int intel_iommu_map_range(struct iommu_domain *domain,
-                                unsigned long iova, phys_addr_t hpa,
-                                size_t size, int iommu_prot)
+static int intel_iommu_map(struct iommu_domain *domain,
+                          unsigned long iova, phys_addr_t hpa,
+                          int gfp_order, int iommu_prot)
 {
        struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
-       int addr_width;
        int prot = 0;
+       size_t size;
        int ret;
 
        if (iommu_prot & IOMMU_READ)
@@ -3596,20 +3637,17 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;
 
+       size     = PAGE_SIZE << gfp_order;
        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
-               int min_agaw;
                u64 end;
 
                /* check if minimum agaw is sufficient for mapped address */
-               min_agaw = vm_domain_min_agaw(dmar_domain);
-               addr_width = agaw_to_width(min_agaw);
-               end = DOMAIN_MAX_ADDR(addr_width);
-               end = end & VTD_PAGE_MASK;
+               end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
-                       printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                       printk(KERN_ERR "%s: iommu width (%d) is not "
                               "sufficient for the mapped address (%llx)\n",
-                              __func__, min_agaw, max_addr);
+                              __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
@@ -3622,19 +3660,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
        return ret;
 }
 
-static void intel_iommu_unmap_range(struct iommu_domain *domain,
-                                   unsigned long iova, size_t size)
+static int intel_iommu_unmap(struct iommu_domain *domain,
+                            unsigned long iova, int gfp_order)
 {
        struct dmar_domain *dmar_domain = domain->priv;
-
-       if (!size)
-               return;
+       size_t size = PAGE_SIZE << gfp_order;
 
        dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                            (iova + size - 1) >> VTD_PAGE_SHIFT);
 
        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;
+
+       return gfp_order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3667,8 +3705,8 @@ static struct iommu_ops intel_iommu_ops = {
        .domain_destroy = intel_iommu_domain_destroy,
        .attach_dev     = intel_iommu_attach_device,
        .detach_dev     = intel_iommu_detach_device,
-       .map            = intel_iommu_map_range,
-       .unmap          = intel_iommu_unmap_range,
+       .map            = intel_iommu_map,
+       .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
 };
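With these ops registered via register_iommu(), users go through the generic iommu-api, where the map/unmap signatures now take a page order rather than a byte count. A usage sketch against the API of this kernel generation (pdev and pages are placeholders for a caller's PCI device and backing memory):

    struct iommu_domain *dom = iommu_domain_alloc();

    if (!dom)
            return -ENOMEM;
    if (!iommu_attach_device(dom, &pdev->dev)) {
            /* gfp_order 2 -> PAGE_SIZE << 2 = 16KB with 4KB pages */
            iommu_map(dom, 0x100000, page_to_phys(pages), 2,
                      IOMMU_READ | IOMMU_WRITE);
            /* ... device DMA to IOVA 0x100000 hits 'pages' ... */
            iommu_unmap(dom, 0x100000, 2);   /* returns the order unmapped */
            iommu_detach_device(dom, &pdev->dev);
    }
    iommu_domain_free(dom);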