diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 977d29b..5c8baa4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
  * Author: Ashok Raj <ashok.raj@intel.com>
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
 #include <linux/bitmap.h>
+#include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
-#include "iova.h"
-#include "intel-iommu.h"
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
+#include <linux/timer.h>
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
 #include <asm/cacheflush.h>
-#include <asm/gart.h>
+#include <asm/iommu.h>
 #include "pci.h"
 
+#define ROOT_SIZE              VTD_PAGE_SIZE
+#define CONTEXT_SIZE           VTD_PAGE_SIZE
+
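Note: ROOT_SIZE and CONTEXT_SIZE alias the VT-d hardware page size rather than the CPU's PAGE_SIZE, which is why the hunks below sometimes use PAGE_SIZE/PAGE_MASK (CPU view of the buffers) and sometimes VTD_PAGE_SIZE/VTD_PAGE_MASK (hardware page-table granularity). A minimal sketch of the VTD_PAGE_* helpers this patch starts relying on; they are assumed to come from linux/intel-iommu.h roughly as:

#define VTD_PAGE_SHIFT          (12)    /* VT-d page tables always use 4 KiB pages */
#define VTD_PAGE_SIZE           (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK           (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)    (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
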
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
 
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
-#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
-
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
+
+static void flush_unmaps_timeout(unsigned long data);
+
+DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
+
+#define HIGH_WATER_MARK 250
+struct deferred_flush_tables {
+       int next;
+       struct iova *iova[HIGH_WATER_MARK];
+       struct dmar_domain *domain[HIGH_WATER_MARK];
+};
+
+static struct deferred_flush_tables *deferred_flush;
+
+/* bitmap for indexing intel_iommus */
+static int g_num_of_iommus;
+
+static DEFINE_SPINLOCK(async_umap_flush_lock);
+static LIST_HEAD(unmaps_to_do);
+
+static int timer_on;
+static long list_size;
+
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-static int dmar_disabled;
+int dmar_disabled;
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
+static int intel_iommu_strict;
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
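Note: the globals introduced above implement deferred ("batched") IOTLB flushing. Instead of invalidating the IOTLB on every unmap, freed IOVAs are parked in a per-IOMMU table and released in bulk, either when a 10 ms timer fires or when HIGH_WATER_MARK entries pile up. A condensed restatement of the add_unmap()/flush_unmaps() pair added further down in this patch; queue_iova_for_flush() is a made-up name for illustration and the async_umap_flush_lock locking is omitted:

static void queue_iova_for_flush(struct dmar_domain *dom, struct iova *iova)
{
        int id, slot;

        if (list_size == HIGH_WATER_MARK)       /* backlog full: flush everything now */
                flush_unmaps();

        id   = dom->iommu->seq_id;              /* index into deferred_flush[] */
        slot = deferred_flush[id].next++;
        deferred_flush[id].domain[slot] = dom;
        deferred_flush[id].iova[slot]   = iova;

        if (!timer_on) {                        /* first pending entry arms the timer */
                mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
                timer_on = 1;
        }
        list_size++;
}
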
@@ -74,9 +101,13 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
-                       printk (KERN_INFO
+                       printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
+               } else if (!strncmp(str, "strict", 6)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable batched IOTLB flush\n");
+                       intel_iommu_strict = 1;
                }
 
                str += strcspn(str, ",");
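Note: the new "strict" token turns the batching back into a synchronous IOTLB flush on every unmap. Usage example (illustrative; the other options already existed, and the parser still walks a comma-separated list):

        intel_iommu=strict              flush the IOTLB synchronously on each unmap
        intel_iommu=igfx_off,forcedac   existing options combine with commas as before
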
@@ -128,7 +159,7 @@ static inline void *alloc_domain_mem(void)
        return iommu_kmem_cache_alloc(iommu_domain_cache);
 }
 
-static inline void free_domain_mem(void *vaddr)
+static void free_domain_mem(void *vaddr)
 {
        kmem_cache_free(iommu_domain_cache, vaddr);
 }
@@ -153,13 +184,6 @@ void free_iova_mem(struct iova *iova)
        kmem_cache_free(iommu_iova_cache, iova);
 }
 
-static inline void __iommu_flush_cache(
-       struct intel_iommu *iommu, void *addr, int size)
-{
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(addr, size);
-}
-
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
@@ -178,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
-               __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+               __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
@@ -324,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                                return NULL;
                        }
                        __iommu_flush_cache(domain->iommu, tmp_page,
-                                       PAGE_SIZE_4K);
+                                       PAGE_SIZE);
                        dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
                        /*
                         * high level table always sets r/w, last level page
@@ -387,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
        start &= (((u64)1) << addr_width) - 1;
        end &= (((u64)1) << addr_width) - 1;
        /* in case it's partial page */
-       start = PAGE_ALIGN_4K(start);
-       end &= PAGE_MASK_4K;
+       start = PAGE_ALIGN(start);
+       end &= PAGE_MASK;
 
        /* we don't need lock here, nobody else touches the iova range */
        while (start < end) {
                dma_pte_clear_one(domain, start);
-               start += PAGE_SIZE_4K;
+               start += VTD_PAGE_SIZE;
        }
 }
 
@@ -447,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        if (!root)
                return -ENOMEM;
 
-       __iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, root, ROOT_SIZE);
 
        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
@@ -456,19 +480,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        return 0;
 }
 
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-       unsigned long start_time = jiffies;\
-       while (1) {\
-               sts = op (iommu->reg + offset);\
-               if (cond)\
-                       break;\
-               if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\
-                       panic("DMAR hardware is malfunctioning\n");\
-               cpu_relax();\
-       }\
-}
-
 static void iommu_set_root_entry(struct intel_iommu *iommu)
 {
        void *addr;
@@ -555,31 +566,10 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 
        spin_unlock_irqrestore(&iommu->register_lock, flag);
 
-       /* flush context entry will implictly flush write buffer */
+       /* flush context entry will implicitly flush write buffer */
        return 0;
 }
 
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
-       int non_present_entry_flush)
-{
-       return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-               non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
-       int non_present_entry_flush)
-{
-       return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
-               non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
-       u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
-       return __iommu_flush_context(iommu, did, source_id, function_mask,
-               DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
 /* return value determine if we need a write buffer flush */
 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
        u64 addr, unsigned int size_order, u64 type,
@@ -647,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-                       DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
-       /* flush context entry will implictly flush write buffer */
+                       (unsigned long long)DMA_TLB_IIRG(type),
+                       (unsigned long long)DMA_TLB_IAIG(val));
+       /* flush iotlb entry will implicitly flush write buffer */
        return 0;
 }
 
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
-       int non_present_entry_flush)
-{
-       return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-               non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
-       int non_present_entry_flush)
-{
-       return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
-               non_present_entry_flush);
-}
-
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
        u64 addr, unsigned int pages, int non_present_entry_flush)
 {
        unsigned int mask;
 
-       BUG_ON(addr & (~PAGE_MASK_4K));
+       BUG_ON(addr & (~VTD_PAGE_MASK));
        BUG_ON(pages == 0);
 
        /* Fallback to domain selective flush if no PSI support */
        if (!cap_pgsel_inv(iommu->cap))
-               return iommu_flush_iotlb_dsi(iommu, did,
-                       non_present_entry_flush);
+               return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                               DMA_TLB_DSI_FLUSH,
+                                               non_present_entry_flush);
 
        /*
         * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -686,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
        mask = ilog2(__roundup_pow_of_two(pages));
        /* Fallback to domain selective flush if size is too big */
        if (mask > cap_max_amask_val(iommu->cap))
-               return iommu_flush_iotlb_dsi(iommu, did,
-                       non_present_entry_flush);
+               return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                       DMA_TLB_DSI_FLUSH, non_present_entry_flush);
 
-       return __iommu_flush_iotlb(iommu, did, addr, mask,
-               DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+       return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+                                       DMA_TLB_PSI_FLUSH,
+                                       non_present_entry_flush);
 }
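Note: a short worked example of the mask computation above. PSI can only invalidate a naturally aligned power-of-two range, so a 5-page request gives mask = ilog2(__roundup_pow_of_two(5)) = ilog2(8) = 3 and the hardware invalidates the 2^3 = 8-page aligned region containing addr. If that mask exceeds cap_max_amask_val(), or the hardware lacks PSI support (cap_pgsel_inv() is clear), the code falls back to a domain-selective flush as shown.
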
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -823,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-               u8 fault_reason, u16 source_id, u64 addr)
+               u8 fault_reason, u16 source_id, unsigned long long addr)
 {
        const char *reason;
 
@@ -958,6 +937,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                return -ENOMEM;
        }
 
+       spin_lock_init(&iommu->lock);
+
        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domainid 0. Hence we need to pre-allocate it.
@@ -967,65 +948,14 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        return 0;
 }
 
-static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
-{
-       struct intel_iommu *iommu;
-       int ret;
-       int map_size;
-       u32 ver;
-
-       iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-       if (!iommu)
-               return NULL;
-       iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
-       if (!iommu->reg) {
-               printk(KERN_ERR "IOMMU: can't map the region\n");
-               goto error;
-       }
-       iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-       iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-       /* the registers might be more than one page */
-       map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-               cap_max_fault_reg_offset(iommu->cap));
-       map_size = PAGE_ALIGN_4K(map_size);
-       if (map_size > PAGE_SIZE_4K) {
-               iounmap(iommu->reg);
-               iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-               if (!iommu->reg) {
-                       printk(KERN_ERR "IOMMU: can't map the region\n");
-                       goto error;
-               }
-       }
-
-       ver = readl(iommu->reg + DMAR_VER_REG);
-       pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-               drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-               iommu->cap, iommu->ecap);
-       ret = iommu_init_domains(iommu);
-       if (ret)
-               goto error_unmap;
-       spin_lock_init(&iommu->lock);
-       spin_lock_init(&iommu->register_lock);
-
-       drhd->iommu = iommu;
-       return iommu;
-error_unmap:
-       iounmap(iommu->reg);
-error:
-       kfree(iommu);
-       return NULL;
-}
 
 static void domain_exit(struct dmar_domain *domain);
-static void free_iommu(struct intel_iommu *iommu)
+
+void free_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
        int i;
 
-       if (!iommu)
-               return;
-
        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
@@ -1050,10 +980,6 @@ static void free_iommu(struct intel_iommu *iommu)
 
        /* free context mapping */
        free_context_table(iommu);
-
-       if (iommu->reg)
-               iounmap(iommu->reg);
-       kfree(iommu);
 }
 
 static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
@@ -1097,6 +1023,8 @@ static void iommu_free_domain(struct dmar_domain *domain)
 }
 
 static struct iova_domain reserved_iova_list;
+static struct lock_class_key reserved_alloc_key;
+static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
 {
@@ -1107,6 +1035,11 @@ static void dmar_init_reserved_ranges(void)
 
        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
+       lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
+               &reserved_alloc_key);
+       lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
+               &reserved_rbtree_key);
+
        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                IOVA_PFN(IOAPIC_RANGE_END));
@@ -1122,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        addr = r->start;
-                       addr &= PAGE_MASK_4K;
+                       addr &= PAGE_MASK;
                        size = r->end - addr;
-                       size = PAGE_ALIGN_4K(size);
+                       size = PAGE_ALIGN(size);
                        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
                                IOVA_PFN(size + addr) - 1);
                        if (!iova)
@@ -1186,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
                return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
 }
 
@@ -1202,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
-       end = end & (~PAGE_MASK_4K);
+       end = end & (~PAGE_MASK);
 
        /* clear ptes */
        dma_pte_clear_range(domain, 0, end);
@@ -1242,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        __iommu_flush_cache(iommu, context, sizeof(*context));
 
        /* it's a non-present to present mapping */
-       if (iommu_flush_context_device(iommu, domain->id,
-                       (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+       if (iommu->flush.flush_context(iommu, domain->id,
+               (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+               DMA_CCMD_DEVICE_INVL, 1))
                iommu_flush_write_buffer(iommu);
        else
-               iommu_flush_iotlb_dsi(iommu, 0, 0);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+
        spin_unlock_irqrestore(&iommu->lock, flags);
        return 0;
 }
@@ -1321,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
        u64 start_pfn, end_pfn;
        struct dma_pte *pte;
        int index;
+       int addr_width = agaw_to_width(domain->agaw);
+
+       hpa &= (((u64)1) << addr_width) - 1;
 
        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;
-       iova &= PAGE_MASK_4K;
-       start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-       end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+       iova &= PAGE_MASK;
+       start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+       end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
        index = 0;
        while (start_pfn < end_pfn) {
-               pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+               pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
                if (!pte)
                        return -ENOMEM;
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
                BUG_ON(dma_pte_addr(*pte));
-               dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+               dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
                dma_set_pte_prot(*pte, prot);
                __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
                start_pfn++;
@@ -1348,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
        clear_context_table(domain->iommu, bus, devfn);
-       iommu_flush_context_global(domain->iommu, 0);
-       iommu_flush_iotlb_global(domain->iommu, 0);
+       domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+                                          DMA_CCMD_GLOBAL_INVL, 0);
+       domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+                                        DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1379,7 +1319,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
  * find_domain
  * Note: we use struct pci_dev->dev.archdata.iommu stores the info
  */
-struct dmar_domain *
+static struct dmar_domain *
 find_domain(struct pci_dev *pdev)
 {
        struct device_domain_info *info;
@@ -1391,37 +1331,6 @@ find_domain(struct pci_dev *pdev)
        return NULL;
 }
 
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-     struct pci_dev *dev)
-{
-       int index;
-
-       while (dev) {
-               for (index = 0; index < cnt; index ++)
-                       if (dev == devices[index])
-                               return 1;
-
-               /* Check our parent */
-               dev = dev->bus->self;
-       }
-
-       return 0;
-}
-
-static struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
-       struct dmar_drhd_unit *drhd = NULL;
-
-       list_for_each_entry(drhd, &dmar_drhd_units, list) {
-               if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-                                               drhd->devices_cnt, dev))
-                       return drhd;
-       }
-
-       return NULL;
-}
-
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 {
@@ -1543,11 +1452,13 @@ error:
        return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+                                     unsigned long long start,
+                                     unsigned long long end)
 {
        struct dmar_domain *domain;
        unsigned long size;
-       u64 base;
+       unsigned long long base;
        int ret;
 
        printk(KERN_INFO
@@ -1559,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
                return -ENOMEM;
 
        /* The address might not be aligned */
-       base = start & PAGE_MASK_4K;
+       base = start & PAGE_MASK;
        size = end - base;
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
                        IOVA_PFN(base + size) - 1)) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1602,12 +1513,43 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
 }
 
 #ifdef CONFIG_DMAR_GFX_WA
-extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
+struct iommu_prepare_data {
+       struct pci_dev *pdev;
+       int ret;
+};
+
+static int __init iommu_prepare_work_fn(unsigned long start_pfn,
+                                        unsigned long end_pfn, void *datax)
+{
+       struct iommu_prepare_data *data;
+
+       data = (struct iommu_prepare_data *)datax;
+
+       data->ret = iommu_prepare_identity_map(data->pdev,
+                               start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+       return data->ret;
+
+}
+
+static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
+{
+       int nid;
+       struct iommu_prepare_data data;
+
+       data.pdev = pdev;
+       data.ret = 0;
+
+       for_each_online_node(nid) {
+               work_with_active_regions(nid, iommu_prepare_work_fn, &data);
+               if (data.ret)
+                       return data.ret;
+       }
+       return data.ret;
+}
+
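Note: the graphics work-around now walks the architecture's active memory regions instead of the removed arch_get_ram_range() loop. The helper contract assumed here (prototype reproduced from the early-node-map walker, so treat it as an approximation) is that the callback runs once per contiguous RAM range of a node and a non-zero return stops the walk, which is why iommu_prepare_work_fn() propagates the identity-map result through data->ret:

typedef int (*work_fn_t)(unsigned long start_pfn, unsigned long end_pfn, void *data);
void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
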
 static void __init iommu_prepare_gfx_mapping(void)
 {
        struct pci_dev *pdev = NULL;
-       u64 base, size;
-       int slot;
        int ret;
 
        for_each_pci_dev(pdev) {
@@ -1616,17 +1558,9 @@ static void __init iommu_prepare_gfx_mapping(void)
                        continue;
                printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
                        pci_name(pdev));
-               slot = arch_get_ram_range(0, &base, &size);
-               while (slot >= 0) {
-                       ret = iommu_prepare_identity_map(pdev,
-                                       base, base + size);
-                       if (ret)
-                               goto error;
-                       slot = arch_get_ram_range(slot, &base, &size);
-               }
-               continue;
-error:
-               printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
+               ret = iommu_prepare_with_active_regions(pdev);
+               if (ret)
+                       printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
        }
 }
 #endif
@@ -1662,7 +1596,7 @@ int __init init_dmars(void)
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
-       int ret, unit = 0;
+       int i, ret, unit = 0;
 
        /*
         * for each drhd
@@ -1671,13 +1605,30 @@ int __init init_dmars(void)
         * endfor
         */
        for_each_drhd_unit(drhd) {
+               g_num_of_iommus++;
+               /*
+                * lock not needed as this is only incremented in the single
+                * threaded kernel __init code path; all other accesses are
+                * read only
+                */
+       }
+
+       deferred_flush = kzalloc(g_num_of_iommus *
+               sizeof(struct deferred_flush_tables), GFP_KERNEL);
+       if (!deferred_flush) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
-               iommu = alloc_iommu(drhd);
-               if (!iommu) {
-                       ret = -ENOMEM;
+
+               iommu = drhd->iommu;
+
+               ret = iommu_init_domains(iommu);
+               if (ret)
                        goto error;
-               }
 
                /*
                 * TBD:
@@ -1691,6 +1642,30 @@ int __init init_dmars(void)
                }
        }
 
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               iommu = drhd->iommu;
+               if (dmar_enable_qi(iommu)) {
+                       /*
+                        * Queued Invalidate not enabled, use Register Based
+                        * Invalidate
+                        */
+                       iommu->flush.flush_context = __iommu_flush_context;
+                       iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+                       printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+                              "invalidation\n",
+                              (unsigned long long)drhd->reg_base_addr);
+               } else {
+                       iommu->flush.flush_context = qi_flush_context;
+                       iommu->flush.flush_iotlb = qi_flush_iotlb;
+                       printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+                              "invalidation\n",
+                              (unsigned long long)drhd->reg_base_addr);
+               }
+       }
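Note: every invalidation site in this patch now goes through the per-IOMMU hooks selected above, so callers no longer care whether the hardware uses register-based or queued invalidation; the hooks point either at __iommu_flush_context()/__iommu_flush_iotlb() in this file or at qi_flush_context()/qi_flush_iotlb() from the queued-invalidation support in the DMAR code. A minimal sketch of the assumed hook structure, with prototypes inferred from the call sites (treat the exact layout as an approximation):

struct iommu_flush {
        int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
                             u8 fm, u64 type, int non_present_entry_flush);
        int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                           unsigned int size_order, u64 type,
                           int non_present_entry_flush);
};
/* embedded in struct intel_iommu as the 'flush' member used throughout this diff */
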
+
        /*
         * For each rmrr
         *   for each dev attached to rmrr
@@ -1706,7 +1681,6 @@ int __init init_dmars(void)
         * endfor
         */
        for_each_rmrr_units(rmrr) {
-               int i;
                for (i = 0; i < rmrr->devices_cnt; i++) {
                        pdev = rmrr->devices[i];
                        /* some BIOS lists non-exist devices in DMAR table */
@@ -1744,9 +1718,10 @@ int __init init_dmars(void)
 
                iommu_set_root_entry(iommu);
 
-               iommu_flush_context_global(iommu, 0);
-               iommu_flush_iotlb_global(iommu, 0);
-
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+                                          0);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+                                        0);
                iommu_disable_protect_mem_regions(iommu);
 
                ret = iommu_enable_translation(iommu);
@@ -1768,8 +1743,8 @@ error:
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
        u64 addr;
-       addr = (host_addr & (~PAGE_MASK_4K)) + size;
-       return PAGE_ALIGN_4K(addr);
+       addr = (host_addr & (~PAGE_MASK)) + size;
+       return PAGE_ALIGN(addr);
 }
 
 struct iova *
@@ -1783,20 +1758,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
                return NULL;
 
        piova = alloc_iova(&domain->iovad,
-                       size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+                       size >> PAGE_SHIFT, IOVA_PFN(end), 1);
        return piova;
 }
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-               size_t size)
+                  size_t size, u64 dma_mask)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;
 
-       if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-               iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-       } else  {
+       if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+               iova = iommu_alloc_iova(domain, size, dma_mask);
+       else {
                /*
                 * First try to allocate an io virtual address in
                 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1804,7 +1779,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
                 */
                iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
                if (!iova)
-                       iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+                       iova = iommu_alloc_iova(domain, size, dma_mask);
        }
 
        if (!iova) {
@@ -1843,32 +1818,31 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        return domain;
 }
 
-static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
-       size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+                                    size_t size, int dir, u64 dma_mask)
 {
        struct pci_dev *pdev = to_pci_dev(hwdev);
-       int ret;
        struct dmar_domain *domain;
-       unsigned long start_addr;
+       phys_addr_t start_paddr;
        struct iova *iova;
        int prot = 0;
+       int ret;
 
        BUG_ON(dir == DMA_NONE);
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-               return virt_to_bus(addr);
+               return paddr;
 
        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;
 
-       addr = (void *)virt_to_phys(addr);
-       size = aligned_size((u64)addr, size);
+       size = aligned_size((u64)paddr, size);
 
-       iova = __intel_alloc_iova(hwdev, domain, size);
+       iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
        if (!iova)
                goto error;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
        /*
         * Check if DMAR supports zero-length reads on write only
@@ -1880,38 +1854,99 @@ static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
        /*
-        * addr - (addr + size) might be partial page, we should map the whole
+        * paddr - (paddr + size) might be partial page, we should map the whole
         * page.  Note: if two part of one page are separately mapped, we
-        * might have two guest_addr mapping to the same host addr, but this
+        * might have two guest_addr mapping to the same host paddr, but this
         * is not a big problem
         */
-       ret = domain_page_mapping(domain, start_addr,
-               ((u64)addr) & PAGE_MASK_4K, size, prot);
+       ret = domain_page_mapping(domain, start_paddr,
+               ((u64)paddr) & PAGE_MASK, size, prot);
        if (ret)
                goto error;
 
-       pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-               pci_name(pdev), size, (u64)addr,
-               size, (u64)start_addr, dir);
-
        /* it's a non-present to present mapping */
        ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-                       start_addr, size >> PAGE_SHIFT_4K, 1);
+                       start_paddr, size >> VTD_PAGE_SHIFT, 1);
        if (ret)
                iommu_flush_write_buffer(domain->iommu);
 
-       return (start_addr + ((u64)addr & (~PAGE_MASK_4K)));
+       return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
        if (iova)
                __free_iova(&domain->iovad, iova);
        printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-               pci_name(pdev), size, (u64)addr, dir);
+               pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-       size_t size, int dir)
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+                           size_t size, int dir)
+{
+       return __intel_map_single(hwdev, paddr, size, dir,
+                                 to_pci_dev(hwdev)->dma_mask);
+}
+
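Note: the mapping entry point now takes a phys_addr_t and the DMA mask explicitly, so streaming mappings keep honouring pdev->dma_mask while intel_alloc_coherent() below can pass the device's coherent_dma_mask instead. An illustrative call of the streaming path; dev, buf and len are placeholders, not symbols from this patch:

        dma_addr_t handle = intel_map_single(dev, virt_to_phys(buf), len,
                                             DMA_TO_DEVICE);
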
+static void flush_unmaps(void)
+{
+       int i, j;
+
+       timer_on = 0;
+
+       /* just flush them all */
+       for (i = 0; i < g_num_of_iommus; i++) {
+               if (deferred_flush[i].next) {
+                       struct intel_iommu *iommu =
+                               deferred_flush[i].domain[0]->iommu;
+
+                       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+                                                DMA_TLB_GLOBAL_FLUSH, 0);
+                       for (j = 0; j < deferred_flush[i].next; j++) {
+                               __free_iova(&deferred_flush[i].domain[j]->iovad,
+                                               deferred_flush[i].iova[j]);
+                       }
+                       deferred_flush[i].next = 0;
+               }
+       }
+
+       list_size = 0;
+}
+
+static void flush_unmaps_timeout(unsigned long data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&async_umap_flush_lock, flags);
+       flush_unmaps();
+       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
+
+static void add_unmap(struct dmar_domain *dom, struct iova *iova)
+{
+       unsigned long flags;
+       int next, iommu_id;
+
+       spin_lock_irqsave(&async_umap_flush_lock, flags);
+       if (list_size == HIGH_WATER_MARK)
+               flush_unmaps();
+
+       iommu_id = dom->iommu->seq_id;
+
+       next = deferred_flush[iommu_id].next;
+       deferred_flush[iommu_id].domain[next] = dom;
+       deferred_flush[iommu_id].iova[next] = iova;
+       deferred_flush[iommu_id].next++;
+
+       if (!timer_on) {
+               mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
+               timer_on = 1;
+       }
+       list_size++;
+       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
+
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+                       int dir)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
@@ -1927,32 +1962,38 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        if (!iova)
                return;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
        size = aligned_size((u64)dev_addr, size);
 
        pr_debug("Device %s unmapping: %lx@%llx\n",
-               pci_name(pdev), size, (u64)start_addr);
+               pci_name(pdev), size, (unsigned long long)start_addr);
 
        /*  clear the whole page */
        dma_pte_clear_range(domain, start_addr, start_addr + size);
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
-
-       if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-                       size >> PAGE_SHIFT_4K, 0))
-               iommu_flush_write_buffer(domain->iommu);
-
-       /* free iova */
-       __free_iova(&domain->iovad, iova);
+       if (intel_iommu_strict) {
+               if (iommu_flush_iotlb_psi(domain->iommu,
+                       domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
+                       iommu_flush_write_buffer(domain->iommu);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap to save the 1/6th of the
+                * cpu used up by the iotlb flush operation...
+                */
+       }
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
-                      dma_addr_t *dma_handle, gfp_t flags)
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flags)
 {
        void *vaddr;
        int order;
 
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        order = get_order(size);
        flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1961,19 +2002,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
                return NULL;
        memset(vaddr, 0, size);
 
-       *dma_handle = intel_map_single(hwdev, vaddr, size, DMA_BIDIRECTIONAL);
+       *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+                                        DMA_BIDIRECTIONAL,
+                                        hwdev->coherent_dma_mask);
        if (*dma_handle)
                return vaddr;
        free_pages((unsigned long)vaddr, order);
        return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-       void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+                        dma_addr_t dma_handle)
 {
        int order;
 
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        order = get_order(size);
 
        intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -1981,8 +2024,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-       int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+                   int nelems, int dir)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2006,7 +2050,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                size += aligned_size((u64)addr, sg->length);
        }
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
 
        /*  clear the whole page */
        dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2014,7 +2058,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
        if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-                       size >> PAGE_SHIFT_4K, 0))
+                       size >> VTD_PAGE_SHIFT, 0))
                iommu_flush_write_buffer(domain->iommu);
 
        /* free iova */
@@ -2035,8 +2079,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
        return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-                               int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+                int dir)
 {
        void *addr;
        int i;
@@ -2064,7 +2108,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
                size += aligned_size((u64)addr, sg->length);
        }
 
-       iova = __intel_alloc_iova(hwdev, domain, size);
+       iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;
@@ -2080,14 +2124,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
        offset = 0;
        for_each_sg(sglist, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                addr = (void *)virt_to_phys(addr);
                size = aligned_size((u64)addr, sg->length);
                ret = domain_page_mapping(domain, start_addr + offset,
-                       ((u64)addr) & PAGE_MASK_4K,
+                       ((u64)addr) & PAGE_MASK,
                        size, prot);
                if (ret) {
                        /*  clear the page */
@@ -2101,14 +2145,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
                        return 0;
                }
                sg->dma_address = start_addr + offset +
-                               ((u64)addr & (~PAGE_MASK_4K));
+                               ((u64)addr & (~PAGE_MASK));
                sg->dma_length = sg->length;
                offset += size;
        }
 
        /* it's a non-present to present mapping */
        if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-                       start_addr, offset >> PAGE_SHIFT_4K, 1))
+                       start_addr, offset >> VTD_PAGE_SHIFT, 1))
                iommu_flush_write_buffer(domain->iommu);
        return nelems;
 }
@@ -2148,7 +2192,6 @@ static inline int iommu_devinfo_cache_init(void)
                                         sizeof(struct device_domain_info),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
-
                                         NULL);
        if (!iommu_devinfo_cache) {
                printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2166,7 +2209,6 @@ static inline int iommu_iova_cache_init(void)
                                         sizeof(struct iova),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
-
                                         NULL);
        if (!iommu_iova_cache) {
                printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2206,15 +2248,6 @@ static void __init iommu_exit_mempool(void)
 
 }
 
-void __init detect_intel_iommu(void)
-{
-       if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
-               return;
-       if (early_dmar_detect()) {
-               iommu_detected = 1;
-       }
-}
-
 static void __init init_no_remapping_devices(void)
 {
        struct dmar_drhd_unit *drhd;
@@ -2261,12 +2294,19 @@ int __init intel_iommu_init(void)
 {
        int ret = 0;
 
-       if (no_iommu || swiotlb || dmar_disabled)
-               return -ENODEV;
-
        if (dmar_table_init())
                return  -ENODEV;
 
+       if (dmar_dev_scope_init())
+               return  -ENODEV;
+
+       /*
+        * Check the need for DMA-remapping initialization now.
+        * Above initialization will also be used by Interrupt-remapping.
+        */
+       if (no_iommu || swiotlb || dmar_disabled)
+               return -ENODEV;
+
        iommu_init_mempool();
        dmar_init_reserved_ranges();
 
@@ -2282,8 +2322,117 @@ int __init intel_iommu_init(void)
        printk(KERN_INFO
        "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
+       init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
        return 0;
 }
 
+void intel_iommu_domain_exit(struct dmar_domain *domain)
+{
+       u64 end;
+
+       /* Domain 0 is reserved, so don't process it */
+       if (!domain)
+               return;
+
+       end = DOMAIN_MAX_ADDR(domain->gaw);
+       end = end & (~VTD_PAGE_MASK);
+
+       /* clear ptes */
+       dma_pte_clear_range(domain, 0, end);
+
+       /* free page tables */
+       dma_pte_free_pagetable(domain, 0, end);
+
+       iommu_free_domain(domain);
+       free_domain_mem(domain);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
+
+struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+{
+       struct dmar_drhd_unit *drhd;
+       struct dmar_domain *domain;
+       struct intel_iommu *iommu;
+
+       drhd = dmar_find_matched_drhd_unit(pdev);
+       if (!drhd) {
+               printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
+               return NULL;
+       }
+
+       iommu = drhd->iommu;
+       if (!iommu) {
+               printk(KERN_ERR
+                       "intel_iommu_domain_alloc: iommu == NULL\n");
+               return NULL;
+       }
+       domain = iommu_alloc_domain(iommu);
+       if (!domain) {
+               printk(KERN_ERR
+                       "intel_iommu_domain_alloc: domain == NULL\n");
+               return NULL;
+       }
+       if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+               printk(KERN_ERR
+                       "intel_iommu_domain_alloc: domain_init() failed\n");
+               intel_iommu_domain_exit(domain);
+               return NULL;
+       }
+       return domain;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
+
+int intel_iommu_context_mapping(
+       struct dmar_domain *domain, struct pci_dev *pdev)
+{
+       int rc;
+       rc = domain_context_mapping(domain, pdev);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
+
+int intel_iommu_page_mapping(
+       struct dmar_domain *domain, dma_addr_t iova,
+       u64 hpa, size_t size, int prot)
+{
+       int rc;
+       rc = domain_page_mapping(domain, iova, hpa, size, prot);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
+
+void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+       detach_domain_for_dev(domain, bus, devfn);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
+
+struct dmar_domain *
+intel_iommu_find_domain(struct pci_dev *pdev)
+{
+       return find_domain(pdev);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
+
+int intel_iommu_found(void)
+{
+       return g_num_of_iommus;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_found);
+
+u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+{
+       struct dma_pte *pte;
+       u64 pfn;
+
+       pfn = 0;
+       pte = addr_to_dma_pte(domain, iova);
+
+       if (pte)
+               pfn = dma_pte_addr(*pte);
+
+       return pfn >> VTD_PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
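
Note: the exported helpers above form a small domain API for external users (at the time, the VT-d device-assignment work). A hedged sketch of how a consumer might chain them together; assign_device_example(), its arguments and the error handling are illustrative only:

static int assign_device_example(struct pci_dev *pdev, dma_addr_t iova,
                                 u64 hpa, size_t size)
{
        struct dmar_domain *domain;
        int ret;

        if (!intel_iommu_found())               /* no DRHD units were set up */
                return -ENODEV;

        domain = intel_iommu_domain_alloc(pdev); /* domain on the matching IOMMU */
        if (!domain)
                return -ENOMEM;

        ret = intel_iommu_context_mapping(domain, pdev); /* program the context entry */
        if (!ret)
                ret = intel_iommu_page_mapping(domain, iova, hpa, size,
                                               DMA_PTE_READ | DMA_PTE_WRITE);
        if (ret) {
                intel_iommu_detach_dev(domain, pdev->bus->number, pdev->devfn);
                intel_iommu_domain_exit(domain);
        }
        return ret;
}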