Merge git://git.infradead.org/iommu-2.6
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a6a6f8e..a8fd9eb 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
+/* A list of preallocated protection domains */
+static LIST_HEAD(iommu_pd_list);
+static DEFINE_SPINLOCK(iommu_pd_list_lock);
+
 /*
  * general struct to manage commands sent to an IOMMU
  */
@@ -197,10 +201,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-       int ret, ready = 0;
+       int ret = 0, ready = 0;
        unsigned status = 0;
        struct iommu_cmd cmd;
-       unsigned long i = 0;
+       unsigned long flags, i = 0;
 
        memset(&cmd, 0, sizeof(cmd));
        cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
@@ -208,10 +212,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        iommu->need_sync = 0;
 
-       ret = iommu_queue_command(iommu, &cmd);
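+       /* hold iommu->lock so queueing the wait command and polling for its
+        * completion are not interleaved with other commands */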
+       spin_lock_irqsave(&iommu->lock, flags);
+
+       ret = __iommu_queue_command(iommu, &cmd);
 
        if (ret)
-               return ret;
+               goto out;
 
        while (!ready && (i < EXIT_LOOP_COUNT)) {
                ++i;
@@ -226,6 +232,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
                printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+out:
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
 }
@@ -236,6 +244,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 {
        struct iommu_cmd cmd;
+       int ret;
 
        BUG_ON(iommu == NULL);
 
@@ -243,9 +252,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;
 
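+       /* queue the invalidation first, then flag that a completion wait is needed */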
+       ret = iommu_queue_command(iommu, &cmd);
+
        iommu->need_sync = 1;
 
-       return iommu_queue_command(iommu, &cmd);
+       return ret;
 }
 
 /*
@@ -255,6 +266,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
                u64 address, u16 domid, int pde, int s)
 {
        struct iommu_cmd cmd;
+       int ret;
 
        memset(&cmd, 0, sizeof(cmd));
        address &= PAGE_MASK;
@@ -267,9 +279,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 
+       ret = iommu_queue_command(iommu, &cmd);
+
        iommu->need_sync = 1;
 
-       return iommu_queue_command(iommu, &cmd);
+       return ret;
 }
 
 /*
@@ -281,7 +295,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                u64 address, size_t size)
 {
        int s = 0;
-       unsigned pages = iommu_num_pages(address, size);
+       unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
        address &= PAGE_MASK;
 
@@ -466,11 +480,6 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * efficient allocator.
  *
  ****************************************************************************/
-static unsigned long dma_mask_to_pages(unsigned long mask)
-{
-       return (mask >> PAGE_SHIFT) +
-               (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
-}
 
 /*
  * The address allocator core function.
@@ -480,16 +489,17 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                             struct dma_ops_domain *dom,
                                             unsigned int pages,
-                                            unsigned long align_mask)
+                                            unsigned long align_mask,
+                                            u64 dma_mask)
 {
-       unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
+       unsigned long limit;
        unsigned long address;
-       unsigned long size = dom->aperture_size >> PAGE_SHIFT;
        unsigned long boundary_size;
 
        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                        PAGE_SIZE) >> PAGE_SHIFT;
-       limit = limit < size ? limit : size;
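+       /* limit the search space by both the aperture size and the device dma_mask */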
+       limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
+                                      dma_mask >> PAGE_SHIFT);
 
        if (dom->next_bit >= limit) {
                dom->next_bit = 0;
@@ -568,7 +578,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
        if (start_page + pages > last_page)
                pages = last_page - start_page;
 
-       set_bit_string(dom->bitmap, start_page, pages);
+       iommu_area_reserve(dom->bitmap, start_page, pages);
 }
 
 static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
@@ -663,13 +673,15 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
        dma_dom->next_bit = 0;
 
        dma_dom->need_flush = false;
+       dma_dom->target_dev = 0xffff;
 
        /* Initialize the exclusion range if necessary */
        if (iommu->exclusion_start &&
            iommu->exclusion_start < dma_dom->aperture_size) {
                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                int pages = iommu_num_pages(iommu->exclusion_start,
-                                           iommu->exclusion_length);
+                                           iommu->exclusion_length,
+                                           PAGE_SIZE);
                dma_ops_reserve_addresses(dma_dom, startpage, pages);
        }
 
@@ -734,12 +746,13 @@ static void set_device_domain(struct amd_iommu *iommu,
 
        u64 pte_root = virt_to_phys(domain->pt_root);
 
-       pte_root |= (domain->mode & 0x07) << 9;
-       pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2;
+       pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+                   << DEV_ENTRY_MODE_SHIFT;
+       pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-       amd_iommu_dev_table[devid].data[0] = pte_root;
-       amd_iommu_dev_table[devid].data[1] = pte_root >> 32;
+       amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+       amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[2] = domain->id;
 
        amd_iommu_pd_table[devid] = domain;
@@ -769,6 +782,33 @@ static bool check_device(struct device *dev)
 }
 
 /*
+ * Traverse the list of preallocated protection domains to find the one
+ * assigned to a specific device.
+ */
+static struct dma_ops_domain *find_protection_domain(u16 devid)
+{
+       struct dma_ops_domain *entry, *ret = NULL;
+       unsigned long flags;
+
+       if (list_empty(&iommu_pd_list))
+               return NULL;
+
+       spin_lock_irqsave(&iommu_pd_list_lock, flags);
+
+       list_for_each_entry(entry, &iommu_pd_list, list) {
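+               /* take the matching domain off the list so it is handed out only once */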
+               if (entry->target_dev == devid) {
+                       ret = entry;
+                       list_del(&ret->list);
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+       return ret;
+}
+
+/*
  * In the dma_ops path we only have the struct device. This function
  * finds the corresponding IOMMU, the protection domain and the
  * requestor id for a given device.
@@ -803,9 +843,11 @@ static int get_device_resources(struct device *dev,
        *iommu = amd_iommu_rlookup_table[*bdf];
        if (*iommu == NULL)
                return 0;
-       dma_dom = (*iommu)->default_dom;
        *domain = domain_for_device(*bdf);
        if (*domain == NULL) {
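+               /* prefer a preallocated per-device domain, fall back to the default one */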
+               dma_dom = find_protection_domain(*bdf);
+               if (!dma_dom)
+                       dma_dom = (*iommu)->default_dom;
                *domain = &dma_dom->domain;
                set_device_domain(*iommu, *domain, *bdf);
                printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
@@ -885,7 +927,8 @@ static dma_addr_t __map_single(struct device *dev,
                               phys_addr_t paddr,
                               size_t size,
                               int dir,
-                              bool align)
+                              bool align,
+                              u64 dma_mask)
 {
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start;
@@ -893,13 +936,14 @@ static dma_addr_t __map_single(struct device *dev,
        unsigned long align_mask = 0;
        int i;
 
-       pages = iommu_num_pages(paddr, size);
+       pages = iommu_num_pages(paddr, size, PAGE_SIZE);
        paddr &= PAGE_MASK;
 
        if (align)
                align_mask = (1UL << get_order(size)) - 1;
 
-       address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask);
+       address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
+                                         dma_mask);
        if (unlikely(address == bad_dma_address))
                goto out;
 
@@ -911,7 +955,7 @@ static dma_addr_t __map_single(struct device *dev,
        }
        address += offset;
 
-       if (unlikely(dma_dom->need_flush && !iommu_fullflush)) {
+       if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
                iommu_flush_tlb(iommu, dma_dom->domain.id);
                dma_dom->need_flush = false;
        } else if (unlikely(iommu_has_npcache(iommu)))
@@ -937,7 +981,7 @@ static void __unmap_single(struct amd_iommu *iommu,
        if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
                return;
 
-       pages = iommu_num_pages(dma_addr, size);
+       pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;
 
@@ -948,7 +992,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-       if (iommu_fullflush)
+       if (amd_iommu_unmap_flush)
                iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
@@ -963,10 +1007,13 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
        struct protection_domain *domain;
        u16 devid;
        dma_addr_t addr;
+       u64 dma_mask;
 
        if (!check_device(dev))
                return bad_dma_address;
 
+       dma_mask = *dev->dma_mask;
+
        get_device_resources(dev, &iommu, &domain, &devid);
 
        if (iommu == NULL || domain == NULL)
@@ -974,7 +1021,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
                return (dma_addr_t)paddr;
 
        spin_lock_irqsave(&domain->lock, flags);
-       addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false);
+       addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
+                           dma_mask);
        if (addr == bad_dma_address)
                goto out;
 
@@ -1046,10 +1094,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
        struct scatterlist *s;
        phys_addr_t paddr;
        int mapped_elems = 0;
+       u64 dma_mask;
 
        if (!check_device(dev))
                return 0;
 
+       dma_mask = *dev->dma_mask;
+
        get_device_resources(dev, &iommu, &domain, &devid);
 
        if (!iommu || !domain)
@@ -1061,7 +1112,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                paddr = sg_phys(s);
 
                s->dma_address = __map_single(dev, iommu, domain->priv,
-                                             paddr, s->length, dir, false);
+                                             paddr, s->length, dir, false,
+                                             dma_mask);
 
                if (s->dma_address) {
                        s->dma_length = s->length;
@@ -1134,28 +1186,33 @@ static void *alloc_coherent(struct device *dev, size_t size,
        struct protection_domain *domain;
        u16 devid;
        phys_addr_t paddr;
+       u64 dma_mask = dev->coherent_dma_mask;
 
        if (!check_device(dev))
                return NULL;
 
+       if (!get_device_resources(dev, &iommu, &domain, &devid))
+               flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+       flag |= __GFP_ZERO;
        virt_addr = (void *)__get_free_pages(flag, get_order(size));
        if (!virt_addr)
                return 0;
 
-       memset(virt_addr, 0, size);
        paddr = virt_to_phys(virt_addr);
 
-       get_device_resources(dev, &iommu, &domain, &devid);
-
        if (!iommu || !domain) {
                *dma_addr = (dma_addr_t)paddr;
                return virt_addr;
        }
 
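+       /* no coherent mask set, fall back to the streaming DMA mask */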
+       if (!dma_mask)
+               dma_mask = *dev->dma_mask;
+
        spin_lock_irqsave(&domain->lock, flags);
 
        *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
-                                size, DMA_BIDIRECTIONAL, true);
+                                size, DMA_BIDIRECTIONAL, true, dma_mask);
 
        if (*dma_addr == bad_dma_address) {
                free_pages((unsigned long)virt_addr, get_order(size));
@@ -1257,10 +1314,9 @@ void prealloc_protection_domains(void)
                if (!dma_dom)
                        continue;
                init_unity_mappings_for_device(dma_dom, devid);
-               set_device_domain(iommu, &dma_dom->domain, devid);
-               printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
-                      dma_dom->domain.id);
-               print_devid(devid, 1);
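+               /* do not attach the domain yet, queue it for find_protection_domain() */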
+               dma_dom->target_dev = devid;
+
+               list_add_tail(&dma_dom->list, &iommu_pd_list);
        }
 }