#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
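+/* 0x8086:0x3a3e is the Intel ICH10 HD Audio (Azalia) controller; the
+   same device is special-cased again in check_tylersburg_isoch(). */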
+#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
#define MAX_AGAW_WIDTH 64
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+ to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
+ __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
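+
+/* Worked example: with gaw == 48 and VTD_PAGE_SHIFT == 12,
+   __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1. That fits in a 64-bit
+   unsigned long unchanged, but on 32-bit DOMAIN_MAX_PFN() clamps it
+   to ULONG_MAX (0xffffffff). */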
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
+static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
* 2. It maps to each iommu if successful.
* 3. Each iommu maps to this domain if successful.
*/
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
struct dmar_domain {
int id; /* domain id */
+ int nid; /* node id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */
int segment; /* PCI domain */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
- struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct pci_dev *dev; /* it's NULL for a PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;
-static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
-{
- unsigned int flags;
- void *vaddr;
-
- /* trying to avoid low memory issues */
- flags = current->flags & PF_MEMALLOC;
- current->flags |= PF_MEMALLOC;
- vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
- current->flags &= (~PF_MEMALLOC | flags);
- return vaddr;
-}
-
-
-static inline void *alloc_pgtable_page(void)
+static inline void *alloc_pgtable_page(int node)
{
- unsigned int flags;
- void *vaddr;
+ struct page *page;
+ void *vaddr = NULL;
- /* trying to avoid low memory issues */
- flags = current->flags & PF_MEMALLOC;
- current->flags |= PF_MEMALLOC;
- vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
- current->flags &= (~PF_MEMALLOC | flags);
+ page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+ if (page)
+ vaddr = page_address(page);
return vaddr;
}
static inline void *alloc_domain_mem(void)
{
- return iommu_kmem_cache_alloc(iommu_domain_cache);
+ return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}
static void free_domain_mem(void *vaddr)
static inline void * alloc_devinfo_mem(void)
{
- return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+ return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}
static inline void free_devinfo_mem(void *vaddr)
struct iova *alloc_iova_mem(void)
{
- return iommu_kmem_cache_alloc(iommu_iova_cache);
+ return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
void free_iova_mem(struct iova *iova)
root = &iommu->root_entry[bus];
context = get_context_addr_from_root(root);
if (!context) {
- context = (struct context_entry *)alloc_pgtable_page();
+ context = (struct context_entry *)
+ alloc_pgtable_page(iommu->node);
if (!context) {
spin_unlock_irqrestore(&iommu->lock, flags);
return NULL;
if (!dma_pte_present(pte)) {
uint64_t pteval;
- tmp_page = alloc_pgtable_page();
+ tmp_page = alloc_pgtable_page(domain->nid);
if (!tmp_page)
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (cmpxchg64(&pte->val, 0ULL, pteval)) {
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
- while (start_pfn <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, 2);
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+
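+ /* start_pfn wraps to zero once the clear has passed the very top
+    of the address space (DOMAIN_MAX_PFN can be ULONG_MAX), so a
+    zero start_pfn means we are done. */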
+ } while (start_pfn && start_pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* We don't need lock here; nobody else touches the iova range */
level = 2;
if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp + level_size(level) - 1 <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
continue;
}
do {
- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
- dma_clear_pte(pte);
+ if (dma_pte_present(pte)) {
+ free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+ dma_clear_pte(pte);
+ }
pte++;
tmp += level_size(level);
} while (!first_pte_in_page(pte) &&
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
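+ /* As above, 'tmp' wrapping to zero means the topmost range at
+    this level has just been handled. */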
+ } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
level++;
}
/* free pgd */
struct root_entry *root;
unsigned long flags;
- root = (struct root_entry *)alloc_pgtable_page();
+ root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root)
return -ENOMEM;
pr_debug("Number of Domains supportd <%ld>\n", ndomains);
nlongs = BITS_TO_LONGS(ndomains);
+ spin_lock_init(&iommu->lock);
+
/* TBD: there might be 64K domains,
* consider other allocation for future chip
*/
GFP_KERNEL);
if (!iommu->domains) {
printk(KERN_ERR "Allocating domain array failed\n");
- kfree(iommu->domain_ids);
return -ENOMEM;
}
- spin_lock_init(&iommu->lock);
-
/*
* if Caching mode is set, then invalid translations are tagged
* with domainid 0. Hence we need to pre-allocate it.
int i;
unsigned long flags;
- i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
- for (; i < cap_ndoms(iommu->cap); ) {
- domain = iommu->domains[i];
- clear_bit(i, iommu->domain_ids);
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+ for (; i < cap_ndoms(iommu->cap); ) {
+ domain = iommu->domains[i];
+ clear_bit(i, iommu->domain_ids);
+
+ spin_lock_irqsave(&domain->iommu_lock, flags);
+ if (--domain->iommu_count == 0) {
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+ vm_domain_exit(domain);
+ else
+ domain_exit(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_exit(domain);
- else
- domain_exit(domain);
+ i = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), i+1);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
- i = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), i+1);
}
if (iommu->gcmd & DMA_GCMD_TE)
if (!domain)
return NULL;
+ domain->nid = -1;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = 0;
}
static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;
static void dmar_init_reserved_ranges(void)
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
- lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
- &reserved_alloc_key);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
domain->iommu_snooping = 0;
domain->iommu_count = 1;
+ domain->nid = iommu->node;
/* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd)
return -ENOMEM;
__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
}
set_bit(num, iommu->domain_ids);
- set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain;
id = num;
}
/* Skip top levels of page tables for
* iommu which has less agaw than default.
+ * Unnecessary for PT mode.
*/
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return -ENOMEM;
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd)) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return -ENOMEM;
+ }
}
}
}
spin_lock_irqsave(&domain->iommu_lock, flags);
if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
domain->iommu_count++;
+ if (domain->iommu_count == 1)
+ domain->nid = iommu->node;
domain_update_iommu_cap(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
return ret;
parent = parent->bus->self;
}
- if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
tmp->subordinate->number, 0,
return ret;
parent = parent->bus->self;
}
- if (tmp->is_pcie)
+ if (pci_is_pcie(tmp))
return device_context_mapped(iommu, tmp->subordinate->number,
0);
else
tmp->devfn);
}
+/* Returns the number of VT-d pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+ size_t size)
+{
+ host_addr &= ~PAGE_MASK;
+ return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
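+/* Worked example, assuming 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT
+   == 12): host_addr == 0x1234, size == 0x2000. The in-page offset is
+   0x234, PAGE_ALIGN(0x234 + 0x2000) == 0x3000, so the mapping covers
+   3 VT-d pages even though the raw size spans only 2. */
+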
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
uint64_t tmp;
if (!sg_res) {
- sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+ sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot;
dev_tmp = pci_find_upstream_pcie_bridge(pdev);
if (dev_tmp) {
- if (dev_tmp->is_pcie) {
+ if (pci_is_pcie(dev_tmp)) {
bus = dev_tmp->subordinate->number;
devfn = 0;
} else {
}
static int iommu_identity_mapping;
+#define IDENTMAP_ALL 1
+#define IDENTMAP_GFX 2
+#define IDENTMAP_AZALIA 4
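+
+/* iommu_identity_mapping is a bitmask of the flags above: IDENTMAP_ALL
+   is set for iommu=pt, IDENTMAP_GFX under CONFIG_DMAR_BROKEN_GFX_WA,
+   and IDENTMAP_AZALIA by the Tylersburg isoch quirk below. */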
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
struct dmar_domain *domain;
int ret;
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (!domain)
+ return -ENOMEM;
+
+ /* For _hardware_ passthrough, don't bother. But for software
+ passthrough, we do it anyway -- it may indicate a memory
+ range which is reserved in E820, and which therefore didn't
+ get set up to start with in si_domain */
+ if (domain == si_domain && hw_pass_through) {
+ printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+ return 0;
+ }
+
printk(KERN_INFO
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
pci_name(pdev), start, end);
+
+ if (end < start) {
+ WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ ret = -EIO;
+ goto error;
+ }
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
+ if (end >> agaw_to_width(domain->agaw)) {
+ WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ agaw_to_width(domain->agaw),
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ ret = -EIO;
+ goto error;
+ }
ret = iommu_domain_identity_map(domain, start, end);
if (ret)
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
- struct pci_dev *pdev = NULL;
- struct dmar_domain *domain;
- int ret;
-
- for_each_pci_dev(pdev) {
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- ret = domain_context_mapping(domain, pdev,
- CONTEXT_TT_PASS_THROUGH);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_work_fn(unsigned long start_pfn,
}
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ if (hw)
+ return 0;
+
for_each_online_node(nid) {
work_with_active_regions(nid, si_domain_work_fn, &ret);
if (ret)
}
static int domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
+ struct pci_dev *pdev,
+ int translation)
{
struct device_domain_info *info;
unsigned long flags;
+ int ret;
info = alloc_devinfo_mem();
if (!info)
return -ENOMEM;
+ ret = domain_context_mapping(domain, pdev, translation);
+ if (ret) {
+ free_devinfo_mem(info);
+ return ret;
+ }
+
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
return 0;
}
-static int iommu_prepare_static_identity_mapping(void)
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
+
+ if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+ return 1;
+
+ if (!(iommu_identity_mapping & IDENTMAP_ALL))
+ return 0;
+
+ /*
+ * We want to start off with all devices in the 1:1 domain, and
+ * take them out later if we find they can't access all of memory.
+ *
+ * However, we can't do this for PCI devices behind bridges,
+ * because all PCI devices behind the same bridge will end up
+ * with the same source-id on their transactions.
+ *
+ * Practically speaking, we can't change things around for these
+ * devices at run-time, because we can't be sure there'll be no
+ * DMA transactions in flight for any of their siblings.
+ *
+ * So PCI devices (unless they're on the root bus) as well as
+ * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+ * the 1:1 domain, just in _case_ one of their siblings turns out
+ * not to be able to map all of memory.
+ */
+ if (!pci_is_pcie(pdev)) {
+ if (!pci_is_root_bus(pdev->bus))
+ return 0;
+ if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+ return 0;
+ } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ /*
+ * At boot time, we don't yet know if devices will be 64-bit capable.
+ * Assume that they will -- if they turn out not to be, then we can
+ * take them out of the 1:1 domain later.
+ */
+ if (!startup)
+ return pdev->dma_mask > DMA_BIT_MASK(32);
+
+ return 1;
+}
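+/* Example of the resulting policy under iommu=pt (IDENTMAP_ALL): a PCIe
+   device on the root bus is identity-mapped at startup, while a
+   conventional-PCI device behind a bridge is not, because every sibling
+   behind that bridge shares a single source-id. */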
+
+static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
int ret;
- ret = si_domain_init();
+ ret = si_domain_init(hw);
if (ret)
return -EFAULT;
for_each_pci_dev(pdev) {
- printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
- pci_name(pdev));
-
- ret = domain_context_mapping(si_domain, pdev,
- CONTEXT_TT_MULTI_LEVEL);
- if (ret)
- return ret;
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return ret;
+ if (iommu_should_identity_map(pdev, 1)) {
+ printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+ hw ? "hardware" : "software", pci_name(pdev));
+
+ ret = domain_add_dev_info(si_domain, pdev,
+ hw ? CONTEXT_TT_PASS_THROUGH :
+ CONTEXT_TT_MULTI_LEVEL);
+ if (ret)
+ return ret;
+ }
}
return 0;
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
- int pass_through = 1;
-
- /*
- * In case pass through can not be enabled, iommu tries to use identity
- * mapping.
- */
- if (iommu_pass_through)
- iommu_identity_mapping = 1;
/*
* for each drhd
deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) {
- kfree(g_iommus);
ret = -ENOMEM;
goto error;
}
goto error;
}
if (!ecap_pass_through(iommu->ecap))
- pass_through = 0;
+ hw_pass_through = 0;
}
- if (iommu_pass_through)
- if (!pass_through) {
- printk(KERN_INFO
- "Pass Through is not supported by hardware.\n");
- iommu_pass_through = 0;
- }
/*
* Start from the sane iommu hardware state.
}
}
+ if (iommu_pass_through)
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+ iommu_identity_mapping |= IDENTMAP_GFX;
+#endif
+
+ check_tylersburg_isoch();
+
/*
- * If pass through is set and enabled, context entries of all pci
- * devices are intialized by pass through translation type.
+ * If pass through is not set or not enabled, set up context entries for
+ * identity mappings for rmrr, gfx, and isa, and fall back to static
+ * identity mapping if iommu_identity_mapping is set.
*/
- if (iommu_pass_through) {
- ret = init_context_pass_through();
+ if (iommu_identity_mapping) {
+ ret = iommu_prepare_static_identity_mapping(hw_pass_through);
if (ret) {
- printk(KERN_ERR "IOMMU: Pass through init failed.\n");
- iommu_pass_through = 0;
+ printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+ goto error;
}
}
-
/*
- * If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa and may fall back to static
- * identity mapping if iommu_identity_mapping is set.
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
*/
- if (!iommu_pass_through) {
- if (iommu_identity_mapping)
- iommu_prepare_static_identity_mapping();
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- printk(KERN_INFO "IOMMU: Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /*
- * some BIOS lists non-exist devices in DMAR
- * table.
- */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
- }
+ printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * some BIOSes list non-existent devices in the
+ * DMAR table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
}
-
- iommu_prepare_isa();
}
+ iommu_prepare_isa();
+
/*
* for each drhd
* enable fault log
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
if (ret)
goto error;
+
+ iommu_disable_protect_mem_regions(iommu);
}
return 0;
return ret;
}
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
-{
- host_addr &= ~PAGE_MASK;
- host_addr += size + PAGE_SIZE - 1;
-
- return host_addr >> VTD_PAGE_SHIFT;
-}
-
+/* This takes a number of _MM_ pages, not VT-d pages */
static struct iova *intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
return iova;
}
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
struct dmar_domain *domain;
int ret;
return domain;
}
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+ struct device_domain_info *info;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->dev.archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return __get_valid_domain_for_dev(dev);
+}
+
static int iommu_dummy(struct pci_dev *pdev)
{
return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
/* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
{
+ struct pci_dev *pdev;
int found;
+ if (unlikely(dev->bus != &pci_bus_type))
+ return 1;
+
+ pdev = to_pci_dev(dev);
+ if (iommu_dummy(pdev))
+ return 1;
+
if (!iommu_identity_mapping)
- return iommu_dummy(pdev);
+ return 0;
found = identity_mapping(pdev);
if (found) {
- if (pdev->dma_mask > DMA_BIT_MASK(32))
+ if (iommu_should_identity_map(pdev, 0))
return 1;
else {
/*
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+ if (iommu_should_identity_map(pdev, 0)) {
int ret;
- ret = domain_add_dev_info(si_domain, pdev);
+ ret = domain_add_dev_info(si_domain, pdev,
+ hw_pass_through ?
+ CONTEXT_TT_PASS_THROUGH :
+ CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
printk(KERN_INFO "64bit %s uses identity mapping\n",
pci_name(pdev));
}
}
- return iommu_dummy(pdev);
+ return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
int prot = 0;
int ret;
struct intel_iommu *iommu;
+ unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return paddr;
domain = get_valid_domain_for_dev(pdev);
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova)
goto error;
* is not a big problem
*/
ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
- paddr >> VTD_PAGE_SHIFT, size, prot);
+ mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret)
goto error;
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];
- mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
- mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
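+ /* e.g. an 8-page IOVA with 4KiB MM pages: mm_to_dma_pfn(8) == 8,
+    so mask == 3 -- flush an aligned 8-VT-d-page range. */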
iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- iova->pfn_lo << PAGE_SHIFT, mask);
+ (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
struct iova *iova;
struct intel_iommu *iommu;
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(dev))
return;
domain = find_domain(pdev);
}
}
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
- int dir)
-{
- intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
size = PAGE_ALIGN(size);
order = get_order(size);
- flags &= ~(GFP_DMA | GFP_DMA32);
+
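+ /* An identity-mapped (or iommu-less) device sees physical addresses
+    directly, so a small coherent_dma_mask must be honoured here with
+    GFP_DMA/GFP_DMA32 rather than left to IOMMU remapping. */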
+ if (!iommu_no_mapping(hwdev))
+ flags &= ~(GFP_DMA | GFP_DMA32);
+ else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
+ if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+ flags |= GFP_DMA;
+ else
+ flags |= GFP_DMA32;
+ }
vaddr = (void *)__get_free_pages(flags, order);
if (!vaddr)
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+ intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, order);
}
struct iova *iova;
struct intel_iommu *iommu;
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return;
domain = find_domain(pdev);
/* free page tables */
dma_pte_free_pagetable(domain, start_pfn, last_pfn);
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- (last_pfn - start_pfn + 1));
-
- /* free iova */
- __free_iova(&domain->iovad, iova);
+ if (intel_iommu_strict) {
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ last_pfn - start_pfn + 1);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ } else {
+ add_unmap(domain, iova);
+ /*
+ * queue up the release of the unmap to save the roughly 1/6th of
+ * the CPU time otherwise used up by the iotlb flush operation...
+ */
+ }
}
static int intel_nontranslate_map_sg(struct device *hddev,
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
for_each_sg(sglist, sg, nelems, i)
size += aligned_nrpages(sg->offset, sg->length);
- iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
- ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+ ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) {
/* clear the page */
dma_pte_clear_range(domain, start_vpfn,
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
+ iommu_disable_protect_mem_regions(iommu);
}
return 0;
}
#endif /* CONFIG_PM */
+/*
+ * Here we only respond to the unbinding of a device from its driver.
+ *
+ * A newly added device is not attached to its DMAR domain here yet;
+ * that happens when the device is first mapped to an iova.
+ */
+static int device_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dmar_domain *domain;
+
+ if (iommu_no_mapping(dev))
+ return 0;
+
+ domain = find_domain(pdev);
+ if (!domain)
+ return 0;
+
+ if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+ domain_remove_one_dev_info(domain, pdev);
+
+ return 0;
+}
+
+static struct notifier_block device_nb = {
+ .notifier_call = device_notifier,
+};
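+
+/* Registered on pci_bus_type in intel_iommu_init() below. Detaching an
+   unbound device from its domain keeps stale DMA mappings from
+   surviving into a later driver's use of the device. */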
+
int __init intel_iommu_init(void)
{
int ret = 0;
+ int force_on = 0;
- if (dmar_table_init())
+ /* VT-d is required for a TXT/tboot launch, so enforce that */
+ force_on = tboot_force_iommu();
+
+ if (dmar_table_init()) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMAR table\n");
return -ENODEV;
+ }
- if (dmar_dev_scope_init())
+ if (dmar_dev_scope_init()) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMAR device scope\n");
return -ENODEV;
+ }
/*
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+ if (no_iommu || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
ret = init_dmars();
if (ret) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
put_iova_domain(&reserved_iova_list);
iommu_exit_mempool();
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
init_timer(&unmap_timer);
- force_iommu = 1;
-
- if (!iommu_pass_through) {
- printk(KERN_INFO
- "Multi-level page-table translation for DMAR.\n");
- dma_ops = &intel_dma_ops;
- } else
- printk(KERN_INFO
- "DMAR: Pass through translation for DMAR.\n");
+#ifdef CONFIG_SWIOTLB
+ swiotlb = 0;
+#endif
+ dma_ops = &intel_dma_ops;
init_iommu_sysfs();
register_iommu(&intel_iommu_ops);
+ bus_register_notifier(&pci_bus_type, &device_nb);
+
return 0;
}
parent->devfn);
parent = parent->bus->self;
}
- if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
iommu_detach_dev(iommu,
tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
return NULL;
domain->id = vm_domid++;
+ domain->nid = -1;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
domain->iommu_count = 0;
domain->iommu_coherency = 0;
+ domain->iommu_snooping = 0;
domain->max_addr = 0;
+ domain->nid = -1;
/* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd)
return -ENOMEM;
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
struct intel_iommu *iommu;
int addr_width;
u64 end;
- int ret;
/* normally pdev is not mapped */
if (unlikely(domain_context_mapped(pdev))) {
return -EFAULT;
}
- ret = domain_add_dev_info(dmar_domain, pdev);
- if (ret)
- return ret;
-
- ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
- return ret;
+ return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
{
struct dmar_domain *dmar_domain = domain->priv;
+ if (!size)
+ return;
+
dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+
+/* On Tylersburg chipsets, some BIOSes have been known to enable the
+ ISOCH DMAR unit for the Azalia sound device, but not give it any
+ TLB entries, which causes it to deadlock. Check for that. We do
+ this in a function called from init_dmars(), instead of in a PCI
+ quirk, because we don't want to print the obnoxious "BIOS broken"
+ message if VT-d is actually disabled.
+*/
+static void __init check_tylersburg_isoch(void)
+{
+ struct pci_dev *pdev;
+ uint32_t vtisochctrl;
+
+ /* If there's no Azalia in the system anyway, forget it. */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
+ if (!pdev)
+ return;
+ pci_dev_put(pdev);
+
+ /* System Management Registers. Might be hidden, in which case
+ we can't do the sanity check. But that's OK, because the
+ known-broken BIOSes _don't_ actually hide it, so far. */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
+ if (!pdev)
+ return;
+
+ if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
+ pci_dev_put(pdev);
+
+ /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
+ if (vtisochctrl & 1)
+ return;
+
+ /* Drop all bits other than the number of TLB entries */
+ vtisochctrl &= 0x1c;
+
+ /* If we have the recommended number of TLB entries (16), fine. */
+ if (vtisochctrl == 0x10)
+ return;
+
+ /* Zero TLB entries? You get to ride the short bus to school. */
+ if (!vtisochctrl) {
+ WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ iommu_identity_mapping |= IDENTMAP_AZALIA;
+ return;
+ }
+
+ printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+ vtisochctrl);
+}