/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
#define IOVA_START_ADDR (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
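/*
 * Note: DMA_32BIT_PFN and DMA_64BIT_PFN bound iova allocation;
 * __intel_alloc_iova() below first tries to place a mapping below 4GiB
 * and only falls back to the device's full dma_mask if that fails (or
 * if forcedac was given on the command line).
 */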
#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);

static inline unsigned long page_to_dma_pfn(struct page *pg)
        return mm_to_dma_pfn(page_to_pfn(pg));

static inline unsigned long virt_to_dma_pfn(void *p)
        return page_to_dma_pfn(virt_to_page(p));
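/*
 * Example: with 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12) the
 * conversions above are the identity; on a hypothetical 64KiB-page
 * kernel each MM pfn would correspond to 16 VT-d (DMA) pfns.
 */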
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

 * 12-63: Context Ptr (12 - (haw-1))
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
        return (root->val & 1);

static inline void set_root_present(struct root_entry *root)

static inline void set_root_value(struct root_entry *root, unsigned long value)
        root->val |= value & VTD_PAGE_MASK;

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
        return (struct context_entry *)
                (root_present(root)?phys_to_virt(
                root->val & VTD_PAGE_MASK) :

 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
struct context_entry {

static inline bool context_present(struct context_entry *context)
        return (context->lo & 1);

static inline void context_set_present(struct context_entry *context)

static inline void context_set_fault_enable(struct context_entry *context)
        context->lo &= (((u64)-1) << 2) | 1;

static inline void context_set_translation_type(struct context_entry *context,
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;

static inline void context_set_address_root(struct context_entry *context,
        context->lo |= value & VTD_PAGE_MASK;

static inline void context_set_address_width(struct context_entry *context,
        context->hi |= value & 7;

static inline void context_set_domain_id(struct context_entry *context,
        context->hi |= (value & ((1 << 16) - 1)) << 8;

static inline void context_clear_entry(struct context_entry *context)

 * 12-63: Host physical address
static inline void dma_clear_pte(struct dma_pte *pte)

static inline void dma_set_pte_readable(struct dma_pte *pte)
        pte->val |= DMA_PTE_READ;

static inline void dma_set_pte_writable(struct dma_pte *pte)
        pte->val |= DMA_PTE_WRITE;

static inline void dma_set_pte_snp(struct dma_pte *pte)
        pte->val |= DMA_PTE_SNP;

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
        pte->val = (pte->val & ~3) | (prot & 3);

static inline u64 dma_pte_addr(struct dma_pte *pte)
        return (pte->val & VTD_PAGE_MASK);

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
        pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;

static inline bool dma_pte_present(struct dma_pte *pte)
        return (pte->val & 3) != 0;
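/*
 * As the helpers above show, a dma_pte packs the read/write permission
 * bits in bits 0-1, the snoop-control bit (DMA_PTE_SNP) and the target
 * page frame in the VTD_PAGE_MASK-covered high bits.
 */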
 * This domain is a static identity mapping domain.
 * 1. This domain creates a static 1:1 mapping to all usable memory.
 * 2. It maps to each iommu if successful.
 * 3. Each iommu maps to this domain if successful.
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)

        int id; /* domain id */
        unsigned long iommu_bmp; /* bitmap of iommus this domain uses */

        struct list_head devices; /* all devices' list */
        struct iova_domain iovad; /* iova's that belong to this domain */

        struct dma_pte *pgd; /* virtual address */
        spinlock_t mapping_lock; /* page table lock */
        int gaw; /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int flags; /* flags to find out type of domain */

        int iommu_coherency; /* indicate coherency of iommu access */
        int iommu_snooping; /* indicate snooping control feature */
        int iommu_count; /* reference count of iommu */
        spinlock_t iommu_lock; /* protect iommu set in domain */
        u64 max_addr; /* maximum mapped address */

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link; /* link to domain siblings */
        struct list_head global; /* link to global list */
        int segment; /* PCI domain */
        u8 bus; /* PCI bus number */
        u8 devfn; /* PCI devfn number */
        struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;
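/*
 * Deferred unmap bookkeeping: intel_unmap_page() queues freed iovas into
 * deferred_flush[] via add_unmap() instead of flushing the IOTLB for every
 * single unmap. flush_unmaps_timeout() (armed for ~10ms) or hitting
 * HIGH_WATER_MARK queued entries triggers a global flush and releases the
 * iovas.
 */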
static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
        if (!strncmp(str, "on", 2)) {
                printk(KERN_INFO "Intel-IOMMU: enabled\n");
        } else if (!strncmp(str, "off", 3)) {
                printk(KERN_INFO "Intel-IOMMU: disabled\n");
        } else if (!strncmp(str, "igfx_off", 8)) {
                        "Intel-IOMMU: disable GFX device mapping\n");
        } else if (!strncmp(str, "forcedac", 8)) {
                        "Intel-IOMMU: Forcing DAC for PCI devices\n");
        } else if (!strncmp(str, "strict", 6)) {
                        "Intel-IOMMU: disable batched IOTLB flush\n");
                intel_iommu_strict = 1;

        str += strcspn(str, ",");

__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);

static inline void *alloc_pgtable_page(void)
        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);

static inline void free_pgtable_page(void *vaddr)
        free_page((unsigned long)vaddr);

static inline void *alloc_domain_mem(void)
        return iommu_kmem_cache_alloc(iommu_domain_cache);

static void free_domain_mem(void *vaddr)
        kmem_cache_free(iommu_domain_cache, vaddr);

static inline void *alloc_devinfo_mem(void)
        return iommu_kmem_cache_alloc(iommu_devinfo_cache);

static inline void free_devinfo_mem(void *vaddr)
        kmem_cache_free(iommu_devinfo_cache, vaddr);

struct iova *alloc_iova_mem(void)
        return iommu_kmem_cache_alloc(iommu_iova_cache);

void free_iova_mem(struct iova *iova)
        kmem_cache_free(iommu_iova_cache, iova);

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
                if (test_bit(agaw, &sagaw))

 * Calculate max SAGAW for each iommu.
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);

 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * get a smaller supported agaw for iommus that don't support the default agaw.
int iommu_calculate_agaw(struct intel_iommu *iommu)
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)

        return g_iommus[iommu_id];

static void domain_update_iommu_coherency(struct dmar_domain *domain)
        domain->iommu_coherency = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);

static void domain_update_iommu_snooping(struct dmar_domain *domain)
        domain->iommu_snooping = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
        domain_update_iommu_coherency(domain);
        domain_update_iommu_snooping(domain);

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
        struct dmar_drhd_unit *drhd = NULL;

        for_each_drhd_unit(drhd) {
                if (segment != drhd->segment)

                for (i = 0; i < drhd->devices_cnt; i++) {
                        if (drhd->devices[i] &&
                            drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                        if (drhd->devices[i] &&
                            drhd->devices[i]->subordinate &&
                            drhd->devices[i]->subordinate->number <= bus &&
                            drhd->devices[i]->subordinate->subordinate >= bus)

                if (drhd->include_all)

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
                context = (struct context_entry *)alloc_pgtable_page();
                        spin_unlock_irqrestore(&iommu->lock, flags);
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
        struct root_entry *root;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        ret = context_present(&context[devfn]);
        spin_unlock_irqrestore(&iommu->lock, flags);

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
        struct root_entry *root;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn], \
        spin_unlock_irqrestore(&iommu->lock, flags);

static void free_context_table(struct intel_iommu *iommu)
        struct root_entry *root;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                        free_pgtable_page(context);
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
        spin_unlock_irqrestore(&iommu->lock, flags);

/* page table handling */
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)

static inline int agaw_to_width(int agaw)
        return 30 + agaw * LEVEL_STRIDE;

static inline int width_to_agaw(int width)
        return (width - 30) / LEVEL_STRIDE;

static inline unsigned int level_to_offset_bits(int level)
        return (12 + (level - 1) * LEVEL_STRIDE);

static inline int pfn_level_offset(unsigned long pfn, int level)
        return (pfn >> (level_to_offset_bits(level) - 12)) & LEVEL_MASK;

static inline u64 level_mask(int level)
        return ((u64)-1 << level_to_offset_bits(level));

static inline u64 level_size(int level)
        return ((u64)1 << level_to_offset_bits(level));

static inline u64 align_to_level(u64 addr, int level)
        return ((addr + level_size(level) - 1) & level_mask(level));
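/*
 * Each page-table level decodes LEVEL_STRIDE (9) address bits, so
 * agaw_to_width() yields 30 + 9 * agaw bits. For example the default
 * 48-bit width maps to agaw 2, i.e. a 4-level table (agaw 0 being the
 * 2-level, 30-bit case noted in struct dmar_domain above).
 */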
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
        int addr_width = agaw_to_width(domain->agaw);
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);

        BUG_ON(!domain->pgd);
        BUG_ON(addr >> addr_width);
        parent = domain->pgd;

        spin_lock_irqsave(&domain->mapping_lock, flags);
                offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, level);
                pte = &parent[offset];
                if (!dma_pte_present(pte)) {
                        tmp_page = alloc_pgtable_page();
                                spin_unlock_irqrestore(&domain->mapping_lock,
                        domain_flush_cache(domain, tmp_page, PAGE_SIZE);
                        dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
                         * non-leaf levels always allow r/w; the last-level
                         * page table controls read/write
                        dma_set_pte_readable(pte);
                        dma_set_pte_writable(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                parent = phys_to_virt(dma_pte_addr(pte));
        spin_unlock_irqrestore(&domain->mapping_lock, flags);

/* return the pte of an address at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (!dma_pte_present(pte))
                parent = phys_to_virt(dma_pte_addr(pte));

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
        struct dma_pte *pte = NULL;

        /* get last level pte */
        pte = dma_pfn_level_pte(domain, pfn, 1);
                domain_flush_cache(domain, pte, sizeof(*pte));

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
        unsigned long start_pfn = IOVA_PFN(start);
        unsigned long end_pfn = IOVA_PFN(end-1);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && end_pfn >> addr_width);

        /* we don't need lock here; nobody else touches the iova range */
        while (start_pfn <= end_pfn) {
                dma_pte_clear_one(domain, start_pfn);

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
        int addr_width = agaw_to_width(domain->agaw);
        int total = agaw_to_level(domain->agaw);

        BUG_ON(start >> addr_width);
        BUG_ON(end >> addr_width);

        /* we don't need lock here, nobody else touches the iova range */
        while (level <= total) {
                tmp = align_to_level(start, level);
                if (tmp >= end || (tmp + level_size(level) > end))
                        pte = dma_pfn_level_pte(domain, tmp >> VTD_PAGE_SHIFT,
                                phys_to_virt(dma_pte_addr(pte)));
                        domain_flush_cache(domain, pte, sizeof(*pte));
                        tmp += level_size(level);
        if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
                free_pgtable_page(domain->pgd);
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        struct root_entry *root;

        root = (struct root_entry *)alloc_pgtable_page();

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

static void iommu_set_root_entry(struct intel_iommu *iommu)
        addr = iommu->root_entry;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                readl, (sts & DMA_GSTS_RTPS), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
        if (!rwbf_quirk && !cap_rwbf(iommu->cap))

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                readl, (!(val & DMA_GSTS_WBFS)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;

        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need to set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* Note: always flush non-leaf currently */
                val_iva = size_order | addr;

        /* Note: set drain read/write */
         * This is probably to be super secure.. Looks like we can
         * ignore it without any impact.
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;

        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                dmar_readq, (!(val & DMA_TLB_IVT)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                        (unsigned long long)DMA_TLB_IIRG(type),
                        (unsigned long long)DMA_TLB_IAIG(val));
static struct device_domain_info *iommu_support_dev_iotlb(
        struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
        struct device_domain_info *info;
        struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

        if (!ecap_dev_iotlb_support(iommu->ecap))

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev)

        if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))

        if (!dmar_find_matched_atsr_unit(info->dev))

        info->iommu = iommu;

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
        pci_enable_ats(info->dev, VTD_PAGE_SHIFT);

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
        if (!info->dev || !pci_ats_enabled(info->dev))

        pci_disable_ats(info->dev);

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (!info->dev || !pci_ats_enabled(info->dev))

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(info->dev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        spin_unlock_irqrestore(&device_domain_lock, flags);

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  u64 addr, unsigned int pages)
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));

        BUG_ON(addr & (~VTD_PAGE_MASK));

         * Fallback to domain selective flush if no PSI support or the size is
         * PSI requires page size to be 2 ^ x, and the base address is naturally
         * aligned to the size
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                iommu->flush.flush_iotlb(iommu, did, addr, mask,

         * In caching mode, domain ID 0 is reserved for non-present to present
         * mapping flush. Device IOTLB doesn't need to be flushed in this case.
        if (!cap_caching_mode(iommu->cap) || did)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
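/*
 * For PSI, "mask" is the log2 of the number of pages to invalidate, so the
 * hardware flushes a naturally aligned 2^mask page range starting at addr;
 * ranges larger than cap_max_amask_val() degrade to the domain-selective
 * flush above.
 */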
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                readl, !(pmen & DMA_PMEN_PRS), pmen);

        spin_unlock_irqrestore(&iommu->register_lock, flags);

static int iommu_enable_translation(struct intel_iommu *iommu)
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                readl, (sts & DMA_GSTS_TES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);

static int iommu_disable_translation(struct intel_iommu *iommu)
        spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                readl, (!(sts & DMA_GSTS_TES)), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

static int iommu_init_domains(struct intel_iommu *iommu)
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("Number of Domains supported <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                printk(KERN_ERR "Allocating domain id array failed\n");
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
                kfree(iommu->domain_ids);

        spin_lock_init(&iommu->lock);

         * if Caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
        struct dmar_domain *domain;
        unsigned long flags;

        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);

                spin_lock_irqsave(&domain->iommu_lock, flags);
                if (--domain->iommu_count == 0) {
                        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                vm_domain_exit(domain);
                                domain_exit(domain);
                spin_unlock_irqrestore(&domain->iommu_lock, flags);

                i = find_next_bit(iommu->domain_ids,
                        cap_ndoms(iommu->cap), i+1);

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

                set_irq_data(iommu->irq, NULL);
                /* This will mask the irq */
                free_irq(iommu->irq, iommu);
                destroy_irq(iommu->irq);

        kfree(iommu->domains);
        kfree(iommu->domain_ids);

        g_iommus[iommu->seq_id] = NULL;

        /* if all iommus are freed, free g_iommus */
        for (i = 0; i < g_num_of_iommus; i++) {
        if (i == g_num_of_iommus)

        /* free context mapping */
        free_context_table(iommu);

static struct dmar_domain *alloc_domain(void)
        struct dmar_domain *domain;

        domain = alloc_domain_mem();

        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
        unsigned long ndomains;
        unsigned long flags;

        ndomains = cap_ndoms(iommu->cap);

        spin_lock_irqsave(&iommu->lock, flags);

        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num >= ndomains) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                printk(KERN_ERR "IOMMU: no free domain ids\n");

        set_bit(num, iommu->domain_ids);
        set_bit(iommu->seq_id, &domain->iommu_bmp);
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
        num = find_first_bit(iommu->domain_ids, ndomains);
        for (; num < ndomains; ) {
                if (iommu->domains[num] == domain) {
                num = find_next_bit(iommu->domain_ids,
                        cap_ndoms(iommu->cap), num+1);

                clear_bit(num, iommu->domain_ids);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                iommu->domains[num] = NULL;
        spin_unlock_irqrestore(&iommu->lock, flags);
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
        struct pci_dev *pdev = NULL;

        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
                &reserved_alloc_key);
        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                IOVA_PFN(IOAPIC_RANGE_END));
                printk(KERN_ERR "Reserve IOAPIC range failed\n");

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                        addr &= PHYSICAL_PAGE_MASK;
                        size = r->end - addr;
                        size = PAGE_ALIGN(size);
                        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
                                IOVA_PFN(size + addr) - 1);
                                printk(KERN_ERR "Reserve iova failed\n");

static void domain_reserve_special_ranges(struct dmar_domain *domain)
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);

static inline int guestwidth_to_adjustwidth(int gaw)
        int r = (gaw - 12) % 9;

static int domain_init(struct dmar_domain *domain, int guest_width)
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->mapping_lock);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
                domain->iommu_snooping = 0;

        domain->iommu_count = 1;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();

        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);

static void domain_exit(struct dmar_domain *domain)
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /* Domain 0 is reserved, so don't process it */

        domain_remove_dev_info(domain);

        put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
        end = end & (~PAGE_MASK);

        dma_pte_clear_range(domain, 0, end);

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, end);

        for_each_active_iommu(iommu, drhd)
                if (test_bit(iommu->seq_id, &domain->iommu_bmp))
                        iommu_detach_domain(domain, iommu);
        free_domain_mem(domain);

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                                      u8 bus, u8 devfn, int translation)
        struct context_entry *context;
        unsigned long flags;
        struct intel_iommu *iommu;
        struct dma_pte *pgd;
        unsigned long ndomains;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        iommu = device_to_iommu(segment, bus, devfn);

        context = device_to_context_entry(iommu, bus, devfn);

        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);

        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
            domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
                /* find an available domain id for this device in iommu */
                ndomains = cap_ndoms(iommu->cap);
                num = find_first_bit(iommu->domain_ids, ndomains);
                for (; num < ndomains; ) {
                        if (iommu->domains[num] == domain) {
                        num = find_next_bit(iommu->domain_ids,
                                cap_ndoms(iommu->cap), num+1);

                        num = find_first_zero_bit(iommu->domain_ids, ndomains);
                        if (num >= ndomains) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                printk(KERN_ERR "IOMMU: no free domain ids\n");

                        set_bit(num, iommu->domain_ids);
                        set_bit(iommu->seq_id, &domain->iommu_bmp);
                        iommu->domains[num] = domain;

        /* Skip top levels of page tables for
         * iommus which have a smaller agaw than the default.
        for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                pgd = phys_to_virt(dma_pte_addr(pgd));
                if (!dma_pte_present(pgd)) {
                        spin_unlock_irqrestore(&iommu->lock, flags);

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;

         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware. And ASR is ignored by hardware.
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

         * It's a non-present to present mapping. If hardware doesn't cache
         * non-present entries, we only need to flush the write-buffer. If it
         * _does_ cache non-present entries, then it does so in the special
         * domain #0, which we have to flush:
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                        (((u16)bus) << 8) | devfn,
                        DMA_CCMD_MASK_NOBIT,
                        DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
                iommu_flush_write_buffer(iommu);

        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
                domain_update_iommu_cap(domain);
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
        struct pci_dev *tmp, *parent;

        ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
                                         pdev->bus->number, pdev->devfn,

        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);

        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = domain_context_mapping_one(domain,
                                                 pci_domain_nr(parent->bus),
                                                 parent->bus->number,
                                                 parent->devfn, translation);
                parent = parent->bus->self;
        if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
        else /* this is a legacy PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->bus),

static int domain_context_mapped(struct pci_dev *pdev)
        struct pci_dev *tmp, *parent;
        struct intel_iommu *iommu;

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,

        ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);

        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);

        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = device_context_mapped(iommu, parent->bus->number,
                parent = parent->bus->self;
                return device_context_mapped(iommu, tmp->subordinate->number,
                return device_context_mapped(iommu, tmp->bus->number,

domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
                    u64 hpa, size_t size, int prot)
        u64 start_pfn, end_pfn;
        struct dma_pte *pte;
        int addr_width = agaw_to_width(domain->agaw);

        BUG_ON(hpa >> addr_width);

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)

        start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
        end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;

        while (start_pfn < end_pfn) {
                pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
                /* We don't need lock here, nobody else
                 * touches the iova range
                BUG_ON(dma_pte_addr(pte));
                dma_set_pte_pfn(pte, start_pfn);
                dma_set_pte_prot(pte, prot);
                if (prot & DMA_PTE_SNP)
                        dma_set_pte_snp(pte);
                domain_flush_cache(domain, pte, sizeof(*pte));
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

static void domain_remove_dev_info(struct dmar_domain *domain)
        struct device_domain_info *info;
        unsigned long flags;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                        struct device_domain_info, link);
                list_del(&info->link);
                list_del(&info->global);
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu = device_to_iommu(info->segment, info->bus, info->devfn);
                iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);

                spin_lock_irqsave(&device_domain_lock, flags);
        spin_unlock_irqrestore(&device_domain_lock, flags);

 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = pdev->dev.archdata.iommu;
                return info->domain;

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
        struct dmar_domain *domain, *found = NULL;
        struct intel_iommu *iommu;
        struct dmar_drhd_unit *drhd;
        struct device_domain_info *info, *tmp;
        struct pci_dev *dev_tmp;
        unsigned long flags;
        int bus = 0, devfn = 0;

        domain = find_domain(pdev);

        segment = pci_domain_nr(pdev->bus);

        dev_tmp = pci_find_upstream_pcie_bridge(pdev);
                if (dev_tmp->is_pcie) {
                        bus = dev_tmp->subordinate->number;
                        bus = dev_tmp->bus->number;
                        devfn = dev_tmp->devfn;
                spin_lock_irqsave(&device_domain_lock, flags);
                list_for_each_entry(info, &device_domain_list, global) {
                        if (info->segment == segment &&
                            info->bus == bus && info->devfn == devfn) {
                                found = info->domain;
                spin_unlock_irqrestore(&device_domain_lock, flags);
                /* pcie-pci bridge already has a domain, use it */

        domain = alloc_domain();

        /* Allocate new domain for the device */
        drhd = dmar_find_matched_drhd_unit(pdev);
                printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
        iommu = drhd->iommu;

        ret = iommu_attach_domain(domain, iommu);
                domain_exit(domain);

        if (domain_init(domain, gaw)) {
                domain_exit(domain);

        /* register pcie-to-pci device */
                info = alloc_devinfo_mem();
                        domain_exit(domain);
                info->segment = segment;
                info->devfn = devfn;
                info->domain = domain;
                /* This domain is shared by devices under p2p bridge */
                domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

                /* pcie-to-pci bridge already has a domain, use it */
                spin_lock_irqsave(&device_domain_lock, flags);
                list_for_each_entry(tmp, &device_domain_list, global) {
                        if (tmp->segment == segment &&
                            tmp->bus == bus && tmp->devfn == devfn) {
                                found = tmp->domain;
                        free_devinfo_mem(info);
                        domain_exit(domain);
                        list_add(&info->link, &domain->devices);
                        list_add(&info->global, &device_domain_list);
                spin_unlock_irqrestore(&device_domain_lock, flags);

        info = alloc_devinfo_mem();
        info->segment = segment;
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
        info->domain = domain;
        spin_lock_irqsave(&device_domain_lock, flags);
        /* somebody is fast */
        found = find_domain(pdev);
        if (found != NULL) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                if (found != domain) {
                        domain_exit(domain);
                free_devinfo_mem(info);
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        /* recheck it here, maybe others set it */
        return find_domain(pdev);
static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
        unsigned long long base;

        /* The address might not be aligned */
        base = start & PAGE_MASK;
        size = PAGE_ALIGN(size);
        if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
                        IOVA_PFN(base + size) - 1)) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");

        pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
                size, base, domain->id);
         * RMRR range might overlap with the physical memory range,
        dma_pte_clear_range(domain, base, base + size);

        return domain_page_mapping(domain, base, base, size,
                        DMA_PTE_READ|DMA_PTE_WRITE);

static int iommu_prepare_identity_map(struct pci_dev *pdev,
                                      unsigned long long start,
                                      unsigned long long end)
        struct dmar_domain *domain;

                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
                pci_name(pdev), start, end);

        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);

        ret = iommu_domain_identity_map(domain, start, end);

        /* context entry init */
        ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);

        domain_exit(domain);

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
        struct pci_dev *pdev)
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
        return iommu_prepare_identity_map(pdev, rmrr->base_address,
                rmrr->end_address + 1);

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
        struct pci_dev *pdev;

        pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);

        printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

                printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
                        "floppy might not work\n");
#else
static inline void iommu_prepare_isa(void)
#endif /* !CONFIG_DMAR_FLOPPY_WA */
/* Initialize each context entry as pass through. */
static int __init init_context_pass_through(void)
        struct pci_dev *pdev = NULL;
        struct dmar_domain *domain;

        for_each_pci_dev(pdev) {
                domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
                ret = domain_context_mapping(domain, pdev,
                                             CONTEXT_TT_PASS_THROUGH);

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
                                    unsigned long end_pfn, void *datax)
        *ret = iommu_domain_identity_map(si_domain,
                        (uint64_t)start_pfn << PAGE_SHIFT,
                        (uint64_t)end_pfn << PAGE_SHIFT);

static int si_domain_init(void)
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        si_domain = alloc_domain();

        pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

        for_each_active_iommu(iommu, drhd) {
                ret = iommu_attach_domain(si_domain, iommu);
                        domain_exit(si_domain);

        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);

        si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

        for_each_online_node(nid) {
                work_with_active_regions(nid, si_domain_work_fn, &ret);

static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
        struct device_domain_info *info;

        if (likely(!iommu_identity_mapping))

        list_for_each_entry(info, &si_domain->devices, link)
                if (info->dev == pdev)

static int domain_add_dev_info(struct dmar_domain *domain,
                               struct pci_dev *pdev)
        struct device_domain_info *info;
        unsigned long flags;

        info = alloc_devinfo_mem();

        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
        info->domain = domain;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

static int iommu_prepare_static_identity_mapping(void)
        struct pci_dev *pdev = NULL;

        ret = si_domain_init();

        for_each_pci_dev(pdev) {
                printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
                ret = domain_context_mapping(si_domain, pdev,
                                             CONTEXT_TT_MULTI_LEVEL);
                ret = domain_add_dev_info(si_domain, pdev);
int __init init_dmars(void)
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int pass_through = 1;

         * In case pass through cannot be enabled, the iommu tries to use identity
        if (iommu_pass_through)
                iommu_identity_mapping = 1;

         * initialize and program root entry to not present
        for_each_drhd_unit(drhd) {
                /*
                 * lock not needed as this is only incremented in the single
                 * threaded kernel __init code path; all other accesses are reads

        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                printk(KERN_ERR "Allocating global iommu array failed\n");

        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {

        for_each_drhd_unit(drhd) {
                iommu = drhd->iommu;
                g_iommus[iommu->seq_id] = iommu;

                ret = iommu_init_domains(iommu);

                 * we could share the same root & context tables
                 * among all IOMMUs. Need to split it later.
                ret = iommu_alloc_root_entry(iommu);
                        printk(KERN_ERR "IOMMU: allocate root entry failed\n");
                if (!ecap_pass_through(iommu->ecap))

        if (iommu_pass_through)
                if (!pass_through) {
                                "Pass Through is not supported by hardware.\n");
                        iommu_pass_through = 0;

         * Start from a sane iommu hardware state.
        for_each_drhd_unit(drhd) {
                iommu = drhd->iommu;

                 * If the queued invalidation is already initialized by us
                 * (for example, while enabling interrupt-remapping) then
                 * we got things already rolling from a sane state.

                 * Clear any previous faults.
                dmar_fault(-1, iommu);
                 * Disable queued invalidation if supported and already enabled
                 * before OS handover.
                dmar_disable_qi(iommu);

        for_each_drhd_unit(drhd) {
                iommu = drhd->iommu;

                if (dmar_enable_qi(iommu)) {
                         * Queued Invalidate not enabled, use Register Based
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
                        printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
                                (unsigned long long)drhd->reg_base_addr);
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
                        printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
                                (unsigned long long)drhd->reg_base_addr);

         * If pass through is set and enabled, context entries of all pci
         * devices are initialized with the pass through translation type.
        if (iommu_pass_through) {
                ret = init_context_pass_through();
                        printk(KERN_ERR "IOMMU: Pass through init failed.\n");
                        iommu_pass_through = 0;

         * If pass through is not set or not enabled, set up context entries for
         * identity mappings for rmrr, gfx, and isa and may fall back to static
         * identity mapping if iommu_identity_mapping is set.
        if (!iommu_pass_through) {
                if (iommu_identity_mapping)
                        iommu_prepare_static_identity_mapping();
                 * for each dev attached to rmrr
                 *   locate drhd for dev, alloc domain for dev
                 *   allocate free domain
                 *   allocate page table entries for rmrr
                 *   if context not allocated for bus
                 *     allocate and init context
                 *     set present in root table for this bus
                 *   init context with domain, translation etc
                printk(KERN_INFO "IOMMU: Setting RMRR:\n");
                for_each_rmrr_units(rmrr) {
                        for (i = 0; i < rmrr->devices_cnt; i++) {
                                pdev = rmrr->devices[i];
                                 * some BIOSes list non-existent devices in the DMAR
                                ret = iommu_prepare_rmrr_dev(rmrr, pdev);
                                        "IOMMU: mapping reserved region failed\n");

                iommu_prepare_isa();

         * for each drhd:
         *   global invalidate context cache
         *   global invalidate iotlb
         *   enable translation
        for_each_drhd_unit(drhd) {
                iommu = drhd->iommu;

                iommu_flush_write_buffer(iommu);

                ret = dmar_set_interrupt(iommu);

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
                iommu_disable_protect_mem_regions(iommu);
                ret = iommu_enable_translation(iommu);

        for_each_drhd_unit(drhd) {
                iommu = drhd->iommu;

static inline u64 aligned_size(u64 host_addr, size_t size)
        addr = (host_addr & (~PAGE_MASK)) + size;
        return PAGE_ALIGN(addr);

iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
        /* Make sure it's in range */
        end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
        if (!size || (IOVA_START_ADDR + size > end))

        piova = alloc_iova(&domain->iovad,
                        size >> PAGE_SHIFT, IOVA_PFN(end), 1);

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
                   size_t size, u64 dma_mask)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;

        if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
                iova = iommu_alloc_iova(domain, size, dma_mask);
                /*
                 * First try to allocate an io virtual address in
                 * DMA_BIT_MASK(32) and if that fails then try allocating
                iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
                        iova = iommu_alloc_iova(domain, size, dma_mask);

                printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
        struct dmar_domain *domain;

        domain = get_domain_for_dev(pdev,
                        DEFAULT_DOMAIN_ADDRESS_WIDTH);
                        "Allocating domain for %s failed", pci_name(pdev));

        /* make sure context mapping is ok */
        if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev,
                                             CONTEXT_TT_MULTI_LEVEL);
                        "Domain context map for %s failed",

static int iommu_dummy(struct pci_dev *pdev)
        return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;

/* Check if the pdev needs to go through non-identity map and unmap process. */
static int iommu_no_mapping(struct pci_dev *pdev)
        if (!iommu_identity_mapping)
                return iommu_dummy(pdev);

        found = identity_mapping(pdev);
                if (pdev->dma_mask > DMA_BIT_MASK(32))
                /*
                 * A 32 bit DMA device is removed from si_domain and we fall back
                 * to non-identity mapping.
                domain_remove_one_dev_info(si_domain, pdev);
                printk(KERN_INFO "32bit %s uses non-identity mapping\n",

         * In case of a detached 64 bit DMA device from a vm, the device
         * is put into si_domain for identity mapping.
        if (pdev->dma_mask > DMA_BIT_MASK(32)) {
                ret = domain_add_dev_info(si_domain, pdev);
                        printk(KERN_INFO "64bit %s uses identity mapping\n",

        return iommu_dummy(pdev);
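/*
 * Identity-map policy sketch (when iommu_identity_mapping is set): devices
 * capable of 64-bit DMA are (re)attached to si_domain and skip translation,
 * while a device limited to 32-bit DMA is dropped from si_domain and goes
 * through the normal non-identity map/unmap path.
 */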
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                                     size_t size, int dir, u64 dma_mask)
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        struct intel_iommu *iommu;

        BUG_ON(dir == DMA_NONE);

        if (iommu_no_mapping(pdev))

        domain = get_valid_domain_for_dev(pdev);

        iommu = domain_get_iommu(domain);
        size = aligned_size((u64)paddr, size);

        iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);

        start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

         * Check if DMAR supports zero-length reads on write only
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
                        !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
         * paddr - (paddr + size) might be a partial page, we should map the whole
         * page. Note: if two parts of one page are separately mapped, we
         * might have two guest addresses mapping to the same host paddr, but this
         * is not a big problem
        ret = domain_page_mapping(domain, start_paddr,
                                  ((u64)paddr) & PHYSICAL_PAGE_MASK,

        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, 0, start_paddr,
                                      size >> VTD_PAGE_SHIFT);
                iommu_flush_write_buffer(iommu);

        return start_paddr + ((u64)paddr & (~PAGE_MASK));

        __free_iova(&domain->iovad, iova);
        printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
                pci_name(pdev), size, (unsigned long long)paddr, dir);

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
        return __intel_map_single(dev, page_to_phys(page) + offset, size,
                                  dir, to_pci_dev(dev)->dma_mask);
2524 static void flush_unmaps(void)
2530 /* just flush them all */
2531 for (i = 0; i < g_num_of_iommus; i++) {
2532 struct intel_iommu *iommu = g_iommus[i];
2536 if (!deferred_flush[i].next)
2539 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2540 DMA_TLB_GLOBAL_FLUSH);
2541 for (j = 0; j < deferred_flush[i].next; j++) {
2543 struct iova *iova = deferred_flush[i].iova[j];
2545 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2546 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2547 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2548 iova->pfn_lo << PAGE_SHIFT, mask);
2549 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2551 deferred_flush[i].next = 0;
2557 static void flush_unmaps_timeout(unsigned long data)
2559 unsigned long flags;
2561 spin_lock_irqsave(&async_umap_flush_lock, flags);
2563 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2566 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2568 unsigned long flags;
2570 struct intel_iommu *iommu;
2572 spin_lock_irqsave(&async_umap_flush_lock, flags);
2573 if (list_size == HIGH_WATER_MARK)
2576 iommu = domain_get_iommu(dom);
2577 iommu_id = iommu->seq_id;
2579 next = deferred_flush[iommu_id].next;
2580 deferred_flush[iommu_id].domain[next] = dom;
2581 deferred_flush[iommu_id].iova[next] = iova;
2582 deferred_flush[iommu_id].next++;
2585 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2589 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
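/*
 * Deferred-unmap batching in a nutshell (descriptive note): each IOMMU
 * has a deferred_flush[] slot holding up to HIGH_WATER_MARK
 * (domain, iova) pairs.  Queuing an entry arms unmap_timer for ~10ms if
 * it is not already pending; when the timer fires, or when the global
 * list_size reaches HIGH_WATER_MARK, flush_unmaps() performs one global
 * IOTLB flush per IOMMU and frees all queued IOVAs, amortizing the cost
 * of many small unmaps.
 */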
2592 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2593 size_t size, enum dma_data_direction dir,
2594 struct dma_attrs *attrs)
2596 struct pci_dev *pdev = to_pci_dev(dev);
2597 struct dmar_domain *domain;
2598 unsigned long start_addr;
2600 struct intel_iommu *iommu;
2602 if (iommu_no_mapping(pdev))
2605 domain = find_domain(pdev);
2608 iommu = domain_get_iommu(domain);
2610 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2614 start_addr = iova->pfn_lo << PAGE_SHIFT;
2615 size = aligned_size((u64)dev_addr, size);
2617 pr_debug("Device %s unmapping: %zx@%llx\n",
2618 pci_name(pdev), size, (unsigned long long)start_addr);
2620 /* clear the whole page */
2621 dma_pte_clear_range(domain, start_addr, start_addr + size);
2622 /* free page tables */
2623 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2624 if (intel_iommu_strict) {
2625 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2626 size >> VTD_PAGE_SHIFT);
2628 __free_iova(&domain->iovad, iova);
2630 add_unmap(domain, iova);
2632 * queue up the release of the unmap to save the ~1/6th of cpu time
2633 * otherwise consumed by a synchronous iotlb flush operation...
2638 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2641 intel_unmap_page(dev, dev_addr, size, dir, NULL);
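/*
 * Unmap behaviour summary (descriptive note): the PTEs and page tables
 * for the range are always torn down immediately; what differs is the
 * IOTLB.  With intel_iommu_strict the IOTLB is invalidated synchronously
 * and the IOVA is freed right away; otherwise the IOVA is handed to
 * add_unmap() and both the invalidation and the IOVA release happen
 * later in flush_unmaps().  intel_unmap_single() is simply the legacy
 * entry point layered on top of intel_unmap_page().
 */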
2644 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2645 dma_addr_t *dma_handle, gfp_t flags)
2650 size = PAGE_ALIGN(size);
2651 order = get_order(size);
2652 flags &= ~(GFP_DMA | GFP_DMA32);
2654 vaddr = (void *)__get_free_pages(flags, order);
2657 memset(vaddr, 0, size);
2659 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2661 hwdev->coherent_dma_mask);
2664 free_pages((unsigned long)vaddr, order);
2668 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2669 dma_addr_t dma_handle)
2673 size = PAGE_ALIGN(size);
2674 order = get_order(size);
2676 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2677 free_pages((unsigned long)vaddr, order);
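/*
 * How a driver reaches the two helpers above (illustrative sketch; the
 * variable names and RING_BYTES below are made up, only
 * dma_alloc_coherent() and dma_free_coherent() are real kernel APIs):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * With this driver providing dma_ops, dma_alloc_coherent() ends up in
 * intel_alloc_coherent(), which grabs ordinary pages and then maps them
 * through __intel_map_single() just like streaming DMA.
 */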
2680 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2681 int nelems, enum dma_data_direction dir,
2682 struct dma_attrs *attrs)
2685 struct pci_dev *pdev = to_pci_dev(hwdev);
2686 struct dmar_domain *domain;
2687 unsigned long start_addr;
2691 struct scatterlist *sg;
2692 struct intel_iommu *iommu;
2694 if (iommu_no_mapping(pdev))
2697 domain = find_domain(pdev);
2700 iommu = domain_get_iommu(domain);
2702 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2705 for_each_sg(sglist, sg, nelems, i) {
2706 addr = page_to_phys(sg_page(sg)) + sg->offset;
2707 size += aligned_size((u64)addr, sg->length);
2710 start_addr = iova->pfn_lo << PAGE_SHIFT;
2712 /* clear the whole page */
2713 dma_pte_clear_range(domain, start_addr, start_addr + size);
2714 /* free page tables */
2715 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2717 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2718 size >> VTD_PAGE_SHIFT);
2721 __free_iova(&domain->iovad, iova);
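/*
 * Note (descriptive): the unmap side does not rely on the individual
 * sg->dma_address values beyond the first entry; it recomputes the total
 * aligned size from the scatterlist, looks up the containing IOVA from
 * sglist[0].dma_address, and then clears and frees the whole contiguous
 * IOVA range in one pass, mirroring how intel_map_sg() laid it out.
 */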
2724 static int intel_nontranslate_map_sg(struct device *hddev,
2725 struct scatterlist *sglist, int nelems, int dir)
2728 struct scatterlist *sg;
2730 for_each_sg(sglist, sg, nelems, i) {
2731 BUG_ON(!sg_page(sg));
2732 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2733 sg->dma_length = sg->length;
2738 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2739 enum dma_data_direction dir, struct dma_attrs *attrs)
2743 struct pci_dev *pdev = to_pci_dev(hwdev);
2744 struct dmar_domain *domain;
2748 struct iova *iova = NULL;
2750 struct scatterlist *sg;
2751 unsigned long start_addr;
2752 struct intel_iommu *iommu;
2754 BUG_ON(dir == DMA_NONE);
2755 if (iommu_no_mapping(pdev))
2756 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2758 domain = get_valid_domain_for_dev(pdev);
2762 iommu = domain_get_iommu(domain);
2764 for_each_sg(sglist, sg, nelems, i) {
2765 addr = page_to_phys(sg_page(sg)) + sg->offset;
2766 size += aligned_size((u64)addr, sg->length);
2769 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2771 sglist->dma_length = 0;
2776 * Check if DMAR supports zero-length reads on write-only mappings.
2779 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2780 !cap_zlr(iommu->cap))
2781 prot |= DMA_PTE_READ;
2782 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2783 prot |= DMA_PTE_WRITE;
2785 start_addr = iova->pfn_lo << PAGE_SHIFT;
2787 for_each_sg(sglist, sg, nelems, i) {
2788 addr = page_to_phys(sg_page(sg)) + sg->offset;
2789 size = aligned_size((u64)addr, sg->length);
2790 ret = domain_page_mapping(domain, start_addr + offset,
2791 ((u64)addr) & PHYSICAL_PAGE_MASK,
2794 /* clear the page */
2795 dma_pte_clear_range(domain, start_addr,
2796 start_addr + offset);
2797 /* free page tables */
2798 dma_pte_free_pagetable(domain, start_addr,
2799 start_addr + offset);
2801 __free_iova(&domain->iovad, iova);
2804 sg->dma_address = start_addr + offset +
2805 ((u64)addr & (~PAGE_MASK));
2806 sg->dma_length = sg->length;
2810 /* it's a non-present to present mapping. Only flush if caching mode */
2811 if (cap_caching_mode(iommu->cap))
2812 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2813 offset >> VTD_PAGE_SHIFT);
2815 iommu_flush_write_buffer(iommu);
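/*
 * Layout produced by the mapping loop above (illustrative example with
 * assumed, page-aligned buffers of 0x1000, 0x2000 and 0x800 bytes): the
 * elements are packed back to back inside a single IOVA allocation,
 *
 *   sg[0].dma_address = start_addr + 0x0000
 *   sg[1].dma_address = start_addr + 0x1000
 *   sg[2].dma_address = start_addr + 0x3000
 *
 * where the running offset advances by aligned_size() of each element,
 * so the final 0x800-byte buffer still consumes a full VT-d page.  If
 * any element fails to map, the partially built range is cleared and the
 * IOVA is returned to the allocator.
 */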
2820 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2825 struct dma_map_ops intel_dma_ops = {
2826 .alloc_coherent = intel_alloc_coherent,
2827 .free_coherent = intel_free_coherent,
2828 .map_sg = intel_map_sg,
2829 .unmap_sg = intel_unmap_sg,
2830 .map_page = intel_map_page,
2831 .unmap_page = intel_unmap_page,
2832 .mapping_error = intel_mapping_error,
2835 static inline int iommu_domain_cache_init(void)
2839 iommu_domain_cache = kmem_cache_create("iommu_domain",
2840 sizeof(struct dmar_domain),
2845 if (!iommu_domain_cache) {
2846 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2853 static inline int iommu_devinfo_cache_init(void)
2857 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2858 sizeof(struct device_domain_info),
2862 if (!iommu_devinfo_cache) {
2863 printk(KERN_ERR "Couldn't create devinfo cache\n");
2870 static inline int iommu_iova_cache_init(void)
2874 iommu_iova_cache = kmem_cache_create("iommu_iova",
2875 sizeof(struct iova),
2879 if (!iommu_iova_cache) {
2880 printk(KERN_ERR "Couldn't create iova cache\n");
2887 static int __init iommu_init_mempool(void)
2890 ret = iommu_iova_cache_init();
2894 ret = iommu_domain_cache_init();
2898 ret = iommu_devinfo_cache_init();
2902 kmem_cache_destroy(iommu_domain_cache);
2904 kmem_cache_destroy(iommu_iova_cache);
2909 static void __init iommu_exit_mempool(void)
2911 kmem_cache_destroy(iommu_devinfo_cache);
2912 kmem_cache_destroy(iommu_domain_cache);
2913 kmem_cache_destroy(iommu_iova_cache);
2917 static void __init init_no_remapping_devices(void)
2919 struct dmar_drhd_unit *drhd;
2921 for_each_drhd_unit(drhd) {
2922 if (!drhd->include_all) {
2924 for (i = 0; i < drhd->devices_cnt; i++)
2925 if (drhd->devices[i] != NULL)
2927 /* ignore DMAR unit if no pci devices exist */
2928 if (i == drhd->devices_cnt)
2936 for_each_drhd_unit(drhd) {
2938 if (drhd->ignored || drhd->include_all)
2941 for (i = 0; i < drhd->devices_cnt; i++)
2942 if (drhd->devices[i] &&
2943 !IS_GFX_DEVICE(drhd->devices[i]))
2946 if (i < drhd->devices_cnt)
2949 /* bypass IOMMU if it is just for gfx devices */
2951 for (i = 0; i < drhd->devices_cnt; i++) {
2952 if (!drhd->devices[i])
2954 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
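/*
 * Effect of the two passes above (descriptive note): the first pass
 * marks a DRHD unit as ignored when it is not INCLUDE_ALL and covers no
 * PCI devices at all; the second pass additionally ignores units whose
 * device list consists solely of graphics devices, tagging those devices
 * with DUMMY_DEVICE_DOMAIN_INFO so that iommu_no_mapping() later bypasses
 * translation for them entirely.
 */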
2959 #ifdef CONFIG_SUSPEND
2960 static int init_iommu_hw(void)
2962 struct dmar_drhd_unit *drhd;
2963 struct intel_iommu *iommu = NULL;
2965 for_each_active_iommu(iommu, drhd)
2967 dmar_reenable_qi(iommu);
2969 for_each_active_iommu(iommu, drhd) {
2970 iommu_flush_write_buffer(iommu);
2972 iommu_set_root_entry(iommu);
2974 iommu->flush.flush_context(iommu, 0, 0, 0,
2975 DMA_CCMD_GLOBAL_INVL);
2976 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2977 DMA_TLB_GLOBAL_FLUSH);
2978 iommu_disable_protect_mem_regions(iommu);
2979 iommu_enable_translation(iommu);
2985 static void iommu_flush_all(void)
2987 struct dmar_drhd_unit *drhd;
2988 struct intel_iommu *iommu;
2990 for_each_active_iommu(iommu, drhd) {
2991 iommu->flush.flush_context(iommu, 0, 0, 0,
2992 DMA_CCMD_GLOBAL_INVL);
2993 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2994 DMA_TLB_GLOBAL_FLUSH);
2998 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3000 struct dmar_drhd_unit *drhd;
3001 struct intel_iommu *iommu = NULL;
3004 for_each_active_iommu(iommu, drhd) {
3005 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3007 if (!iommu->iommu_state)
3013 for_each_active_iommu(iommu, drhd) {
3014 iommu_disable_translation(iommu);
3016 spin_lock_irqsave(&iommu->register_lock, flag);
3018 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3019 readl(iommu->reg + DMAR_FECTL_REG);
3020 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3021 readl(iommu->reg + DMAR_FEDATA_REG);
3022 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3023 readl(iommu->reg + DMAR_FEADDR_REG);
3024 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3025 readl(iommu->reg + DMAR_FEUADDR_REG);
3027 spin_unlock_irqrestore(&iommu->register_lock, flag);
3032 for_each_active_iommu(iommu, drhd)
3033 kfree(iommu->iommu_state);
3038 static int iommu_resume(struct sys_device *dev)
3040 struct dmar_drhd_unit *drhd;
3041 struct intel_iommu *iommu = NULL;
3044 if (init_iommu_hw()) {
3045 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3049 for_each_active_iommu(iommu, drhd) {
3051 spin_lock_irqsave(&iommu->register_lock, flag);
3053 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3054 iommu->reg + DMAR_FECTL_REG);
3055 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3056 iommu->reg + DMAR_FEDATA_REG);
3057 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3058 iommu->reg + DMAR_FEADDR_REG);
3059 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3060 iommu->reg + DMAR_FEUADDR_REG);
3062 spin_unlock_irqrestore(&iommu->register_lock, flag);
3065 for_each_active_iommu(iommu, drhd)
3066 kfree(iommu->iommu_state);
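/*
 * Suspend/resume state in a nutshell (descriptive note): only the four
 * fault-event programming registers (FECTL, FEDATA, FEADDR, FEUADDR) are
 * saved into iommu->iommu_state across suspend; everything else is
 * rebuilt by init_iommu_hw(), which re-enables queued invalidation,
 * reloads the root entry, issues global context/IOTLB invalidations and
 * turns translation back on before the saved registers are restored.
 */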
3071 static struct sysdev_class iommu_sysclass = {
3073 .resume = iommu_resume,
3074 .suspend = iommu_suspend,
3077 static struct sys_device device_iommu = {
3078 .cls = &iommu_sysclass,
3081 static int __init init_iommu_sysfs(void)
3085 error = sysdev_class_register(&iommu_sysclass);
3089 error = sysdev_register(&device_iommu);
3091 sysdev_class_unregister(&iommu_sysclass);
3097 static int __init init_iommu_sysfs(void)
3101 #endif /* CONFIG_PM */
3103 int __init intel_iommu_init(void)
3107 if (dmar_table_init())
3110 if (dmar_dev_scope_init())
3114 * Check whether DMA-remapping initialization is needed now.
3115 * The initialization above is also used by interrupt remapping.
3117 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
3120 iommu_init_mempool();
3121 dmar_init_reserved_ranges();
3123 init_no_remapping_devices();
3127 printk(KERN_ERR "IOMMU: dmar init failed\n");
3128 put_iova_domain(&reserved_iova_list);
3129 iommu_exit_mempool();
3133 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3135 init_timer(&unmap_timer);
3138 if (!iommu_pass_through) {
3140 "Multi-level page-table translation for DMAR.\n");
3141 dma_ops = &intel_dma_ops;
3144 "DMAR: Pass through translation for DMAR.\n");
3148 register_iommu(&intel_iommu_ops);
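/*
 * Initialization order above (descriptive note): the DMAR/ACPI tables
 * and device scope are parsed first; the mempools, reserved IOVA ranges
 * and no-remapping quirks are set up next; the hardware bring-up step
 * whose failure prints "IOMMU: dmar init failed" follows; finally the
 * deferred-unmap timer is initialized, dma_ops is switched to
 * intel_dma_ops unless pass-through mode is in use, and the generic
 * IOMMU layer is pointed at intel_iommu_ops.
 */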
3153 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3154 struct pci_dev *pdev)
3156 struct pci_dev *tmp, *parent;
3158 if (!iommu || !pdev)
3161 /* dependent device detach */
3162 tmp = pci_find_upstream_pcie_bridge(pdev);
3163 /* Secondary interface's bus number and devfn 0 */
3165 parent = pdev->bus->self;
3166 while (parent != tmp) {
3167 iommu_detach_dev(iommu, parent->bus->number,
3169 parent = parent->bus->self;
3171 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3172 iommu_detach_dev(iommu,
3173 tmp->subordinate->number, 0);
3174 else /* this is a legacy PCI bridge */
3175 iommu_detach_dev(iommu, tmp->bus->number,
3180 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3181 struct pci_dev *pdev)
3183 struct device_domain_info *info;
3184 struct intel_iommu *iommu;
3185 unsigned long flags;
3187 struct list_head *entry, *tmp;
3189 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3194 spin_lock_irqsave(&device_domain_lock, flags);
3195 list_for_each_safe(entry, tmp, &domain->devices) {
3196 info = list_entry(entry, struct device_domain_info, link);
3197 /* No need to compare PCI domain; it has to be the same */
3198 if (info->bus == pdev->bus->number &&
3199 info->devfn == pdev->devfn) {
3200 list_del(&info->link);
3201 list_del(&info->global);
3203 info->dev->dev.archdata.iommu = NULL;
3204 spin_unlock_irqrestore(&device_domain_lock, flags);
3206 iommu_disable_dev_iotlb(info);
3207 iommu_detach_dev(iommu, info->bus, info->devfn);
3208 iommu_detach_dependent_devices(iommu, pdev);
3209 free_devinfo_mem(info);
3211 spin_lock_irqsave(&device_domain_lock, flags);
3219 /* if there are no other devices under the same iommu
3220 * owned by this domain, clear this iommu in iommu_bmp,
3221 * and update the iommu count and coherency
3223 if (iommu == device_to_iommu(info->segment, info->bus,
3229 unsigned long tmp_flags;
3230 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3231 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3232 domain->iommu_count--;
3233 domain_update_iommu_cap(domain);
3234 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3237 spin_unlock_irqrestore(&device_domain_lock, flags);
3240 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3242 struct device_domain_info *info;
3243 struct intel_iommu *iommu;
3244 unsigned long flags1, flags2;
3246 spin_lock_irqsave(&device_domain_lock, flags1);
3247 while (!list_empty(&domain->devices)) {
3248 info = list_entry(domain->devices.next,
3249 struct device_domain_info, link);
3250 list_del(&info->link);
3251 list_del(&info->global);
3253 info->dev->dev.archdata.iommu = NULL;
3255 spin_unlock_irqrestore(&device_domain_lock, flags1);
3257 iommu_disable_dev_iotlb(info);
3258 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3259 iommu_detach_dev(iommu, info->bus, info->devfn);
3260 iommu_detach_dependent_devices(iommu, info->dev);
3262 /* clear this iommu in iommu_bmp, update iommu count
3265 spin_lock_irqsave(&domain->iommu_lock, flags2);
3266 if (test_and_clear_bit(iommu->seq_id,
3267 &domain->iommu_bmp)) {
3268 domain->iommu_count--;
3269 domain_update_iommu_cap(domain);
3271 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3273 free_devinfo_mem(info);
3274 spin_lock_irqsave(&device_domain_lock, flags1);
3276 spin_unlock_irqrestore(&device_domain_lock, flags1);
3279 /* domain ids for virtual machines; they are never programmed into a context entry */
3280 static unsigned long vm_domid;
3282 static int vm_domain_min_agaw(struct dmar_domain *domain)
3285 int min_agaw = domain->agaw;
3287 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3288 for (; i < g_num_of_iommus; ) {
3289 if (min_agaw > g_iommus[i]->agaw)
3290 min_agaw = g_iommus[i]->agaw;
3292 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3298 static struct dmar_domain *iommu_alloc_vm_domain(void)
3300 struct dmar_domain *domain;
3302 domain = alloc_domain_mem();
3306 domain->id = vm_domid++;
3307 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3308 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3313 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3317 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3318 spin_lock_init(&domain->mapping_lock);
3319 spin_lock_init(&domain->iommu_lock);
3321 domain_reserve_special_ranges(domain);
3323 /* calculate AGAW */
3324 domain->gaw = guest_width;
3325 adjust_width = guestwidth_to_adjustwidth(guest_width);
3326 domain->agaw = width_to_agaw(adjust_width);
3328 INIT_LIST_HEAD(&domain->devices);
3330 domain->iommu_count = 0;
3331 domain->iommu_coherency = 0;
3332 domain->max_addr = 0;
3334 /* always allocate the top pgd */
3335 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3338 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
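/*
 * AGAW example (assumed widths, following the VT-d spec): with the
 * default guest width of 48 bits, guestwidth_to_adjustwidth() keeps 48
 * and width_to_agaw() selects the AGAW for a 4-level page table; a
 * 39-bit guest width would yield a 3-level table instead.  The pgd page
 * allocated just above is the root of that table.
 */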
3342 static void iommu_free_vm_domain(struct dmar_domain *domain)
3344 unsigned long flags;
3345 struct dmar_drhd_unit *drhd;
3346 struct intel_iommu *iommu;
3348 unsigned long ndomains;
3350 for_each_drhd_unit(drhd) {
3353 iommu = drhd->iommu;
3355 ndomains = cap_ndoms(iommu->cap);
3356 i = find_first_bit(iommu->domain_ids, ndomains);
3357 for (; i < ndomains; ) {
3358 if (iommu->domains[i] == domain) {
3359 spin_lock_irqsave(&iommu->lock, flags);
3360 clear_bit(i, iommu->domain_ids);
3361 iommu->domains[i] = NULL;
3362 spin_unlock_irqrestore(&iommu->lock, flags);
3365 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3370 static void vm_domain_exit(struct dmar_domain *domain)
3374 /* Domain 0 is reserved, so don't process it */
3378 vm_domain_remove_all_dev_info(domain);
3380 put_iova_domain(&domain->iovad);
3381 end = DOMAIN_MAX_ADDR(domain->gaw);
3382 end = end & (~VTD_PAGE_MASK);
3385 dma_pte_clear_range(domain, 0, end);
3387 /* free page tables */
3388 dma_pte_free_pagetable(domain, 0, end);
3390 iommu_free_vm_domain(domain);
3391 free_domain_mem(domain);
3394 static int intel_iommu_domain_init(struct iommu_domain *domain)
3396 struct dmar_domain *dmar_domain;
3398 dmar_domain = iommu_alloc_vm_domain();
3401 "intel_iommu_domain_init: dmar_domain == NULL\n");
3404 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3406 "intel_iommu_domain_init() failed\n");
3407 vm_domain_exit(dmar_domain);
3410 domain->priv = dmar_domain;
3415 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3417 struct dmar_domain *dmar_domain = domain->priv;
3419 domain->priv = NULL;
3420 vm_domain_exit(dmar_domain);
3423 static int intel_iommu_attach_device(struct iommu_domain *domain,
3426 struct dmar_domain *dmar_domain = domain->priv;
3427 struct pci_dev *pdev = to_pci_dev(dev);
3428 struct intel_iommu *iommu;
3433 /* normally pdev is not mapped */
3434 if (unlikely(domain_context_mapped(pdev))) {
3435 struct dmar_domain *old_domain;
3437 old_domain = find_domain(pdev);
3439 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3440 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3441 domain_remove_one_dev_info(old_domain, pdev);
3443 domain_remove_dev_info(old_domain);
3447 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3452 /* check if this iommu agaw is sufficient for max mapped address */
3453 addr_width = agaw_to_width(iommu->agaw);
3454 end = DOMAIN_MAX_ADDR(addr_width);
3455 end = end & VTD_PAGE_MASK;
3456 if (end < dmar_domain->max_addr) {
3457 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3458 "sufficient for the mapped address (%llx)\n",
3459 __func__, iommu->agaw, dmar_domain->max_addr);
3463 ret = domain_add_dev_info(dmar_domain, pdev);
3467 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3471 static void intel_iommu_detach_device(struct iommu_domain *domain,
3474 struct dmar_domain *dmar_domain = domain->priv;
3475 struct pci_dev *pdev = to_pci_dev(dev);
3477 domain_remove_one_dev_info(dmar_domain, pdev);
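/*
 * How these callbacks are reached (illustrative sketch; the variable
 * names are made up, but iommu_domain_alloc(), iommu_attach_device(),
 * iommu_detach_device(), iommu_map_range() and iommu_domain_free() are
 * the generic IOMMU API entry points that dispatch into intel_iommu_ops
 * below):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *		... map guest memory with iommu_map_range() ...
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 *
 * This is essentially what KVM device assignment does for each assigned
 * PCI device.
 */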
3480 static int intel_iommu_map_range(struct iommu_domain *domain,
3481 unsigned long iova, phys_addr_t hpa,
3482 size_t size, int iommu_prot)
3484 struct dmar_domain *dmar_domain = domain->priv;
3490 if (iommu_prot & IOMMU_READ)
3491 prot |= DMA_PTE_READ;
3492 if (iommu_prot & IOMMU_WRITE)
3493 prot |= DMA_PTE_WRITE;
3494 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3495 prot |= DMA_PTE_SNP;
3497 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
3498 if (dmar_domain->max_addr < max_addr) {
3502 /* check if minimum agaw is sufficient for mapped address */
3503 min_agaw = vm_domain_min_agaw(dmar_domain);
3504 addr_width = agaw_to_width(min_agaw);
3505 end = DOMAIN_MAX_ADDR(addr_width);
3506 end = end & VTD_PAGE_MASK;
3507 if (end < max_addr) {
3508 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3509 "sufficient for the mapped address (%llx)\n",
3510 __func__, min_agaw, max_addr);
3513 dmar_domain->max_addr = max_addr;
3516 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
3520 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3521 unsigned long iova, size_t size)
3523 struct dmar_domain *dmar_domain = domain->priv;
3526 /* The address might not be aligned */
3527 base = iova & VTD_PAGE_MASK;
3528 size = VTD_PAGE_ALIGN(size);
3529 dma_pte_clear_range(dmar_domain, base, base + size);
3531 if (dmar_domain->max_addr == base + size)
3532 dmar_domain->max_addr = base;
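/*
 * Alignment example for the two range calls above (assumed values): a
 * request for iova = 0x100800, size = 0x1800 is widened to VT-d page
 * granularity before touching the page tables:
 *
 *   base     = 0x100800 & VTD_PAGE_MASK   = 0x100000
 *   size     = VTD_PAGE_ALIGN(0x1800)     = 0x2000
 *   max_addr = base + size                = 0x102000   (map path)
 *
 * so both map and unmap operate on whole 4KiB VT-d pages, and max_addr
 * tracks the highest page boundary ever mapped into the domain.
 */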
3535 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3538 struct dmar_domain *dmar_domain = domain->priv;
3539 struct dma_pte *pte;
3542 pte = addr_to_dma_pte(dmar_domain, iova);
3544 phys = dma_pte_addr(pte);
3549 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3552 struct dmar_domain *dmar_domain = domain->priv;
3554 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3555 return dmar_domain->iommu_snooping;
3560 static struct iommu_ops intel_iommu_ops = {
3561 .domain_init = intel_iommu_domain_init,
3562 .domain_destroy = intel_iommu_domain_destroy,
3563 .attach_dev = intel_iommu_attach_device,
3564 .detach_dev = intel_iommu_detach_device,
3565 .map = intel_iommu_map_range,
3566 .unmap = intel_iommu_unmap_range,
3567 .iova_to_phys = intel_iommu_iova_to_phys,
3568 .domain_has_cap = intel_iommu_domain_has_cap,
3571 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3574 * Mobile 4 Series Chipset neglects to set the RWBF capability, but needs it.
3577 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3581 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);