/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw - VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
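
/*
 * Example, assuming 4KiB pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12):
 * DMA_32BIT_PFN is 0xfffff, and DOMAIN_MAX_PFN(48) is 2^36 - 1, i.e. the
 * last 4KiB page frame addressable with a 48-bit guest address width.
 */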
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
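
/*
 * Worked example: with 4KiB MM pages, PAGE_SHIFT == VTD_PAGE_SHIFT and both
 * conversions are the identity. With 64KiB MM pages one MM pfn covers 16
 * VT-d pfns, so mm_to_dma_pfn() shifts left by 4.
 */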

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
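
/*
 * A fully programmed context entry therefore looks like: lo carries the
 * present bit (0), fault-processing disable (bit 1, cleared above), the
 * translation type in bits 2-3 and the address-space root in bits 12-63;
 * hi carries the address width in bits 0-2 and the domain id in bits 8-23.
 */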

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
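
/*
 * Note: bits 0 and 1 of a PTE double as the read/write permission bits, so
 * an entry counts as present as soon as either permission is granted.
 */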

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
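
/*
 * Unmaps are batched per-IOMMU in these tables and released either from the
 * 10ms unmap_timer or, at the latest, once HIGH_WATER_MARK entries are
 * pending (see add_unmap() and flush_unmaps() below).
 */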
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered IOMMUs; sizes g_iommus and the deferred_flush table */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
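
/*
 * Worked example: agaw 2 means a 4-level page table (agaw_to_level() == 4)
 * covering 30 + 2 * 9 == 48 bits. Each level resolves LEVEL_STRIDE (9) bits
 * of the pfn, e.g. pfn_level_offset(pfn, 2) picks pfn bits 9-17.
 */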

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
						       flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table controls read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_pfn_level_pte(domain, pfn, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		dma_pte_clear_one(domain, start_pfn);
		start_pfn++;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* invalidate the context-cache; callers handle any write-buffer flushing */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* invalidate IOTLB entries; callers handle any write-buffer flushing */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);
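
	/* e.g. pages == 9 rounds up to 16, giving mask == 4: a 16-page,
	   naturally aligned invalidation */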

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
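
/*
 * Example: guestwidth_to_adjustwidth(48) == 48 (12 + 4 * 9 exactly), while
 * guestwidth_to_adjustwidth(40) rounds up to 48, the next width that a page
 * table built from 9-bit levels can actually cover.
 */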

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			      unsigned long phys_pfn, unsigned long nr_pages,
			      int prot)
{
	struct dma_pte *pte;
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	while (nr_pages--) {
		pte = pfn_to_dma_pte(domain, iov_pfn);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_pfn(pte, phys_pfn);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		iov_pfn++;
		phys_pfn++;
	}
	return 0;
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device's info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through. */
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}

int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * In case pass through can not be enabled, the iommu tries to use
	 * identity mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the
		 * single-threaded kernel __init code path; all other access
		 * is read-only
		 */
		g_num_of_iommus++;
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from a sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, setup context entries
	 * for identity mappings for rmrr, gfx, and isa, and may fall back to
	 * static identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *    endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOSes list non-existent devices in
				 * the DMAR table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;

	return host_addr >> VTD_PAGE_SHIFT;
}
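
/*
 * Example: aligned_nrpages(0x1003, 0x2000) == 3 with 4KiB pages; the
 * misaligned start pulls the mapping into a third VT-d page.
 */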

static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from the higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process. */
static int iommu_no_mapping(struct pci_dev *pdev)
{
	int found;

	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	found = identity_mapping(pdev);
	if (found) {
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;
		else {
			/*
			 * A 32 bit DMA device is removed from si_domain and
			 * falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return iommu_dummy(pdev);
}

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, we should map the
	 * whole page. Note: if two parts of one page are separately mapped,
	 * we might have two guest_addr mappings to the same host paddr, but
	 * this is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 paddr >> VTD_PAGE_SHIFT, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
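			/* e.g. a two-page IOVA (4KiB pages) yields
			   mask == ilog2(2) == 1 for the device IOTLB flush */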
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
2553 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2555 unsigned long flags;
2557 struct intel_iommu *iommu;
2559 spin_lock_irqsave(&async_umap_flush_lock, flags);
2560 if (list_size == HIGH_WATER_MARK)
2563 iommu = domain_get_iommu(dom);
2564 iommu_id = iommu->seq_id;
2566 next = deferred_flush[iommu_id].next;
2567 deferred_flush[iommu_id].domain[next] = dom;
2568 deferred_flush[iommu_id].iova[next] = iova;
2569 deferred_flush[iommu_id].next++;
2572 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2576 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2579 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2580 size_t size, enum dma_data_direction dir,
2581 struct dma_attrs *attrs)
2583 struct pci_dev *pdev = to_pci_dev(dev);
2584 struct dmar_domain *domain;
2585 unsigned long start_pfn, last_pfn;
2587 struct intel_iommu *iommu;
2589 if (iommu_no_mapping(pdev))
2592 domain = find_domain(pdev);
2595 iommu = domain_get_iommu(domain);
2597 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2601 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2602 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2604 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2605 pci_name(pdev), start_pfn, last_pfn);
2607 /* clear the whole page */
2608 dma_pte_clear_range(domain, start_pfn, last_pfn);
2610 /* free page tables */
2611 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2613 if (intel_iommu_strict) {
2614 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2615 last_pfn - start_pfn + 1);
2617 __free_iova(&domain->iovad, iova);
2619 add_unmap(domain, iova);
2621 * queue up the release of the unmap to save the 1/6th of the
2622 * cpu used up by the iotlb flush operation...
2627 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2630 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2633 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2634 dma_addr_t *dma_handle, gfp_t flags)
2639 size = PAGE_ALIGN(size);
2640 order = get_order(size);
2641 flags &= ~(GFP_DMA | GFP_DMA32);
2643 vaddr = (void *)__get_free_pages(flags, order);
2646 memset(vaddr, 0, size);
2648 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2650 hwdev->coherent_dma_mask);
2653 free_pages((unsigned long)vaddr, order);
2657 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2658 dma_addr_t dma_handle)
2662 size = PAGE_ALIGN(size);
2663 order = get_order(size);
2665 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2666 free_pages((unsigned long)vaddr, order);
2669 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2670 int nelems, enum dma_data_direction dir,
2671 struct dma_attrs *attrs)
2673 struct pci_dev *pdev = to_pci_dev(hwdev);
2674 struct dmar_domain *domain;
2675 unsigned long start_pfn, last_pfn;
2677 struct intel_iommu *iommu;
2679 if (iommu_no_mapping(pdev))
2682 domain = find_domain(pdev);
2685 iommu = domain_get_iommu(domain);
2687 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2691 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2692 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2694 /* clear the whole page */
2695 dma_pte_clear_range(domain, start_pfn, last_pfn);
2697 /* free page tables */
2698 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2700 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2701 (last_pfn - start_pfn + 1));
2704 __free_iova(&domain->iovad, iova);
2707 static int intel_nontranslate_map_sg(struct device *hddev,
2708 struct scatterlist *sglist, int nelems, int dir)
2711 struct scatterlist *sg;
2713 for_each_sg(sglist, sg, nelems, i) {
2714 BUG_ON(!sg_page(sg));
2715 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2716 sg->dma_length = sg->length;
2721 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2722 enum dma_data_direction dir, struct dma_attrs *attrs)
2725 struct pci_dev *pdev = to_pci_dev(hwdev);
2726 struct dmar_domain *domain;
2729 size_t offset_pfn = 0;
2730 struct iova *iova = NULL;
2732 struct scatterlist *sg;
2733 unsigned long start_vpfn;
2734 struct intel_iommu *iommu;
2736 BUG_ON(dir == DMA_NONE);
2737 if (iommu_no_mapping(pdev))
2738 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2740 domain = get_valid_domain_for_dev(pdev);
2744 iommu = domain_get_iommu(domain);
2746 for_each_sg(sglist, sg, nelems, i)
2747 size += aligned_nrpages(sg->offset, sg->length);
2749 iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
2752 sglist->dma_length = 0;
2757 * Check if DMAR supports zero-length reads on write only
2760 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2761 !cap_zlr(iommu->cap))
2762 prot |= DMA_PTE_READ;
2763 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2764 prot |= DMA_PTE_WRITE;
2766 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2768 for_each_sg(sglist, sg, nelems, i) {
2769 int nr_pages = aligned_nrpages(sg->offset, sg->length);
2770 ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
2771 page_to_dma_pfn(sg_page(sg)),
2774 /* clear the page */
2775 dma_pte_clear_range(domain, start_vpfn,
2776 start_vpfn + offset_pfn);
2777 /* free page tables */
2778 dma_pte_free_pagetable(domain, start_vpfn,
2779 start_vpfn + offset_pfn);
2781 __free_iova(&domain->iovad, iova);
2784 sg->dma_address = ((dma_addr_t)(start_vpfn + offset_pfn)
2785 << VTD_PAGE_SHIFT) + sg->offset;
2786 sg->dma_length = sg->length;
2787 offset_pfn += nr_pages;
2790 /* it's a non-present to present mapping. Only flush if caching mode */
2791 if (cap_caching_mode(iommu->cap))
2792 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2794 iommu_flush_write_buffer(iommu);
2799 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2804 struct dma_map_ops intel_dma_ops = {
2805 .alloc_coherent = intel_alloc_coherent,
2806 .free_coherent = intel_free_coherent,
2807 .map_sg = intel_map_sg,
2808 .unmap_sg = intel_unmap_sg,
2809 .map_page = intel_map_page,
2810 .unmap_page = intel_unmap_page,
2811 .mapping_error = intel_mapping_error,
2814 static inline int iommu_domain_cache_init(void)
2818 iommu_domain_cache = kmem_cache_create("iommu_domain",
2819 sizeof(struct dmar_domain),
2824 if (!iommu_domain_cache) {
2825 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2832 static inline int iommu_devinfo_cache_init(void)
2836 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2837 sizeof(struct device_domain_info),
2841 if (!iommu_devinfo_cache) {
2842 printk(KERN_ERR "Couldn't create devinfo cache\n");
2849 static inline int iommu_iova_cache_init(void)
2853 iommu_iova_cache = kmem_cache_create("iommu_iova",
2854 sizeof(struct iova),
2858 if (!iommu_iova_cache) {
2859 printk(KERN_ERR "Couldn't create iova cache\n");
2866 static int __init iommu_init_mempool(void)
2869 ret = iommu_iova_cache_init();
2873 ret = iommu_domain_cache_init();
2877 ret = iommu_devinfo_cache_init();
2881 kmem_cache_destroy(iommu_domain_cache);
2883 kmem_cache_destroy(iommu_iova_cache);
2888 static void __init iommu_exit_mempool(void)
2890 kmem_cache_destroy(iommu_devinfo_cache);
2891 kmem_cache_destroy(iommu_domain_cache);
2892 kmem_cache_destroy(iommu_iova_cache);
2896 static void __init init_no_remapping_devices(void)
2898 struct dmar_drhd_unit *drhd;
2900 for_each_drhd_unit(drhd) {
2901 if (!drhd->include_all) {
2903 for (i = 0; i < drhd->devices_cnt; i++)
2904 if (drhd->devices[i] != NULL)
2906 /* ignore DMAR unit if no pci devices exist */
2907 if (i == drhd->devices_cnt)
2915 for_each_drhd_unit(drhd) {
2917 if (drhd->ignored || drhd->include_all)
2920 for (i = 0; i < drhd->devices_cnt; i++)
2921 if (drhd->devices[i] &&
2922 !IS_GFX_DEVICE(drhd->devices[i]))
2925 if (i < drhd->devices_cnt)
2928 /* bypass IOMMU if it is just for gfx devices */
2930 for (i = 0; i < drhd->devices_cnt; i++) {
2931 if (!drhd->devices[i])
2933 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
2938 #ifdef CONFIG_SUSPEND
2939 static int init_iommu_hw(void)
2941 struct dmar_drhd_unit *drhd;
2942 struct intel_iommu *iommu = NULL;
2944 for_each_active_iommu(iommu, drhd)
2946 dmar_reenable_qi(iommu);
2948 for_each_active_iommu(iommu, drhd) {
2949 iommu_flush_write_buffer(iommu);
2951 iommu_set_root_entry(iommu);
2953 iommu->flush.flush_context(iommu, 0, 0, 0,
2954 DMA_CCMD_GLOBAL_INVL);
2955 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2956 DMA_TLB_GLOBAL_FLUSH);
2957 iommu_disable_protect_mem_regions(iommu);
2958 iommu_enable_translation(iommu);
2964 static void iommu_flush_all(void)
2966 struct dmar_drhd_unit *drhd;
2967 struct intel_iommu *iommu;
2969 for_each_active_iommu(iommu, drhd) {
2970 iommu->flush.flush_context(iommu, 0, 0, 0,
2971 DMA_CCMD_GLOBAL_INVL);
2972 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2973 DMA_TLB_GLOBAL_FLUSH);
2977 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2979 struct dmar_drhd_unit *drhd;
2980 struct intel_iommu *iommu = NULL;
2983 for_each_active_iommu(iommu, drhd) {
2984 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2986 if (!iommu->iommu_state)
2992 for_each_active_iommu(iommu, drhd) {
2993 iommu_disable_translation(iommu);
2995 spin_lock_irqsave(&iommu->register_lock, flag);
2997 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2998 readl(iommu->reg + DMAR_FECTL_REG);
2999 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3000 readl(iommu->reg + DMAR_FEDATA_REG);
3001 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3002 readl(iommu->reg + DMAR_FEADDR_REG);
3003 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3004 readl(iommu->reg + DMAR_FEUADDR_REG);
3006 spin_unlock_irqrestore(&iommu->register_lock, flag);
3011 for_each_active_iommu(iommu, drhd)
3012 kfree(iommu->iommu_state);
3017 static int iommu_resume(struct sys_device *dev)
3019 struct dmar_drhd_unit *drhd;
3020 struct intel_iommu *iommu = NULL;
3023 if (init_iommu_hw()) {
3024 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3028 for_each_active_iommu(iommu, drhd) {
3030 spin_lock_irqsave(&iommu->register_lock, flag);
3032 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3033 iommu->reg + DMAR_FECTL_REG);
3034 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3035 iommu->reg + DMAR_FEDATA_REG);
3036 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3037 iommu->reg + DMAR_FEADDR_REG);
3038 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3039 iommu->reg + DMAR_FEUADDR_REG);
3041 spin_unlock_irqrestore(&iommu->register_lock, flag);
3044 for_each_active_iommu(iommu, drhd)
3045 kfree(iommu->iommu_state);
3050 static struct sysdev_class iommu_sysclass = {
3052 .resume = iommu_resume,
3053 .suspend = iommu_suspend,
3056 static struct sys_device device_iommu = {
3057 .cls = &iommu_sysclass,
3060 static int __init init_iommu_sysfs(void)
3064 error = sysdev_class_register(&iommu_sysclass);
3068 error = sysdev_register(&device_iommu);
3070 sysdev_class_unregister(&iommu_sysclass);
3076 static int __init init_iommu_sysfs(void)
3080 #endif /* CONFIG_PM */
3082 int __init intel_iommu_init(void)
3086 if (dmar_table_init())
3089 if (dmar_dev_scope_init())
3093 * Check the need for DMA-remapping initialization now.
3094 * Above initialization will also be used by Interrupt-remapping.
3096 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
3099 iommu_init_mempool();
3100 dmar_init_reserved_ranges();
3102 init_no_remapping_devices();
3106 printk(KERN_ERR "IOMMU: dmar init failed\n");
3107 put_iova_domain(&reserved_iova_list);
3108 iommu_exit_mempool();
3112 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3114 init_timer(&unmap_timer);
3117 if (!iommu_pass_through) {
3119 "Multi-level page-table translation for DMAR.\n");
3120 dma_ops = &intel_dma_ops;
3123 "DMAR: Pass through translation for DMAR.\n");
3127 register_iommu(&intel_iommu_ops);
3132 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3133 struct pci_dev *pdev)
3135 struct pci_dev *tmp, *parent;
3137 if (!iommu || !pdev)
3140 /* dependent device detach */
3141 tmp = pci_find_upstream_pcie_bridge(pdev);
3142 /* Secondary interface's bus number and devfn 0 */
3144 parent = pdev->bus->self;
3145 while (parent != tmp) {
3146 iommu_detach_dev(iommu, parent->bus->number,
3148 parent = parent->bus->self;
3150 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3151 iommu_detach_dev(iommu,
3152 tmp->subordinate->number, 0);
3153 else /* this is a legacy PCI bridge */
3154 iommu_detach_dev(iommu, tmp->bus->number,
3159 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3160 struct pci_dev *pdev)
3162 struct device_domain_info *info;
3163 struct intel_iommu *iommu;
3164 unsigned long flags;
3166 struct list_head *entry, *tmp;
3168 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3173 spin_lock_irqsave(&device_domain_lock, flags);
3174 list_for_each_safe(entry, tmp, &domain->devices) {
3175 info = list_entry(entry, struct device_domain_info, link);
3176 /* No need to compare PCI domain; it has to be the same */
3177 if (info->bus == pdev->bus->number &&
3178 info->devfn == pdev->devfn) {
3179 list_del(&info->link);
3180 list_del(&info->global);
3182 info->dev->dev.archdata.iommu = NULL;
3183 spin_unlock_irqrestore(&device_domain_lock, flags);
3185 iommu_disable_dev_iotlb(info);
3186 iommu_detach_dev(iommu, info->bus, info->devfn);
3187 iommu_detach_dependent_devices(iommu, pdev);
3188 free_devinfo_mem(info);
3190 spin_lock_irqsave(&device_domain_lock, flags);
3198 /* if there is no other devices under the same iommu
3199 * owned by this domain, clear this iommu in iommu_bmp
3200 * update iommu count and coherency
3202 if (iommu == device_to_iommu(info->segment, info->bus,
3208 unsigned long tmp_flags;
3209 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3210 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3211 domain->iommu_count--;
3212 domain_update_iommu_cap(domain);
3213 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3216 spin_unlock_irqrestore(&device_domain_lock, flags);
3219 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3221 struct device_domain_info *info;
3222 struct intel_iommu *iommu;
3223 unsigned long flags1, flags2;
3225 spin_lock_irqsave(&device_domain_lock, flags1);
3226 while (!list_empty(&domain->devices)) {
3227 info = list_entry(domain->devices.next,
3228 struct device_domain_info, link);
3229 list_del(&info->link);
3230 list_del(&info->global);
3232 info->dev->dev.archdata.iommu = NULL;
3234 spin_unlock_irqrestore(&device_domain_lock, flags1);
3236 iommu_disable_dev_iotlb(info);
3237 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3238 iommu_detach_dev(iommu, info->bus, info->devfn);
3239 iommu_detach_dependent_devices(iommu, info->dev);
3241 /* clear this iommu in iommu_bmp, update iommu count
3244 spin_lock_irqsave(&domain->iommu_lock, flags2);
3245 if (test_and_clear_bit(iommu->seq_id,
3246 &domain->iommu_bmp)) {
3247 domain->iommu_count--;
3248 domain_update_iommu_cap(domain);
3250 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3252 free_devinfo_mem(info);
3253 spin_lock_irqsave(&device_domain_lock, flags1);
3255 spin_unlock_irqrestore(&device_domain_lock, flags1);
3258 /* domain id for virtual machine, it won't be set in context */
3259 static unsigned long vm_domid;
3261 static int vm_domain_min_agaw(struct dmar_domain *domain)
3264 int min_agaw = domain->agaw;
3266 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3267 for (; i < g_num_of_iommus; ) {
3268 if (min_agaw > g_iommus[i]->agaw)
3269 min_agaw = g_iommus[i]->agaw;
3271 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3277 static struct dmar_domain *iommu_alloc_vm_domain(void)
3279 struct dmar_domain *domain;
3281 domain = alloc_domain_mem();
3285 domain->id = vm_domid++;
3286 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3287 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3292 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3296 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3297 spin_lock_init(&domain->mapping_lock);
3298 spin_lock_init(&domain->iommu_lock);
3300 domain_reserve_special_ranges(domain);
3302 /* calculate AGAW */
3303 domain->gaw = guest_width;
3304 adjust_width = guestwidth_to_adjustwidth(guest_width);
3305 domain->agaw = width_to_agaw(adjust_width);
3307 INIT_LIST_HEAD(&domain->devices);
3309 domain->iommu_count = 0;
3310 domain->iommu_coherency = 0;
3311 domain->max_addr = 0;
3313 /* always allocate the top pgd */
3314 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3317 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3321 static void iommu_free_vm_domain(struct dmar_domain *domain)
3323 unsigned long flags;
3324 struct dmar_drhd_unit *drhd;
3325 struct intel_iommu *iommu;
3327 unsigned long ndomains;
3329 for_each_drhd_unit(drhd) {
3332 iommu = drhd->iommu;
3334 ndomains = cap_ndoms(iommu->cap);
3335 i = find_first_bit(iommu->domain_ids, ndomains);
3336 for (; i < ndomains; ) {
3337 if (iommu->domains[i] == domain) {
3338 spin_lock_irqsave(&iommu->lock, flags);
3339 clear_bit(i, iommu->domain_ids);
3340 iommu->domains[i] = NULL;
3341 spin_unlock_irqrestore(&iommu->lock, flags);
3344 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3349 static void vm_domain_exit(struct dmar_domain *domain)
3351 /* Domain 0 is reserved, so dont process it */
3355 vm_domain_remove_all_dev_info(domain);
3357 put_iova_domain(&domain->iovad);
3360 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3362 /* free page tables */
3363 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3365 iommu_free_vm_domain(domain);
3366 free_domain_mem(domain);
3369 static int intel_iommu_domain_init(struct iommu_domain *domain)
3371 struct dmar_domain *dmar_domain;
3373 dmar_domain = iommu_alloc_vm_domain();
3376 "intel_iommu_domain_init: dmar_domain == NULL\n");
3379 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3381 "intel_iommu_domain_init() failed\n");
3382 vm_domain_exit(dmar_domain);
3385 domain->priv = dmar_domain;
3390 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3392 struct dmar_domain *dmar_domain = domain->priv;
3394 domain->priv = NULL;
3395 vm_domain_exit(dmar_domain);
3398 static int intel_iommu_attach_device(struct iommu_domain *domain,
3401 struct dmar_domain *dmar_domain = domain->priv;
3402 struct pci_dev *pdev = to_pci_dev(dev);
3403 struct intel_iommu *iommu;
3408 /* normally pdev is not mapped */
3409 if (unlikely(domain_context_mapped(pdev))) {
3410 struct dmar_domain *old_domain;
3412 old_domain = find_domain(pdev);
3414 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3415 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3416 domain_remove_one_dev_info(old_domain, pdev);
3418 domain_remove_dev_info(old_domain);
3422 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3427 /* check if this iommu agaw is sufficient for max mapped address */
3428 addr_width = agaw_to_width(iommu->agaw);
3429 end = DOMAIN_MAX_ADDR(addr_width);
3430 end = end & VTD_PAGE_MASK;
3431 if (end < dmar_domain->max_addr) {
3432 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3433 "sufficient for the mapped address (%llx)\n",
3434 __func__, iommu->agaw, dmar_domain->max_addr);
3438 ret = domain_add_dev_info(dmar_domain, pdev);
3442 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3446 static void intel_iommu_detach_device(struct iommu_domain *domain,
3449 struct dmar_domain *dmar_domain = domain->priv;
3450 struct pci_dev *pdev = to_pci_dev(dev);
3452 domain_remove_one_dev_info(dmar_domain, pdev);
3455 static int intel_iommu_map_range(struct iommu_domain *domain,
3456 unsigned long iova, phys_addr_t hpa,
3457 size_t size, int iommu_prot)
3459 struct dmar_domain *dmar_domain = domain->priv;
3465 if (iommu_prot & IOMMU_READ)
3466 prot |= DMA_PTE_READ;
3467 if (iommu_prot & IOMMU_WRITE)
3468 prot |= DMA_PTE_WRITE;
3469 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3470 prot |= DMA_PTE_SNP;
3472 max_addr = iova + size;
3473 if (dmar_domain->max_addr < max_addr) {
3477 /* check if minimum agaw is sufficient for mapped address */
3478 min_agaw = vm_domain_min_agaw(dmar_domain);
3479 addr_width = agaw_to_width(min_agaw);
3480 end = DOMAIN_MAX_ADDR(addr_width);
3481 end = end & VTD_PAGE_MASK;
3482 if (end < max_addr) {
3483 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3484 "sufficient for the mapped address (%llx)\n",
3485 __func__, min_agaw, max_addr);
3488 dmar_domain->max_addr = max_addr;
3490 /* Round up size to next multiple of PAGE_SIZE, if it and
3491 the low bits of hpa would take us onto the next page */
3492 size = aligned_nrpages(hpa, size);
3493 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3494 hpa >> VTD_PAGE_SHIFT, size, prot);
3498 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3499 unsigned long iova, size_t size)
3501 struct dmar_domain *dmar_domain = domain->priv;
3503 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3504 (iova + size - 1) >> VTD_PAGE_SHIFT);
3506 if (dmar_domain->max_addr == iova + size)
3507 dmar_domain->max_addr = iova;
3510 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3513 struct dmar_domain *dmar_domain = domain->priv;
3514 struct dma_pte *pte;
3517 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3519 phys = dma_pte_addr(pte);
3524 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3527 struct dmar_domain *dmar_domain = domain->priv;
3529 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3530 return dmar_domain->iommu_snooping;
3535 static struct iommu_ops intel_iommu_ops = {
3536 .domain_init = intel_iommu_domain_init,
3537 .domain_destroy = intel_iommu_domain_destroy,
3538 .attach_dev = intel_iommu_attach_device,
3539 .detach_dev = intel_iommu_detach_device,
3540 .map = intel_iommu_map_range,
3541 .unmap = intel_iommu_unmap_range,
3542 .iova_to_phys = intel_iommu_iova_to_phys,
3543 .domain_has_cap = intel_iommu_domain_has_cap,
3546 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3549 * Mobile 4 Series Chipset neglects to set RWBF capability,
3552 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3556 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);