2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <asm/cacheflush.h>
41 #include <asm/iommu.h>
44 #define ROOT_SIZE VTD_PAGE_SIZE
45 #define CONTEXT_SIZE VTD_PAGE_SIZE
47 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
48 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
50 #define IOAPIC_RANGE_START (0xfee00000)
51 #define IOAPIC_RANGE_END (0xfeefffff)
52 #define IOVA_START_ADDR (0x1000)
54 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
56 #define MAX_AGAW_WIDTH 64
58 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
59 #define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
61 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
62 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
63 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
65 #ifndef PHYSICAL_PAGE_MASK
66 #define PHYSICAL_PAGE_MASK PAGE_MASK
69 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
70 are never going to work. */
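/* Convert between VT-d (4KiB) DMA page frame numbers and CPU (mm) page frame numbers. */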
71 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
73 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
76 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
78 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
80 static inline unsigned long page_to_dma_pfn(struct page *pg)
82 return mm_to_dma_pfn(page_to_pfn(pg));
84 static inline unsigned long virt_to_dma_pfn(void *p)
86 return page_to_dma_pfn(virt_to_page(p));
89 /* global iommu list, set NULL for ignored DMAR units */
90 static struct intel_iommu **g_iommus;
92 static int rwbf_quirk;
97 * 12-63: Context Ptr (12 - (haw-1))
104 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
105 static inline bool root_present(struct root_entry *root)
107 return (root->val & 1);
109 static inline void set_root_present(struct root_entry *root)
113 static inline void set_root_value(struct root_entry *root, unsigned long value)
115 root->val |= value & VTD_PAGE_MASK;
118 static inline struct context_entry *
119 get_context_addr_from_root(struct root_entry *root)
121 return (struct context_entry *)
122 (root_present(root)?phys_to_virt(
123 root->val & VTD_PAGE_MASK) :
130 * 1: fault processing disable
131 * 2-3: translation type
132 * 12-63: address space root
138 struct context_entry {
143 static inline bool context_present(struct context_entry *context)
145 return (context->lo & 1);
147 static inline void context_set_present(struct context_entry *context)
152 static inline void context_set_fault_enable(struct context_entry *context)
154 context->lo &= (((u64)-1) << 2) | 1;
157 static inline void context_set_translation_type(struct context_entry *context,
160 context->lo &= (((u64)-1) << 4) | 3;
161 context->lo |= (value & 3) << 2;
164 static inline void context_set_address_root(struct context_entry *context,
167 context->lo |= value & VTD_PAGE_MASK;
170 static inline void context_set_address_width(struct context_entry *context,
173 context->hi |= value & 7;
176 static inline void context_set_domain_id(struct context_entry *context,
179 context->hi |= (value & ((1 << 16) - 1)) << 8;
182 static inline void context_clear_entry(struct context_entry *context)
195 * 12-63: Host physical address
201 static inline void dma_clear_pte(struct dma_pte *pte)
206 static inline void dma_set_pte_readable(struct dma_pte *pte)
208 pte->val |= DMA_PTE_READ;
211 static inline void dma_set_pte_writable(struct dma_pte *pte)
213 pte->val |= DMA_PTE_WRITE;
216 static inline void dma_set_pte_snp(struct dma_pte *pte)
218 pte->val |= DMA_PTE_SNP;
221 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
223 pte->val = (pte->val & ~3) | (prot & 3);
226 static inline u64 dma_pte_addr(struct dma_pte *pte)
228 return (pte->val & VTD_PAGE_MASK);
231 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
233 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
236 static inline bool dma_pte_present(struct dma_pte *pte)
238 return (pte->val & 3) != 0;
242 * This domain is a static identity mapping domain.
243 * 1. This domain creates a static 1:1 mapping of all usable memory.
244 * 2. It maps to each iommu if successful.
245 * 3. Each iommu maps to this domain if successful.
247 struct dmar_domain *si_domain;
249 /* devices under the same p2p bridge are owned in one domain */
250 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
252 /* domain represents a virtual machine; more than one device
253 * across iommus may be owned by one domain, e.g. a kvm guest.
255 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
257 /* si_domain contains multiple devices */
258 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
261 int id; /* domain id */
262 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
264 struct list_head devices; /* all devices' list */
265 struct iova_domain iovad; /* iova's that belong to this domain */
267 struct dma_pte *pgd; /* virtual address */
268 spinlock_t mapping_lock; /* page table lock */
269 int gaw; /* max guest address width */
271 /* adjusted guest address width, 0 is level 2 30-bit */
274 int flags; /* flags to find out type of domain */
276 int iommu_coherency;/* indicate coherency of iommu access */
277 int iommu_snooping; /* indicate snooping control feature*/
278 int iommu_count; /* reference count of iommu */
279 spinlock_t iommu_lock; /* protect iommu set in domain */
280 u64 max_addr; /* maximum mapped address */
283 /* PCI domain-device relationship */
284 struct device_domain_info {
285 struct list_head link; /* link to domain siblings */
286 struct list_head global; /* link to global list */
287 int segment; /* PCI domain */
288 u8 bus; /* PCI bus number */
289 u8 devfn; /* PCI devfn number */
290 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
291 struct intel_iommu *iommu; /* IOMMU used by this device */
292 struct dmar_domain *domain; /* pointer to domain */
295 static void flush_unmaps_timeout(unsigned long data);
297 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
299 #define HIGH_WATER_MARK 250
300 struct deferred_flush_tables {
302 struct iova *iova[HIGH_WATER_MARK];
303 struct dmar_domain *domain[HIGH_WATER_MARK];
306 static struct deferred_flush_tables *deferred_flush;
308 /* bitmap for indexing intel_iommus */
309 static int g_num_of_iommus;
311 static DEFINE_SPINLOCK(async_umap_flush_lock);
312 static LIST_HEAD(unmaps_to_do);
315 static long list_size;
317 static void domain_remove_dev_info(struct dmar_domain *domain);
319 #ifdef CONFIG_DMAR_DEFAULT_ON
320 int dmar_disabled = 0;
322 int dmar_disabled = 1;
323 #endif /*CONFIG_DMAR_DEFAULT_ON*/
325 static int __initdata dmar_map_gfx = 1;
326 static int dmar_forcedac;
327 static int intel_iommu_strict;
329 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
330 static DEFINE_SPINLOCK(device_domain_lock);
331 static LIST_HEAD(device_domain_list);
333 static struct iommu_ops intel_iommu_ops;
335 static int __init intel_iommu_setup(char *str)
340 if (!strncmp(str, "on", 2)) {
342 printk(KERN_INFO "Intel-IOMMU: enabled\n");
343 } else if (!strncmp(str, "off", 3)) {
345 printk(KERN_INFO "Intel-IOMMU: disabled\n");
346 } else if (!strncmp(str, "igfx_off", 8)) {
349 "Intel-IOMMU: disable GFX device mapping\n");
350 } else if (!strncmp(str, "forcedac", 8)) {
352 "Intel-IOMMU: Forcing DAC for PCI devices\n");
354 } else if (!strncmp(str, "strict", 6)) {
356 "Intel-IOMMU: disable batched IOTLB flush\n");
357 intel_iommu_strict = 1;
360 str += strcspn(str, ",");
366 __setup("intel_iommu=", intel_iommu_setup);
368 static struct kmem_cache *iommu_domain_cache;
369 static struct kmem_cache *iommu_devinfo_cache;
370 static struct kmem_cache *iommu_iova_cache;
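/*
 * Allocate from a kmem cache with PF_MEMALLOC temporarily set, so the
 * allocation can dip into memory reserves under low-memory conditions.
 */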
372 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
377 /* trying to avoid low memory issues */
378 flags = current->flags & PF_MEMALLOC;
379 current->flags |= PF_MEMALLOC;
380 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
381 current->flags &= (~PF_MEMALLOC | flags);
386 static inline void *alloc_pgtable_page(void)
391 /* trying to avoid low memory issues */
392 flags = current->flags & PF_MEMALLOC;
393 current->flags |= PF_MEMALLOC;
394 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
395 current->flags &= (~PF_MEMALLOC | flags);
399 static inline void free_pgtable_page(void *vaddr)
401 free_page((unsigned long)vaddr);
404 static inline void *alloc_domain_mem(void)
406 return iommu_kmem_cache_alloc(iommu_domain_cache);
409 static void free_domain_mem(void *vaddr)
411 kmem_cache_free(iommu_domain_cache, vaddr);
414 static inline void * alloc_devinfo_mem(void)
416 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
419 static inline void free_devinfo_mem(void *vaddr)
421 kmem_cache_free(iommu_devinfo_cache, vaddr);
424 struct iova *alloc_iova_mem(void)
426 return iommu_kmem_cache_alloc(iommu_iova_cache);
429 void free_iova_mem(struct iova *iova)
431 kmem_cache_free(iommu_iova_cache, iova);
435 static inline int width_to_agaw(int width);
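/*
 * Return the widest AGAW supported by this iommu (per the SAGAW capability)
 * that does not exceed max_gaw.
 */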
437 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
442 sagaw = cap_sagaw(iommu->cap);
443 for (agaw = width_to_agaw(max_gaw);
445 if (test_bit(agaw, &sagaw))
453 * Calculate max SAGAW for each iommu.
455 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
457 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
461 * Calculate agaw for each iommu.
462 * "SAGAW" may be different across iommus; use a default agaw, and
463 * fall back to a smaller supported agaw for iommus that don't support the default agaw.
465 int iommu_calculate_agaw(struct intel_iommu *iommu)
467 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
470 /* This function only returns a single iommu in a domain */
471 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
475 /* si_domain and vm domain should not get here. */
476 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
477 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
479 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
480 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
483 return g_iommus[iommu_id];
486 static void domain_update_iommu_coherency(struct dmar_domain *domain)
490 domain->iommu_coherency = 1;
492 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
493 for (; i < g_num_of_iommus; ) {
494 if (!ecap_coherent(g_iommus[i]->ecap)) {
495 domain->iommu_coherency = 0;
498 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
502 static void domain_update_iommu_snooping(struct dmar_domain *domain)
506 domain->iommu_snooping = 1;
508 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
509 for (; i < g_num_of_iommus; ) {
510 if (!ecap_sc_support(g_iommus[i]->ecap)) {
511 domain->iommu_snooping = 0;
514 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
518 /* Some capabilities may be different across iommus */
519 static void domain_update_iommu_cap(struct dmar_domain *domain)
521 domain_update_iommu_coherency(domain);
522 domain_update_iommu_snooping(domain);
525 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
527 struct dmar_drhd_unit *drhd = NULL;
530 for_each_drhd_unit(drhd) {
533 if (segment != drhd->segment)
536 for (i = 0; i < drhd->devices_cnt; i++) {
537 if (drhd->devices[i] &&
538 drhd->devices[i]->bus->number == bus &&
539 drhd->devices[i]->devfn == devfn)
541 if (drhd->devices[i] &&
542 drhd->devices[i]->subordinate &&
543 drhd->devices[i]->subordinate->number <= bus &&
544 drhd->devices[i]->subordinate->subordinate >= bus)
548 if (drhd->include_all)
555 static void domain_flush_cache(struct dmar_domain *domain,
556 void *addr, int size)
558 if (!domain->iommu_coherency)
559 clflush_cache_range(addr, size);
562 /* Gets context entry for a given bus and devfn */
563 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
566 struct root_entry *root;
567 struct context_entry *context;
568 unsigned long phy_addr;
571 spin_lock_irqsave(&iommu->lock, flags);
572 root = &iommu->root_entry[bus];
573 context = get_context_addr_from_root(root);
575 context = (struct context_entry *)alloc_pgtable_page();
577 spin_unlock_irqrestore(&iommu->lock, flags);
580 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
581 phy_addr = virt_to_phys((void *)context);
582 set_root_value(root, phy_addr);
583 set_root_present(root);
584 __iommu_flush_cache(iommu, root, sizeof(*root));
586 spin_unlock_irqrestore(&iommu->lock, flags);
587 return &context[devfn];
590 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
592 struct root_entry *root;
593 struct context_entry *context;
597 spin_lock_irqsave(&iommu->lock, flags);
598 root = &iommu->root_entry[bus];
599 context = get_context_addr_from_root(root);
604 ret = context_present(&context[devfn]);
606 spin_unlock_irqrestore(&iommu->lock, flags);
610 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
612 struct root_entry *root;
613 struct context_entry *context;
616 spin_lock_irqsave(&iommu->lock, flags);
617 root = &iommu->root_entry[bus];
618 context = get_context_addr_from_root(root);
620 context_clear_entry(&context[devfn]);
621 __iommu_flush_cache(iommu, &context[devfn], \
624 spin_unlock_irqrestore(&iommu->lock, flags);
627 static void free_context_table(struct intel_iommu *iommu)
629 struct root_entry *root;
632 struct context_entry *context;
634 spin_lock_irqsave(&iommu->lock, flags);
635 if (!iommu->root_entry) {
638 for (i = 0; i < ROOT_ENTRY_NR; i++) {
639 root = &iommu->root_entry[i];
640 context = get_context_addr_from_root(root);
642 free_pgtable_page(context);
644 free_pgtable_page(iommu->root_entry);
645 iommu->root_entry = NULL;
647 spin_unlock_irqrestore(&iommu->lock, flags);
650 /* page table handling */
651 #define LEVEL_STRIDE (9)
652 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
654 static inline int agaw_to_level(int agaw)
659 static inline int agaw_to_width(int agaw)
661 return 30 + agaw * LEVEL_STRIDE;
665 static inline int width_to_agaw(int width)
667 return (width - 30) / LEVEL_STRIDE;
670 static inline unsigned int level_to_offset_bits(int level)
672 return (level - 1) * LEVEL_STRIDE;
675 static inline int pfn_level_offset(unsigned long pfn, int level)
677 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
680 static inline unsigned long level_mask(int level)
682 return -1UL << level_to_offset_bits(level);
685 static inline unsigned long level_size(int level)
687 return 1UL << level_to_offset_bits(level);
690 static inline unsigned long align_to_level(unsigned long pfn, int level)
692 return (pfn + level_size(level) - 1) & level_mask(level);
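/*
 * Walk the page table for @addr, allocating missing intermediate
 * page-table pages on the way down, and return the leaf PTE.
 */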
695 static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
697 int addr_width = agaw_to_width(domain->agaw);
698 struct dma_pte *parent, *pte = NULL;
699 int level = agaw_to_level(domain->agaw);
703 BUG_ON(!domain->pgd);
704 BUG_ON(addr >> addr_width);
705 parent = domain->pgd;
707 spin_lock_irqsave(&domain->mapping_lock, flags);
711 offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, level);
712 pte = &parent[offset];
716 if (!dma_pte_present(pte)) {
717 tmp_page = alloc_pgtable_page();
720 spin_unlock_irqrestore(&domain->mapping_lock,
724 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
725 dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
727 * higher level tables always set r/w; the last level page
728 * table controls read/write
730 dma_set_pte_readable(pte);
731 dma_set_pte_writable(pte);
732 domain_flush_cache(domain, pte, sizeof(*pte));
734 parent = phys_to_virt(dma_pte_addr(pte));
738 spin_unlock_irqrestore(&domain->mapping_lock, flags);
742 /* return address's pte at specific level */
743 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
747 struct dma_pte *parent, *pte = NULL;
748 int total = agaw_to_level(domain->agaw);
751 parent = domain->pgd;
752 while (level <= total) {
753 offset = pfn_level_offset(pfn, total);
754 pte = &parent[offset];
758 if (!dma_pte_present(pte))
760 parent = phys_to_virt(dma_pte_addr(pte));
766 /* clear one page's page table */
767 static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
769 struct dma_pte *pte = NULL;
771 /* get last level pte */
772 pte = dma_pfn_level_pte(domain, pfn, 1);
776 domain_flush_cache(domain, pte, sizeof(*pte));
780 /* clear last level pte; a tlb flush should follow */
781 static void dma_pte_clear_range(struct dmar_domain *domain,
782 unsigned long start_pfn,
783 unsigned long last_pfn)
785 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
787 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
788 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
790 /* we don't need lock here; nobody else touches the iova range */
791 while (start_pfn <= last_pfn) {
792 dma_pte_clear_one(domain, start_pfn);
797 /* free page table pages. last level pte should already be cleared */
798 static void dma_pte_free_pagetable(struct dmar_domain *domain,
801 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
802 unsigned long start_pfn = start >> VTD_PAGE_SHIFT;
803 unsigned long last_pfn = (end-1) >> VTD_PAGE_SHIFT;
805 int total = agaw_to_level(domain->agaw);
809 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
810 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
812 /* we don't need lock here, nobody else touches the iova range */
814 while (level <= total) {
815 tmp = align_to_level(start_pfn, level);
817 /* Only clear this pte/pmd if we're asked to clear its _entire_ range */
819 if (tmp + level_size(level) - 1 > last_pfn)
822 while (tmp <= last_pfn) {
823 pte = dma_pfn_level_pte(domain, tmp, level);
826 phys_to_virt(dma_pte_addr(pte)));
828 domain_flush_cache(domain, pte, sizeof(*pte));
830 tmp += level_size(level);
835 if (start == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
836 free_pgtable_page(domain->pgd);
842 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
844 struct root_entry *root;
847 root = (struct root_entry *)alloc_pgtable_page();
851 __iommu_flush_cache(iommu, root, ROOT_SIZE);
853 spin_lock_irqsave(&iommu->lock, flags);
854 iommu->root_entry = root;
855 spin_unlock_irqrestore(&iommu->lock, flags);
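/*
 * Program the root-entry table address into the RTADDR register and issue
 * the Set Root Table Pointer command, waiting for the hardware to complete it.
 */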
860 static void iommu_set_root_entry(struct intel_iommu *iommu)
866 addr = iommu->root_entry;
868 spin_lock_irqsave(&iommu->register_lock, flag);
869 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
871 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
873 /* Make sure hardware complete it */
874 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
875 readl, (sts & DMA_GSTS_RTPS), sts);
877 spin_unlock_irqrestore(&iommu->register_lock, flag);
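/* Flush the IOMMU write buffer, but only if the hardware requires it (cap_rwbf or the rwbf quirk). */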
880 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
885 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
888 spin_lock_irqsave(&iommu->register_lock, flag);
889 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
891 /* Make sure hardware complete it */
892 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
893 readl, (!(val & DMA_GSTS_WBFS)), val);
895 spin_unlock_irqrestore(&iommu->register_lock, flag);
898 /* return value determines whether we need a write buffer flush */
899 static void __iommu_flush_context(struct intel_iommu *iommu,
900 u16 did, u16 source_id, u8 function_mask,
907 case DMA_CCMD_GLOBAL_INVL:
908 val = DMA_CCMD_GLOBAL_INVL;
910 case DMA_CCMD_DOMAIN_INVL:
911 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
913 case DMA_CCMD_DEVICE_INVL:
914 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
915 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
922 spin_lock_irqsave(&iommu->register_lock, flag);
923 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
925 /* Make sure hardware complete it */
926 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
927 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
929 spin_unlock_irqrestore(&iommu->register_lock, flag);
932 /* return value determines whether we need a write buffer flush */
933 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
934 u64 addr, unsigned int size_order, u64 type)
936 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
937 u64 val = 0, val_iva = 0;
941 case DMA_TLB_GLOBAL_FLUSH:
942 /* global flush doesn't need to set IVA_REG */
943 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
945 case DMA_TLB_DSI_FLUSH:
946 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
948 case DMA_TLB_PSI_FLUSH:
949 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
950 /* Note: always flush non-leaf currently */
951 val_iva = size_order | addr;
956 /* Note: set drain read/write */
959 * This is probably meant to be extra secure. It looks like we can
960 * ignore it without any impact.
962 if (cap_read_drain(iommu->cap))
963 val |= DMA_TLB_READ_DRAIN;
965 if (cap_write_drain(iommu->cap))
966 val |= DMA_TLB_WRITE_DRAIN;
968 spin_lock_irqsave(&iommu->register_lock, flag);
969 /* Note: Only uses first TLB reg currently */
971 dmar_writeq(iommu->reg + tlb_offset, val_iva);
972 dmar_writeq(iommu->reg + tlb_offset + 8, val);
974 /* Make sure hardware complete it */
975 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
976 dmar_readq, (!(val & DMA_TLB_IVT)), val);
978 spin_unlock_irqrestore(&iommu->register_lock, flag);
980 /* check IOTLB invalidation granularity */
981 if (DMA_TLB_IAIG(val) == 0)
982 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
983 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
984 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
985 (unsigned long long)DMA_TLB_IIRG(type),
986 (unsigned long long)DMA_TLB_IAIG(val));
989 static struct device_domain_info *iommu_support_dev_iotlb(
990 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
994 struct device_domain_info *info;
995 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
997 if (!ecap_dev_iotlb_support(iommu->ecap))
1003 spin_lock_irqsave(&device_domain_lock, flags);
1004 list_for_each_entry(info, &domain->devices, link)
1005 if (info->bus == bus && info->devfn == devfn) {
1009 spin_unlock_irqrestore(&device_domain_lock, flags);
1011 if (!found || !info->dev)
1014 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1017 if (!dmar_find_matched_atsr_unit(info->dev))
1020 info->iommu = iommu;
1025 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1030 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1033 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1035 if (!info->dev || !pci_ats_enabled(info->dev))
1038 pci_disable_ats(info->dev);
1041 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1042 u64 addr, unsigned mask)
1045 unsigned long flags;
1046 struct device_domain_info *info;
1048 spin_lock_irqsave(&device_domain_lock, flags);
1049 list_for_each_entry(info, &domain->devices, link) {
1050 if (!info->dev || !pci_ats_enabled(info->dev))
1053 sid = info->bus << 8 | info->devfn;
1054 qdep = pci_ats_queue_depth(info->dev);
1055 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1057 spin_unlock_irqrestore(&device_domain_lock, flags);
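/* Page-selective IOTLB invalidation of @pages pages at @addr for domain @did. */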
1060 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1061 u64 addr, unsigned int pages)
1063 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1065 BUG_ON(addr & (~VTD_PAGE_MASK));
1069 * Fall back to domain selective flush if there is no PSI support or the size is too big.
1071 * PSI requires page size to be 2 ^ x, and the base address is naturally
1072 * aligned to the size
1074 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1075 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1078 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1082 * In caching mode, domain ID 0 is reserved for non-present to present
1083 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1085 if (!cap_caching_mode(iommu->cap) || did)
1086 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
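/*
 * Clear the Enable Protected Memory bit so DMA to the protected memory
 * regions is no longer blocked, then wait for the status bit to clear.
 */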
1089 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1092 unsigned long flags;
1094 spin_lock_irqsave(&iommu->register_lock, flags);
1095 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1096 pmen &= ~DMA_PMEN_EPM;
1097 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1099 /* wait for the protected region status bit to clear */
1100 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1101 readl, !(pmen & DMA_PMEN_PRS), pmen);
1103 spin_unlock_irqrestore(&iommu->register_lock, flags);
1106 static int iommu_enable_translation(struct intel_iommu *iommu)
1109 unsigned long flags;
1111 spin_lock_irqsave(&iommu->register_lock, flags);
1112 iommu->gcmd |= DMA_GCMD_TE;
1113 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1115 /* Make sure hardware complete it */
1116 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1117 readl, (sts & DMA_GSTS_TES), sts);
1119 spin_unlock_irqrestore(&iommu->register_lock, flags);
1123 static int iommu_disable_translation(struct intel_iommu *iommu)
1128 spin_lock_irqsave(&iommu->register_lock, flag);
1129 iommu->gcmd &= ~DMA_GCMD_TE;
1130 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1132 /* Make sure hardware complete it */
1133 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1134 readl, (!(sts & DMA_GSTS_TES)), sts);
1136 spin_unlock_irqrestore(&iommu->register_lock, flag);
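/*
 * Allocate the per-iommu domain-id bitmap and domain pointer array,
 * sized from the ndoms capability.
 */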
1141 static int iommu_init_domains(struct intel_iommu *iommu)
1143 unsigned long ndomains;
1144 unsigned long nlongs;
1146 ndomains = cap_ndoms(iommu->cap);
1147 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1148 nlongs = BITS_TO_LONGS(ndomains);
1150 /* TBD: there might be 64K domains,
1151 * consider other allocation for future chip
1153 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1154 if (!iommu->domain_ids) {
1155 printk(KERN_ERR "Allocating domain id array failed\n");
1158 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1160 if (!iommu->domains) {
1161 printk(KERN_ERR "Allocating domain array failed\n");
1162 kfree(iommu->domain_ids);
1166 spin_lock_init(&iommu->lock);
1169 * if Caching mode is set, then invalid translations are tagged
1170 * with domainid 0. Hence we need to pre-allocate it.
1172 if (cap_caching_mode(iommu->cap))
1173 set_bit(0, iommu->domain_ids);
1178 static void domain_exit(struct dmar_domain *domain);
1179 static void vm_domain_exit(struct dmar_domain *domain);
1181 void free_dmar_iommu(struct intel_iommu *iommu)
1183 struct dmar_domain *domain;
1185 unsigned long flags;
1187 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1188 for (; i < cap_ndoms(iommu->cap); ) {
1189 domain = iommu->domains[i];
1190 clear_bit(i, iommu->domain_ids);
1192 spin_lock_irqsave(&domain->iommu_lock, flags);
1193 if (--domain->iommu_count == 0) {
1194 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1195 vm_domain_exit(domain);
1197 domain_exit(domain);
1199 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1201 i = find_next_bit(iommu->domain_ids,
1202 cap_ndoms(iommu->cap), i+1);
1205 if (iommu->gcmd & DMA_GCMD_TE)
1206 iommu_disable_translation(iommu);
1209 set_irq_data(iommu->irq, NULL);
1210 /* This will mask the irq */
1211 free_irq(iommu->irq, iommu);
1212 destroy_irq(iommu->irq);
1215 kfree(iommu->domains);
1216 kfree(iommu->domain_ids);
1218 g_iommus[iommu->seq_id] = NULL;
1220 /* if all iommus are freed, free g_iommus */
1221 for (i = 0; i < g_num_of_iommus; i++) {
1226 if (i == g_num_of_iommus)
1229 /* free context mapping */
1230 free_context_table(iommu);
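/*
 * Allocate a bare dmar_domain with an empty iommu bitmap; the caller
 * attaches it to an iommu and initializes it.
 */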
1233 static struct dmar_domain *alloc_domain(void)
1235 struct dmar_domain *domain;
1237 domain = alloc_domain_mem();
1241 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1247 static int iommu_attach_domain(struct dmar_domain *domain,
1248 struct intel_iommu *iommu)
1251 unsigned long ndomains;
1252 unsigned long flags;
1254 ndomains = cap_ndoms(iommu->cap);
1256 spin_lock_irqsave(&iommu->lock, flags);
1258 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1259 if (num >= ndomains) {
1260 spin_unlock_irqrestore(&iommu->lock, flags);
1261 printk(KERN_ERR "IOMMU: no free domain ids\n");
1266 set_bit(num, iommu->domain_ids);
1267 set_bit(iommu->seq_id, &domain->iommu_bmp);
1268 iommu->domains[num] = domain;
1269 spin_unlock_irqrestore(&iommu->lock, flags);
1274 static void iommu_detach_domain(struct dmar_domain *domain,
1275 struct intel_iommu *iommu)
1277 unsigned long flags;
1281 spin_lock_irqsave(&iommu->lock, flags);
1282 ndomains = cap_ndoms(iommu->cap);
1283 num = find_first_bit(iommu->domain_ids, ndomains);
1284 for (; num < ndomains; ) {
1285 if (iommu->domains[num] == domain) {
1289 num = find_next_bit(iommu->domain_ids,
1290 cap_ndoms(iommu->cap), num+1);
1294 clear_bit(num, iommu->domain_ids);
1295 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1296 iommu->domains[num] = NULL;
1298 spin_unlock_irqrestore(&iommu->lock, flags);
1301 static struct iova_domain reserved_iova_list;
1302 static struct lock_class_key reserved_alloc_key;
1303 static struct lock_class_key reserved_rbtree_key;
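/*
 * Reserve IOVA ranges that must never be handed out for DMA: the IOAPIC
 * MMIO window and all PCI MMIO resources (to avoid peer-to-peer access).
 */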
1305 static void dmar_init_reserved_ranges(void)
1307 struct pci_dev *pdev = NULL;
1312 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1314 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1315 &reserved_alloc_key);
1316 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1317 &reserved_rbtree_key);
1319 /* IOAPIC ranges shouldn't be accessed by DMA */
1320 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1321 IOVA_PFN(IOAPIC_RANGE_END));
1323 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1325 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1326 for_each_pci_dev(pdev) {
1329 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1330 r = &pdev->resource[i];
1331 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1334 addr &= PHYSICAL_PAGE_MASK;
1335 size = r->end - addr;
1336 size = PAGE_ALIGN(size);
1337 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1338 IOVA_PFN(size + addr) - 1);
1340 printk(KERN_ERR "Reserve iova failed\n");
1346 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1348 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1351 static inline int guestwidth_to_adjustwidth(int gaw)
1354 int r = (gaw - 12) % 9;
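/*
 * Initialize a newly attached domain: set up its iova allocator, pick a
 * supported AGAW for the requested guest width, and allocate the top pgd.
 */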
1365 static int domain_init(struct dmar_domain *domain, int guest_width)
1367 struct intel_iommu *iommu;
1368 int adjust_width, agaw;
1369 unsigned long sagaw;
1371 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1372 spin_lock_init(&domain->mapping_lock);
1373 spin_lock_init(&domain->iommu_lock);
1375 domain_reserve_special_ranges(domain);
1377 /* calculate AGAW */
1378 iommu = domain_get_iommu(domain);
1379 if (guest_width > cap_mgaw(iommu->cap))
1380 guest_width = cap_mgaw(iommu->cap);
1381 domain->gaw = guest_width;
1382 adjust_width = guestwidth_to_adjustwidth(guest_width);
1383 agaw = width_to_agaw(adjust_width);
1384 sagaw = cap_sagaw(iommu->cap);
1385 if (!test_bit(agaw, &sagaw)) {
1386 /* hardware doesn't support it, choose a bigger one */
1387 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1388 agaw = find_next_bit(&sagaw, 5, agaw);
1392 domain->agaw = agaw;
1393 INIT_LIST_HEAD(&domain->devices);
1395 if (ecap_coherent(iommu->ecap))
1396 domain->iommu_coherency = 1;
1398 domain->iommu_coherency = 0;
1400 if (ecap_sc_support(iommu->ecap))
1401 domain->iommu_snooping = 1;
1403 domain->iommu_snooping = 0;
1405 domain->iommu_count = 1;
1407 /* always allocate the top pgd */
1408 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1411 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1415 static void domain_exit(struct dmar_domain *domain)
1417 struct dmar_drhd_unit *drhd;
1418 struct intel_iommu *iommu;
1421 /* Domain 0 is reserved, so don't process it */
1425 domain_remove_dev_info(domain);
1427 put_iova_domain(&domain->iovad);
1428 end = DOMAIN_MAX_ADDR(domain->gaw);
1429 end = end & (~PAGE_MASK);
1432 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1434 /* free page tables */
1435 dma_pte_free_pagetable(domain, 0, end);
1437 for_each_active_iommu(iommu, drhd)
1438 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1439 iommu_detach_domain(domain, iommu);
1441 free_domain_mem(domain);
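/*
 * Install a context entry for (segment, bus, devfn) pointing at this
 * domain's page tables, then flush the context cache and IOTLB as required.
 */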
1444 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1445 u8 bus, u8 devfn, int translation)
1447 struct context_entry *context;
1448 unsigned long flags;
1449 struct intel_iommu *iommu;
1450 struct dma_pte *pgd;
1452 unsigned long ndomains;
1455 struct device_domain_info *info = NULL;
1457 pr_debug("Set context mapping for %02x:%02x.%d\n",
1458 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1460 BUG_ON(!domain->pgd);
1461 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1462 translation != CONTEXT_TT_MULTI_LEVEL);
1464 iommu = device_to_iommu(segment, bus, devfn);
1468 context = device_to_context_entry(iommu, bus, devfn);
1471 spin_lock_irqsave(&iommu->lock, flags);
1472 if (context_present(context)) {
1473 spin_unlock_irqrestore(&iommu->lock, flags);
1480 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1481 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1484 /* find an available domain id for this device in iommu */
1485 ndomains = cap_ndoms(iommu->cap);
1486 num = find_first_bit(iommu->domain_ids, ndomains);
1487 for (; num < ndomains; ) {
1488 if (iommu->domains[num] == domain) {
1493 num = find_next_bit(iommu->domain_ids,
1494 cap_ndoms(iommu->cap), num+1);
1498 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1499 if (num >= ndomains) {
1500 spin_unlock_irqrestore(&iommu->lock, flags);
1501 printk(KERN_ERR "IOMMU: no free domain ids\n");
1505 set_bit(num, iommu->domain_ids);
1506 set_bit(iommu->seq_id, &domain->iommu_bmp);
1507 iommu->domains[num] = domain;
1511 /* Skip top levels of page tables for
1512 * an iommu which has a smaller agaw than the default.
1514 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1515 pgd = phys_to_virt(dma_pte_addr(pgd));
1516 if (!dma_pte_present(pgd)) {
1517 spin_unlock_irqrestore(&iommu->lock, flags);
1523 context_set_domain_id(context, id);
1525 if (translation != CONTEXT_TT_PASS_THROUGH) {
1526 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1527 translation = info ? CONTEXT_TT_DEV_IOTLB :
1528 CONTEXT_TT_MULTI_LEVEL;
1531 * In pass through mode, AW must be programmed to indicate the largest
1532 * AGAW value supported by hardware. And ASR is ignored by hardware.
1534 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1535 context_set_address_width(context, iommu->msagaw);
1537 context_set_address_root(context, virt_to_phys(pgd));
1538 context_set_address_width(context, iommu->agaw);
1541 context_set_translation_type(context, translation);
1542 context_set_fault_enable(context);
1543 context_set_present(context);
1544 domain_flush_cache(domain, context, sizeof(*context));
1547 * It's a non-present to present mapping. If the hardware doesn't cache
1548 * non-present entries we only need to flush the write-buffer. If it
1549 * _does_ cache non-present entries, then it does so in the special
1550 * domain #0, which we have to flush:
1552 if (cap_caching_mode(iommu->cap)) {
1553 iommu->flush.flush_context(iommu, 0,
1554 (((u16)bus) << 8) | devfn,
1555 DMA_CCMD_MASK_NOBIT,
1556 DMA_CCMD_DEVICE_INVL);
1557 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1559 iommu_flush_write_buffer(iommu);
1561 iommu_enable_dev_iotlb(info);
1562 spin_unlock_irqrestore(&iommu->lock, flags);
1564 spin_lock_irqsave(&domain->iommu_lock, flags);
1565 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1566 domain->iommu_count++;
1567 domain_update_iommu_cap(domain);
1569 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1574 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1578 struct pci_dev *tmp, *parent;
1580 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1581 pdev->bus->number, pdev->devfn,
1586 /* dependent device mapping */
1587 tmp = pci_find_upstream_pcie_bridge(pdev);
1590 /* Secondary interface's bus number and devfn 0 */
1591 parent = pdev->bus->self;
1592 while (parent != tmp) {
1593 ret = domain_context_mapping_one(domain,
1594 pci_domain_nr(parent->bus),
1595 parent->bus->number,
1596 parent->devfn, translation);
1599 parent = parent->bus->self;
1601 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1602 return domain_context_mapping_one(domain,
1603 pci_domain_nr(tmp->subordinate),
1604 tmp->subordinate->number, 0,
1606 else /* this is a legacy PCI bridge */
1607 return domain_context_mapping_one(domain,
1608 pci_domain_nr(tmp->bus),
1614 static int domain_context_mapped(struct pci_dev *pdev)
1617 struct pci_dev *tmp, *parent;
1618 struct intel_iommu *iommu;
1620 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1625 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1628 /* dependent device mapping */
1629 tmp = pci_find_upstream_pcie_bridge(pdev);
1632 /* Secondary interface's bus number and devfn 0 */
1633 parent = pdev->bus->self;
1634 while (parent != tmp) {
1635 ret = device_context_mapped(iommu, parent->bus->number,
1639 parent = parent->bus->self;
1642 return device_context_mapped(iommu, tmp->subordinate->number,
1645 return device_context_mapped(iommu, tmp->bus->number,
1650 domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1651 u64 hpa, size_t size, int prot)
1653 u64 start_pfn, end_pfn;
1654 struct dma_pte *pte;
1656 int addr_width = agaw_to_width(domain->agaw);
1658 BUG_ON(hpa >> addr_width);
1660 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1663 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1664 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1666 while (start_pfn < end_pfn) {
1667 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1670 /* We don't need lock here, nobody else
1671 * touches the iova range
1673 BUG_ON(dma_pte_addr(pte));
1674 dma_set_pte_pfn(pte, start_pfn);
1675 dma_set_pte_prot(pte, prot);
1676 if (prot & DMA_PTE_SNP)
1677 dma_set_pte_snp(pte);
1678 domain_flush_cache(domain, pte, sizeof(*pte));
1685 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1690 clear_context_table(iommu, bus, devfn);
1691 iommu->flush.flush_context(iommu, 0, 0, 0,
1692 DMA_CCMD_GLOBAL_INVL);
1693 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1696 static void domain_remove_dev_info(struct dmar_domain *domain)
1698 struct device_domain_info *info;
1699 unsigned long flags;
1700 struct intel_iommu *iommu;
1702 spin_lock_irqsave(&device_domain_lock, flags);
1703 while (!list_empty(&domain->devices)) {
1704 info = list_entry(domain->devices.next,
1705 struct device_domain_info, link);
1706 list_del(&info->link);
1707 list_del(&info->global);
1709 info->dev->dev.archdata.iommu = NULL;
1710 spin_unlock_irqrestore(&device_domain_lock, flags);
1712 iommu_disable_dev_iotlb(info);
1713 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1714 iommu_detach_dev(iommu, info->bus, info->devfn);
1715 free_devinfo_mem(info);
1717 spin_lock_irqsave(&device_domain_lock, flags);
1719 spin_unlock_irqrestore(&device_domain_lock, flags);
1724 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1726 static struct dmar_domain *
1727 find_domain(struct pci_dev *pdev)
1729 struct device_domain_info *info;
1731 /* No lock here, assumes no domain exit in normal case */
1732 info = pdev->dev.archdata.iommu;
1734 return info->domain;
1738 /* domain is initialized */
1739 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1741 struct dmar_domain *domain, *found = NULL;
1742 struct intel_iommu *iommu;
1743 struct dmar_drhd_unit *drhd;
1744 struct device_domain_info *info, *tmp;
1745 struct pci_dev *dev_tmp;
1746 unsigned long flags;
1747 int bus = 0, devfn = 0;
1751 domain = find_domain(pdev);
1755 segment = pci_domain_nr(pdev->bus);
1757 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1759 if (dev_tmp->is_pcie) {
1760 bus = dev_tmp->subordinate->number;
1763 bus = dev_tmp->bus->number;
1764 devfn = dev_tmp->devfn;
1766 spin_lock_irqsave(&device_domain_lock, flags);
1767 list_for_each_entry(info, &device_domain_list, global) {
1768 if (info->segment == segment &&
1769 info->bus == bus && info->devfn == devfn) {
1770 found = info->domain;
1774 spin_unlock_irqrestore(&device_domain_lock, flags);
1775 /* the pcie-to-pci bridge already has a domain, use it */
1782 domain = alloc_domain();
1786 /* Allocate new domain for the device */
1787 drhd = dmar_find_matched_drhd_unit(pdev);
1789 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1793 iommu = drhd->iommu;
1795 ret = iommu_attach_domain(domain, iommu);
1797 domain_exit(domain);
1801 if (domain_init(domain, gaw)) {
1802 domain_exit(domain);
1806 /* register pcie-to-pci device */
1808 info = alloc_devinfo_mem();
1810 domain_exit(domain);
1813 info->segment = segment;
1815 info->devfn = devfn;
1817 info->domain = domain;
1818 /* This domain is shared by devices under p2p bridge */
1819 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1821 /* the pcie-to-pci bridge already has a domain, use it */
1823 spin_lock_irqsave(&device_domain_lock, flags);
1824 list_for_each_entry(tmp, &device_domain_list, global) {
1825 if (tmp->segment == segment &&
1826 tmp->bus == bus && tmp->devfn == devfn) {
1827 found = tmp->domain;
1832 free_devinfo_mem(info);
1833 domain_exit(domain);
1836 list_add(&info->link, &domain->devices);
1837 list_add(&info->global, &device_domain_list);
1839 spin_unlock_irqrestore(&device_domain_lock, flags);
1843 info = alloc_devinfo_mem();
1846 info->segment = segment;
1847 info->bus = pdev->bus->number;
1848 info->devfn = pdev->devfn;
1850 info->domain = domain;
1851 spin_lock_irqsave(&device_domain_lock, flags);
1852 /* somebody is fast */
1853 found = find_domain(pdev);
1854 if (found != NULL) {
1855 spin_unlock_irqrestore(&device_domain_lock, flags);
1856 if (found != domain) {
1857 domain_exit(domain);
1860 free_devinfo_mem(info);
1863 list_add(&info->link, &domain->devices);
1864 list_add(&info->global, &device_domain_list);
1865 pdev->dev.archdata.iommu = info;
1866 spin_unlock_irqrestore(&device_domain_lock, flags);
1869 /* recheck it here, maybe others set it */
1870 return find_domain(pdev);
1873 static int iommu_identity_mapping;
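/* Reserve the iova range [start, end) and create a 1:1 mapping for it in @domain. */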
1875 static int iommu_domain_identity_map(struct dmar_domain *domain,
1876 unsigned long long start,
1877 unsigned long long end)
1880 unsigned long long base;
1882 /* The address might not be aligned */
1883 base = start & PAGE_MASK;
1885 size = PAGE_ALIGN(size);
1886 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1887 IOVA_PFN(base + size) - 1)) {
1888 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1892 pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
1893 size, base, domain->id);
1895 * RMRR range might have overlap with physical memory range,
1898 dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
1899 (base + size - 1) >> VTD_PAGE_SHIFT);
1901 return domain_page_mapping(domain, base, base, size,
1902 DMA_PTE_READ|DMA_PTE_WRITE);
1905 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1906 unsigned long long start,
1907 unsigned long long end)
1909 struct dmar_domain *domain;
1913 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1914 pci_name(pdev), start, end);
1916 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1920 ret = iommu_domain_identity_map(domain, start, end);
1924 /* context entry init */
1925 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1932 domain_exit(domain);
1936 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1937 struct pci_dev *pdev)
1939 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1941 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1942 rmrr->end_address + 1);
1945 #ifdef CONFIG_DMAR_FLOPPY_WA
1946 static inline void iommu_prepare_isa(void)
1948 struct pci_dev *pdev;
1951 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1955 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
1956 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1959 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1960 "floppy might not work\n");
1964 static inline void iommu_prepare_isa(void)
1968 #endif /* !CONFIG_DMAR_FLPY_WA */
1970 /* Initialize each context entry as pass through.*/
1971 static int __init init_context_pass_through(void)
1973 struct pci_dev *pdev = NULL;
1974 struct dmar_domain *domain;
1977 for_each_pci_dev(pdev) {
1978 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1979 ret = domain_context_mapping(domain, pdev,
1980 CONTEXT_TT_PASS_THROUGH);
1987 static int md_domain_init(struct dmar_domain *domain, int guest_width);
1989 static int __init si_domain_work_fn(unsigned long start_pfn,
1990 unsigned long end_pfn, void *datax)
1994 *ret = iommu_domain_identity_map(si_domain,
1995 (uint64_t)start_pfn << PAGE_SHIFT,
1996 (uint64_t)end_pfn << PAGE_SHIFT);
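/*
 * Build the static identity (si) domain: attach it to every active iommu
 * and identity-map all usable memory regions, node by node.
 */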
2001 static int si_domain_init(void)
2003 struct dmar_drhd_unit *drhd;
2004 struct intel_iommu *iommu;
2007 si_domain = alloc_domain();
2011 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2013 for_each_active_iommu(iommu, drhd) {
2014 ret = iommu_attach_domain(si_domain, iommu);
2016 domain_exit(si_domain);
2021 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2022 domain_exit(si_domain);
2026 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2028 for_each_online_node(nid) {
2029 work_with_active_regions(nid, si_domain_work_fn, &ret);
2037 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2038 struct pci_dev *pdev);
2039 static int identity_mapping(struct pci_dev *pdev)
2041 struct device_domain_info *info;
2043 if (likely(!iommu_identity_mapping))
2047 list_for_each_entry(info, &si_domain->devices, link)
2048 if (info->dev == pdev)
2053 static int domain_add_dev_info(struct dmar_domain *domain,
2054 struct pci_dev *pdev)
2056 struct device_domain_info *info;
2057 unsigned long flags;
2059 info = alloc_devinfo_mem();
2063 info->segment = pci_domain_nr(pdev->bus);
2064 info->bus = pdev->bus->number;
2065 info->devfn = pdev->devfn;
2067 info->domain = domain;
2069 spin_lock_irqsave(&device_domain_lock, flags);
2070 list_add(&info->link, &domain->devices);
2071 list_add(&info->global, &device_domain_list);
2072 pdev->dev.archdata.iommu = info;
2073 spin_unlock_irqrestore(&device_domain_lock, flags);
2078 static int iommu_prepare_static_identity_mapping(void)
2080 struct pci_dev *pdev = NULL;
2083 ret = si_domain_init();
2087 for_each_pci_dev(pdev) {
2088 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2091 ret = domain_context_mapping(si_domain, pdev,
2092 CONTEXT_TT_MULTI_LEVEL);
2095 ret = domain_add_dev_info(si_domain, pdev);
2103 int __init init_dmars(void)
2105 struct dmar_drhd_unit *drhd;
2106 struct dmar_rmrr_unit *rmrr;
2107 struct pci_dev *pdev;
2108 struct intel_iommu *iommu;
2110 int pass_through = 1;
2113 * In case pass through cannot be enabled, the iommu tries to use identity mapping.
2116 if (iommu_pass_through)
2117 iommu_identity_mapping = 1;
2122 * initialize and program root entry to not present
2125 for_each_drhd_unit(drhd) {
2128 * lock not needed as this is only incremented in the single
2129 * threaded kernel __init code path; all other accesses are read-only
2134 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2137 printk(KERN_ERR "Allocating global iommu array failed\n");
2142 deferred_flush = kzalloc(g_num_of_iommus *
2143 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2144 if (!deferred_flush) {
2150 for_each_drhd_unit(drhd) {
2154 iommu = drhd->iommu;
2155 g_iommus[iommu->seq_id] = iommu;
2157 ret = iommu_init_domains(iommu);
2163 * we could share the same root & context tables
2164 * among all IOMMUs; need to split it later.
2166 ret = iommu_alloc_root_entry(iommu);
2168 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2171 if (!ecap_pass_through(iommu->ecap))
2174 if (iommu_pass_through)
2175 if (!pass_through) {
2177 "Pass Through is not supported by hardware.\n");
2178 iommu_pass_through = 0;
2182 * Start from a sane iommu hardware state.
2184 for_each_drhd_unit(drhd) {
2188 iommu = drhd->iommu;
2191 * If queued invalidation was already initialized by us
2192 * (for example, while enabling interrupt-remapping), then
2193 * things are already rolling from a sane state.
2199 * Clear any previous faults.
2201 dmar_fault(-1, iommu);
2203 * Disable queued invalidation if supported and already enabled
2204 * before OS handover.
2206 dmar_disable_qi(iommu);
2209 for_each_drhd_unit(drhd) {
2213 iommu = drhd->iommu;
2215 if (dmar_enable_qi(iommu)) {
2217 * Queued Invalidation not enabled, use Register Based Invalidation
2220 iommu->flush.flush_context = __iommu_flush_context;
2221 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2222 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2224 (unsigned long long)drhd->reg_base_addr);
2226 iommu->flush.flush_context = qi_flush_context;
2227 iommu->flush.flush_iotlb = qi_flush_iotlb;
2228 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2230 (unsigned long long)drhd->reg_base_addr);
2235 * If pass through is set and enabled, context entries of all pci
2236 * devices are initialized with the pass through translation type.
2238 if (iommu_pass_through) {
2239 ret = init_context_pass_through();
2241 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2242 iommu_pass_through = 0;
2247 * If pass through is not set or not enabled, set up context entries for
2248 * identity mappings for rmrr, gfx, and isa, and possibly fall back to static
2249 * identity mapping if iommu_identity_mapping is set.
2251 if (!iommu_pass_through) {
2252 if (iommu_identity_mapping)
2253 iommu_prepare_static_identity_mapping();
2256 * for each dev attached to rmrr
2258 * locate drhd for dev, alloc domain for dev
2259 * allocate free domain
2260 * allocate page table entries for rmrr
2261 * if context not allocated for bus
2262 * allocate and init context
2263 * set present in root table for this bus
2264 * init context with domain, translation etc
2268 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2269 for_each_rmrr_units(rmrr) {
2270 for (i = 0; i < rmrr->devices_cnt; i++) {
2271 pdev = rmrr->devices[i];
2273 * some BIOSes list non-existent devices in the DMAR table
2278 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2281 "IOMMU: mapping reserved region failed\n");
2285 iommu_prepare_isa();
2291 * global invalidate context cache
2292 * global invalidate iotlb
2293 * enable translation
2295 for_each_drhd_unit(drhd) {
2298 iommu = drhd->iommu;
2300 iommu_flush_write_buffer(iommu);
2302 ret = dmar_set_interrupt(iommu);
2306 iommu_set_root_entry(iommu);
2308 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2309 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2310 iommu_disable_protect_mem_regions(iommu);
2312 ret = iommu_enable_translation(iommu);
2319 for_each_drhd_unit(drhd) {
2322 iommu = drhd->iommu;
2329 static inline u64 aligned_size(u64 host_addr, size_t size)
2332 addr = (host_addr & (~PAGE_MASK)) + size;
2333 return PAGE_ALIGN(addr);
2337 iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
2341 /* Make sure it's in range */
2342 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
2343 if (!size || (IOVA_START_ADDR + size > end))
2346 piova = alloc_iova(&domain->iovad,
2347 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
2351 static struct iova *
2352 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
2353 size_t size, u64 dma_mask)
2355 struct pci_dev *pdev = to_pci_dev(dev);
2356 struct iova *iova = NULL;
2358 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
2359 iova = iommu_alloc_iova(domain, size, dma_mask);
2362 * First try to allocate an io virtual address in
2363 * DMA_BIT_MASK(32); if that fails, try allocating from the higher range
2366 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
2368 iova = iommu_alloc_iova(domain, size, dma_mask);
2372 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
2379 static struct dmar_domain *
2380 get_valid_domain_for_dev(struct pci_dev *pdev)
2382 struct dmar_domain *domain;
2385 domain = get_domain_for_dev(pdev,
2386 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2389 "Allocating domain for %s failed", pci_name(pdev));
2393 /* make sure context mapping is ok */
2394 if (unlikely(!domain_context_mapped(pdev))) {
2395 ret = domain_context_mapping(domain, pdev,
2396 CONTEXT_TT_MULTI_LEVEL);
2399 "Domain context map for %s failed",
2408 static int iommu_dummy(struct pci_dev *pdev)
2410 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2413 /* Check if the pdev needs to go through non-identity map and unmap process.*/
2414 static int iommu_no_mapping(struct pci_dev *pdev)
2418 if (!iommu_identity_mapping)
2419 return iommu_dummy(pdev);
2421 found = identity_mapping(pdev);
2423 if (pdev->dma_mask > DMA_BIT_MASK(32))
2427 * The 32 bit DMA device is removed from si_domain and falls back
2428 * to non-identity mapping.
2430 domain_remove_one_dev_info(si_domain, pdev);
2431 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2437 * In case of a 64 bit DMA device detached from a vm, the device
2438 * is put into si_domain for identity mapping.
2440 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2442 ret = domain_add_dev_info(si_domain, pdev);
2444 printk(KERN_INFO "64bit %s uses identity mapping\n",
2451 return iommu_dummy(pdev);
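/*
 * Map @size bytes at physical @paddr for DMA: allocate an iova, create the
 * page-table entries with the requested read/write protection, and flush
 * the IOTLB or write buffer as needed. Returns the dma address, or 0 on failure.
 */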
2454 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2455 size_t size, int dir, u64 dma_mask)
2457 struct pci_dev *pdev = to_pci_dev(hwdev);
2458 struct dmar_domain *domain;
2459 phys_addr_t start_paddr;
2463 struct intel_iommu *iommu;
2465 BUG_ON(dir == DMA_NONE);
2467 if (iommu_no_mapping(pdev))
2470 domain = get_valid_domain_for_dev(pdev);
2474 iommu = domain_get_iommu(domain);
2475 size = aligned_size((u64)paddr, size);
2477 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2481 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2484 * Check if DMAR supports zero-length reads on write-only mappings.
2487 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2488 !cap_zlr(iommu->cap))
2489 prot |= DMA_PTE_READ;
2490 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2491 prot |= DMA_PTE_WRITE;
2493 * The range paddr .. (paddr + size) might be a partial page; we should map
2494 * the whole page. Note: if two parts of one page are separately mapped, we
2495 * might have two guest_addr mappings to the same host paddr, but this
2496 * is not a big problem
2498 ret = domain_page_mapping(domain, start_paddr,
2499 ((u64)paddr) & PHYSICAL_PAGE_MASK,
2504 /* it's a non-present to present mapping. Only flush if caching mode */
2505 if (cap_caching_mode(iommu->cap))
2506 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2507 size >> VTD_PAGE_SHIFT);
2509 iommu_flush_write_buffer(iommu);
2511 return start_paddr + ((u64)paddr & (~PAGE_MASK));
2515 __free_iova(&domain->iovad, iova);
2516 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2517 pci_name(pdev), size, (unsigned long long)paddr, dir);
2521 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2522 unsigned long offset, size_t size,
2523 enum dma_data_direction dir,
2524 struct dma_attrs *attrs)
2526 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2527 dir, to_pci_dev(dev)->dma_mask);
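/* Flush all deferred IOTLB invalidations and release the iovas queued by add_unmap(). */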
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
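
/*
 * Tear down the mapping for one page range: clear the PTEs, free the
 * page-table pages, then either flush the IOTLB synchronously
 * (intel_iommu_strict) or defer the IOVA release via add_unmap().
 */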
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %zx@%llx\n",
		 pci_name(pdev), size, (unsigned long long)start_addr);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT,
			    (start_addr + size - 1) >> VTD_PAGE_SHIFT);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
				      size >> VTD_PAGE_SHIFT);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	phys_addr_t addr;
	struct scatterlist *sg;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT,
			    (start_addr + size - 1) >> VTD_PAGE_SHIFT);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);

	iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			      size >> VTD_PAGE_SHIFT);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
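
/*
 * Map a scatterlist: one IOVA range large enough for the whole list is
 * allocated and each segment is mapped contiguously into it, so the
 * device sees a single linear DMA window.
 */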
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	phys_addr_t addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
					  ((u64)addr) & PHYSICAL_PAGE_MASK,
					  size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain,
					    start_addr >> VTD_PAGE_SHIFT,
					    (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr,
					       start_addr + offset);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				  ((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_addr,
				      offset >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
2834 struct dma_map_ops intel_dma_ops = {
2835 .alloc_coherent = intel_alloc_coherent,
2836 .free_coherent = intel_free_coherent,
2837 .map_sg = intel_map_sg,
2838 .unmap_sg = intel_unmap_sg,
2839 .map_page = intel_map_page,
2840 .unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
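
/*
 * intel_dma_ops is installed as the global dma_ops in intel_iommu_init(),
 * so drivers reach these routines through the generic DMA API rather than
 * calling them directly, e.g. (illustrative only):
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */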
2844 static inline int iommu_domain_cache_init(void)
	iommu_domain_cache = kmem_cache_create("iommu_domain",
					sizeof(struct dmar_domain), 0,
					SLAB_HWCACHE_ALIGN, NULL);
2854 if (!iommu_domain_cache) {
2855 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2862 static inline int iommu_devinfo_cache_init(void)
	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					sizeof(struct device_domain_info), 0,
					SLAB_HWCACHE_ALIGN, NULL);
2871 if (!iommu_devinfo_cache) {
2872 printk(KERN_ERR "Couldn't create devinfo cache\n");
2879 static inline int iommu_iova_cache_init(void)
	iommu_iova_cache = kmem_cache_create("iommu_iova",
					sizeof(struct iova), 0,
					SLAB_HWCACHE_ALIGN, NULL);
2888 if (!iommu_iova_cache) {
2889 printk(KERN_ERR "Couldn't create iova cache\n");
2896 static int __init iommu_init_mempool(void)
2899 ret = iommu_iova_cache_init();
2903 ret = iommu_domain_cache_init();
2907 ret = iommu_devinfo_cache_init();
2911 kmem_cache_destroy(iommu_domain_cache);
2913 kmem_cache_destroy(iommu_iova_cache);
static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
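
/*
 * Mark DMAR units that cannot do useful work as ignored: units whose
 * device scope contains no PCI devices, and units that only cover
 * graphics devices (when gfx mapping is not requested), whose devices
 * are tagged with DUMMY_DEVICE_DOMAIN_INFO so they bypass the IOMMU.
 */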
2926 static void __init init_no_remapping_devices(void)
2928 struct dmar_drhd_unit *drhd;
2930 for_each_drhd_unit(drhd) {
2931 if (!drhd->include_all) {
2933 for (i = 0; i < drhd->devices_cnt; i++)
2934 if (drhd->devices[i] != NULL)
2936 /* ignore DMAR unit if no pci devices exist */
2937 if (i == drhd->devices_cnt)
2945 for_each_drhd_unit(drhd) {
2947 if (drhd->ignored || drhd->include_all)
2950 for (i = 0; i < drhd->devices_cnt; i++)
2951 if (drhd->devices[i] &&
2952 !IS_GFX_DEVICE(drhd->devices[i]))
2955 if (i < drhd->devices_cnt)
2958 /* bypass IOMMU if it is just for gfx devices */
2960 for (i = 0; i < drhd->devices_cnt; i++) {
2961 if (!drhd->devices[i])
2963 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
2968 #ifdef CONFIG_SUSPEND
2969 static int init_iommu_hw(void)
2971 struct dmar_drhd_unit *drhd;
2972 struct intel_iommu *iommu = NULL;
2974 for_each_active_iommu(iommu, drhd)
2976 dmar_reenable_qi(iommu);
2978 for_each_active_iommu(iommu, drhd) {
2979 iommu_flush_write_buffer(iommu);
2981 iommu_set_root_entry(iommu);
2983 iommu->flush.flush_context(iommu, 0, 0, 0,
2984 DMA_CCMD_GLOBAL_INVL);
2985 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2986 DMA_TLB_GLOBAL_FLUSH);
2987 iommu_disable_protect_mem_regions(iommu);
2988 iommu_enable_translation(iommu);
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
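
/*
 * Suspend/resume support: on suspend, translation is disabled and the
 * fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) are saved per
 * IOMMU; on resume, init_iommu_hw() reprograms the root entry and
 * re-enables translation before the saved registers are written back.
 */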
3007 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3009 struct dmar_drhd_unit *drhd;
3010 struct intel_iommu *iommu = NULL;
3013 for_each_active_iommu(iommu, drhd) {
3014 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3016 if (!iommu->iommu_state)
3022 for_each_active_iommu(iommu, drhd) {
3023 iommu_disable_translation(iommu);
3025 spin_lock_irqsave(&iommu->register_lock, flag);
3027 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3028 readl(iommu->reg + DMAR_FECTL_REG);
3029 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3030 readl(iommu->reg + DMAR_FEDATA_REG);
3031 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3032 readl(iommu->reg + DMAR_FEADDR_REG);
3033 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3034 readl(iommu->reg + DMAR_FEUADDR_REG);
3036 spin_unlock_irqrestore(&iommu->register_lock, flag);
3041 for_each_active_iommu(iommu, drhd)
3042 kfree(iommu->iommu_state);
3047 static int iommu_resume(struct sys_device *dev)
3049 struct dmar_drhd_unit *drhd;
3050 struct intel_iommu *iommu = NULL;
3053 if (init_iommu_hw()) {
3054 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3058 for_each_active_iommu(iommu, drhd) {
3060 spin_lock_irqsave(&iommu->register_lock, flag);
3062 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3063 iommu->reg + DMAR_FECTL_REG);
3064 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3065 iommu->reg + DMAR_FEDATA_REG);
3066 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3067 iommu->reg + DMAR_FEADDR_REG);
3068 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3069 iommu->reg + DMAR_FEUADDR_REG);
3071 spin_unlock_irqrestore(&iommu->register_lock, flag);
3074 for_each_active_iommu(iommu, drhd)
3075 kfree(iommu->iommu_state);
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};
3090 static int __init init_iommu_sysfs(void)
3094 error = sysdev_class_register(&iommu_sysclass);
3098 error = sysdev_register(&device_iommu);
3100 sysdev_class_unregister(&iommu_sysclass);
3106 static int __init init_iommu_sysfs(void)
3110 #endif /* CONFIG_PM */
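
/*
 * Driver entry point: parse the DMAR tables, set up the mempools and
 * reserved IOVA ranges, initialise each DMAR unit via init_dmars(), then
 * install intel_dma_ops (unless pass-through is in use) and register the
 * IOMMU-API ops.
 */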
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
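
/*
 * When a device sits behind a PCIe-to-PCI(-X) bridge, context entries
 * were also programmed for the bridge and any intermediate buses; tear
 * those down together with the device's own entry.
 */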
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
3251 struct device_domain_info *info;
3252 struct intel_iommu *iommu;
3253 unsigned long flags1, flags2;
3255 spin_lock_irqsave(&device_domain_lock, flags1);
3256 while (!list_empty(&domain->devices)) {
3257 info = list_entry(domain->devices.next,
3258 struct device_domain_info, link);
3259 list_del(&info->link);
3260 list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
3264 spin_unlock_irqrestore(&device_domain_lock, flags1);
3266 iommu_disable_dev_iotlb(info);
3267 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3268 iommu_detach_dev(iommu, info->bus, info->devfn);
3269 iommu_detach_dependent_devices(iommu, info->dev);
		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
3274 spin_lock_irqsave(&domain->iommu_lock, flags2);
3275 if (test_and_clear_bit(iommu->seq_id,
3276 &domain->iommu_bmp)) {
3277 domain->iommu_count--;
3278 domain_update_iommu_cap(domain);
3280 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3282 free_devinfo_mem(info);
3283 spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
3288 /* domain id for virtual machine, it won't be set in context */
3289 static unsigned long vm_domid;
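
/*
 * A virtual-machine domain can span several IOMMUs; the usable address
 * width is limited by the smallest AGAW among them.
 */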
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
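
/*
 * Initialise a freshly allocated VM domain: set up its IOVA allocator,
 * reserve the special ranges, derive the AGAW from the requested guest
 * width and allocate the top-level page directory.
 */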
3322 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3326 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3327 spin_lock_init(&domain->mapping_lock);
3328 spin_lock_init(&domain->iommu_lock);
3330 domain_reserve_special_ranges(domain);
3332 /* calculate AGAW */
3333 domain->gaw = guest_width;
3334 adjust_width = guestwidth_to_adjustwidth(guest_width);
3335 domain->agaw = width_to_agaw(adjust_width);
3337 INIT_LIST_HEAD(&domain->devices);
3339 domain->iommu_count = 0;
3340 domain->iommu_coherency = 0;
3341 domain->max_addr = 0;
3343 /* always allocate the top pgd */
3344 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3347 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3351 static void iommu_free_vm_domain(struct dmar_domain *domain)
3353 unsigned long flags;
3354 struct dmar_drhd_unit *drhd;
3355 struct intel_iommu *iommu;
3357 unsigned long ndomains;
3359 for_each_drhd_unit(drhd) {
3362 iommu = drhd->iommu;
3364 ndomains = cap_ndoms(iommu->cap);
3365 i = find_first_bit(iommu->domain_ids, ndomains);
3366 for (; i < ndomains; ) {
3367 if (iommu->domains[i] == domain) {
3368 spin_lock_irqsave(&iommu->lock, flags);
3369 clear_bit(i, iommu->domain_ids);
3370 iommu->domains[i] = NULL;
3371 spin_unlock_irqrestore(&iommu->lock, flags);
3374 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3379 static void vm_domain_exit(struct dmar_domain *domain)
3383 /* Domain 0 is reserved, so dont process it */
3387 vm_domain_remove_all_dev_info(domain);
3389 put_iova_domain(&domain->iovad);
3390 end = DOMAIN_MAX_ADDR(domain->gaw);
3391 end = end & (~VTD_PAGE_MASK);
3394 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3396 /* free page tables */
3397 dma_pte_free_pagetable(domain, 0, end);
3399 iommu_free_vm_domain(domain);
3400 free_domain_mem(domain);
3403 static int intel_iommu_domain_init(struct iommu_domain *domain)
3405 struct dmar_domain *dmar_domain;
3407 dmar_domain = iommu_alloc_vm_domain();
3410 "intel_iommu_domain_init: dmar_domain == NULL\n");
3413 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3415 "intel_iommu_domain_init() failed\n");
3416 vm_domain_exit(dmar_domain);
3419 domain->priv = dmar_domain;
3424 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3426 struct dmar_domain *dmar_domain = domain->priv;
3428 domain->priv = NULL;
3429 vm_domain_exit(dmar_domain);
3432 static int intel_iommu_attach_device(struct iommu_domain *domain,
3435 struct dmar_domain *dmar_domain = domain->priv;
3436 struct pci_dev *pdev = to_pci_dev(dev);
3437 struct intel_iommu *iommu;
3442 /* normally pdev is not mapped */
3443 if (unlikely(domain_context_mapped(pdev))) {
3444 struct dmar_domain *old_domain;
3446 old_domain = find_domain(pdev);
3448 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3449 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3450 domain_remove_one_dev_info(old_domain, pdev);
3452 domain_remove_dev_info(old_domain);
3456 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3461 /* check if this iommu agaw is sufficient for max mapped address */
3462 addr_width = agaw_to_width(iommu->agaw);
3463 end = DOMAIN_MAX_ADDR(addr_width);
3464 end = end & VTD_PAGE_MASK;
3465 if (end < dmar_domain->max_addr) {
3466 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3467 "sufficient for the mapped address (%llx)\n",
3468 __func__, iommu->agaw, dmar_domain->max_addr);
3472 ret = domain_add_dev_info(dmar_domain, pdev);
3476 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
3489 static int intel_iommu_map_range(struct iommu_domain *domain,
3490 unsigned long iova, phys_addr_t hpa,
3491 size_t size, int iommu_prot)
3493 struct dmar_domain *dmar_domain = domain->priv;
3499 if (iommu_prot & IOMMU_READ)
3500 prot |= DMA_PTE_READ;
3501 if (iommu_prot & IOMMU_WRITE)
3502 prot |= DMA_PTE_WRITE;
3503 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3504 prot |= DMA_PTE_SNP;
3506 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
3507 if (dmar_domain->max_addr < max_addr) {
3511 /* check if minimum agaw is sufficient for mapped address */
3512 min_agaw = vm_domain_min_agaw(dmar_domain);
3513 addr_width = agaw_to_width(min_agaw);
3514 end = DOMAIN_MAX_ADDR(addr_width);
3515 end = end & VTD_PAGE_MASK;
3516 if (end < max_addr) {
3517 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3518 "sufficient for the mapped address (%llx)\n",
3519 __func__, min_agaw, max_addr);
3522 dmar_domain->max_addr = max_addr;
3525 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base >> VTD_PAGE_SHIFT,
			    (base + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}
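
/*
 * IOMMU-API lookup helper: walk the domain's page table and return the
 * host physical address currently mapped at the given IOVA, or 0 if the
 * address is not mapped.
 */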
3545 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3548 struct dmar_domain *dmar_domain = domain->priv;
3549 struct dma_pte *pte;
3552 pte = addr_to_dma_pte(dmar_domain, iova);
3554 phys = dma_pte_addr(pte);
3559 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3562 struct dmar_domain *dmar_domain = domain->priv;
3564 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3565 return dmar_domain->iommu_snooping;
3570 static struct iommu_ops intel_iommu_ops = {
3571 .domain_init = intel_iommu_domain_init,
3572 .domain_destroy = intel_iommu_domain_destroy,
3573 .attach_dev = intel_iommu_attach_device,
3574 .detach_dev = intel_iommu_detach_device,
3575 .map = intel_iommu_map_range,
3576 .unmap = intel_iommu_unmap_range,
3577 .iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);