intel-iommu: make domain_add_dev_info() call domain_context_mapping()
drivers/pci/intel-iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))


/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
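
/* With PAGE_SHIFT == VTD_PAGE_SHIFT == 12 both conversions are the
   identity; on larger MM page sizes one MM pfn covers several DMA pfns. */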
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
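
/*
 * The root table holds one root_entry per bus number (256 16-byte
 * entries in one 4KB page); each present entry points to a context
 * table that is indexed by devfn.
 */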
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return (struct context_entry *)
                (root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
                                    : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}
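
/*
 * Example: a 4-level (agaw 2) table rooted at page-aligned pgd for
 * domain id 42 would be encoded by the helpers above as
 *   lo = virt_to_phys(pgd) | (translation type << 2) | 1 (present)
 *   hi = (42 << 8) | 2 (address width)
 */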

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
        pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}
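
/*
 * Note: cmpxchg64(pte, 0, 0) stores 0 only when the old value was
 * already 0, so it never changes the PTE and doubles as an atomic
 * 64-bit read on 32-bit kernels.
 */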

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
        pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)

struct dmar_domain {
        int     id;                     /* domain id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

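/*
 * The allocators below briefly set PF_MEMALLOC so that their GFP_ATOMIC
 * allocations may dip into the emergency reserves, then restore the
 * caller's original PF_MEMALLOC state.
 */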
static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may differ across iommus: use the default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        int i;

        domain->iommu_coherency = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
        int i;

        domain->iommu_snooping = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                        break;
                }
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain_update_iommu_snooping(domain);
}

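/*
 * Walk the DRHD units to find the IOMMU covering (segment, bus, devfn):
 * either the device is listed explicitly under a unit, it sits behind a
 * listed bridge whose subordinate bus range contains 'bus', or the unit
 * is marked INCLUDE_ALL.
 */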
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        int i;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                if (segment != drhd->segment)
                        continue;

                for (i = 0; i < drhd->devices_cnt; i++) {
                        if (drhd->devices[i] &&
                            drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
                        if (drhd->devices[i] &&
                            drhd->devices[i]->subordinate &&
                            drhd->devices[i]->subordinate->number <= bus &&
                            drhd->devices[i]->subordinate->subordinate >= bus)
                                return drhd->iommu;
                }

                if (drhd->include_all)
                        return drhd->iommu;
        }

        return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                context = (struct context_entry *)alloc_pgtable_page();
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                ret = 0;
                goto out;
        }
        ret = context_present(&context[devfn]);
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn],
                        sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        struct root_entry *root;
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
        return (width - 30) / LEVEL_STRIDE;
}
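
/*
 * Example: a 48-bit address width gives width_to_agaw(48) == 2 and
 * agaw_to_level(2) == 4, i.e. a 4-level page table; each level
 * translates LEVEL_STRIDE (9) bits, and agaw 0 is the minimal 2-level,
 * 30-bit table.
 */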

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}
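
/*
 * Example: pfn_level_offset(pfn, 1) == pfn & 0x1ff (the low 9 bits),
 * level_size(2) == 512 pfns, and align_to_level(1000, 2) == 1024, the
 * next level-2 boundary.
 */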

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);
        BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
        parent = domain->pgd;

        while (level > 0) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (level == 1)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page();

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        } else {
                                dma_pte_addr(pte);
                                domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte))
                        break;
                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}
/* clear last level pte; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *first_pte, *pte;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

        /* we don't need lock here; nobody else touches the iova range */
        while (start_pfn <= last_pfn) {
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, 2);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn++;
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);
        }
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *first_pte, *pte;
        int total = agaw_to_level(domain->agaw);
        int level;
        unsigned long tmp;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

        /* We don't need lock here; nobody else touches the iova range */
        level = 2;
        while (level <= total) {
                tmp = align_to_level(start_pfn, level);

                /* If we can't even clear one PTE at this level, we're done */
                if (tmp + level_size(level) - 1 > last_pfn)
                        return;

                while (tmp + level_size(level) - 1 <= last_pfn) {
                        first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
                        if (!pte) {
                                tmp = align_to_level(tmp + 1, level + 1);
                                continue;
                        }
                        do {
                                if (dma_pte_present(pte)) {
                                        free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
                                        dma_clear_pte(pte);
                                }
                                pte++;
                                tmp += level_size(level);
                        } while (!first_pte_in_page(pte) &&
                                 tmp + level_size(level) - 1 <= last_pfn);

                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);

                }
                level++;
        }
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page();
        if (!root)
                return -ENOMEM;

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        void *addr;
        u32 sts;
        unsigned long flag;

        addr = iommu->root_entry;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* invalidate context-cache entries; 'type' selects global, domain or device-selective granularity */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* invalidate IOTLB entries; 'type' selects global, domain-selective or page-selective granularity */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need to set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* Note: always flush non-leaf currently */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably to be super secure.. Looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                dmar_readq, (!(val & DMA_TLB_IVT)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                        (unsigned long long)DMA_TLB_IIRG(type),
                        (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
        struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
        int found = 0;
        unsigned long flags;
        struct device_domain_info *info;
        struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
                        found = 1;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev)
                return NULL;

        if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(info->dev))
                return NULL;

        info->iommu = iommu;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info)
                return;

        pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !pci_ats_enabled(info->dev))
                return;

        pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (!info->dev || !pci_ats_enabled(info->dev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(info->dev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

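/*
 * Example: flushing 5 pages gives mask == ilog2(__roundup_pow_of_two(5))
 * == 3, i.e. an 8-page invalidation; PSI granules must be power-of-two
 * sized and naturally aligned, hence the round-up.
 */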
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        /*
         * Fall back to domain-selective flush if there is no PSI support or
         * the size is too big.
         * PSI requires the page size to be 2 ^ x, and the base address to be
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                                DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr, mask,
                                                DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, domain ID 0 is reserved for non-present to present
         * mapping flush. Device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || did)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                readl, !(pmen & DMA_PMEN_PRS), pmen);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
        return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("Number of Domains supported <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                printk(KERN_ERR "Allocating domain id array failed\n");
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                        GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
                kfree(iommu->domain_ids);
                return -ENOMEM;
        }

        spin_lock_init(&iommu->lock);

        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;
        unsigned long flags;

        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);

                spin_lock_irqsave(&domain->iommu_lock, flags);
                if (--domain->iommu_count == 0) {
                        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                vm_domain_exit(domain);
                        else
                                domain_exit(domain);
                }
                spin_unlock_irqrestore(&domain->iommu_lock, flags);

                i = find_next_bit(iommu->domain_ids,
                        cap_ndoms(iommu->cap), i+1);
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

        if (iommu->irq) {
                set_irq_data(iommu->irq, NULL);
                /* This will mask the irq */
                free_irq(iommu->irq, iommu);
                destroy_irq(iommu->irq);
        }

        kfree(iommu->domains);
        kfree(iommu->domain_ids);

        g_iommus[iommu->seq_id] = NULL;

        /* if all iommus are freed, free g_iommus */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (g_iommus[i])
                        break;
        }

        if (i == g_num_of_iommus)
                kfree(g_iommus);

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;

        return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;
        unsigned long flags;

        ndomains = cap_ndoms(iommu->cap);

        spin_lock_irqsave(&iommu->lock, flags);

        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num >= ndomains) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                printk(KERN_ERR "IOMMU: no free domain ids\n");
                return -ENOMEM;
        }

        domain->id = num;
        set_bit(num, iommu->domain_ids);
        set_bit(iommu->seq_id, &domain->iommu_bmp);
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;
        int found = 0;

        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
        num = find_first_bit(iommu->domain_ids, ndomains);
        for (; num < ndomains; ) {
                if (iommu->domains[num] == domain) {
                        found = 1;
                        break;
                }
                num = find_next_bit(iommu->domain_ids,
                                    cap_ndoms(iommu->cap), num+1);
        }

        if (found) {
                clear_bit(num, iommu->domain_ids);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                iommu->domains[num] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova)
                printk(KERN_ERR "Reserve IOAPIC range failed\n");

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova)
                                printk(KERN_ERR "Reserve iova failed\n");
                }
        }

}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}
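
/*
 * Example: guestwidth_to_adjustwidth(36) returns 39; the width is
 * rounded up so that (agaw - 12) is a multiple of LEVEL_STRIDE (9),
 * i.e. whole page-table levels above the 12-bit page offset.
 */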

static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        domain->iommu_count = 1;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        domain_remove_dev_info(domain);
        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        /* clear ptes */
        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        for_each_active_iommu(iommu, drhd)
                if (test_bit(iommu->seq_id, &domain->iommu_bmp))
                        iommu_detach_domain(domain, iommu);

        free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                                 u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct intel_iommu *iommu;
        struct dma_pte *pgd;
        unsigned long num;
        unsigned long ndomains;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        iommu = device_to_iommu(segment, bus, devfn);
        if (!iommu)
                return -ENODEV;

        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        id = domain->id;
        pgd = domain->pgd;

        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
            domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
                int found = 0;

                /* find an available domain id for this device in iommu */
                ndomains = cap_ndoms(iommu->cap);
                num = find_first_bit(iommu->domain_ids, ndomains);
                for (; num < ndomains; ) {
                        if (iommu->domains[num] == domain) {
                                id = num;
                                found = 1;
                                break;
                        }
                        num = find_next_bit(iommu->domain_ids,
                                            cap_ndoms(iommu->cap), num+1);
                }

                if (found == 0) {
                        num = find_first_zero_bit(iommu->domain_ids, ndomains);
                        if (num >= ndomains) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                printk(KERN_ERR "IOMMU: no free domain ids\n");
                                return -EFAULT;
                        }

                        set_bit(num, iommu->domain_ids);
                        iommu->domains[num] = domain;
                        id = num;
                }

                /* Skip top levels of page tables for
                 * iommu which has less agaw than default.
                 */
                for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                        pgd = phys_to_virt(dma_pte_addr(pgd));
                        if (!dma_pte_present(pgd)) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                return -ENOMEM;
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware. And ASR is ignored by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping. If hardware doesn't cache
         * non-present entries, we only need to flush the write buffer. If
         * it _does_ cache non-present entries, then it does so in the
         * special domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
        return 0;
}

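/*
 * Map the device itself, then every bridge on the path up to the root:
 * DMA from a device behind a PCIe-to-PCI bridge is tagged with the
 * bridge's (secondary bus, devfn 0), so those contexts must be
 * programmed as well.
 */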
1572 static int
1573 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1574                         int translation)
1575 {
1576         int ret;
1577         struct pci_dev *tmp, *parent;
1578
1579         ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1580                                          pdev->bus->number, pdev->devfn,
1581                                          translation);
1582         if (ret)
1583                 return ret;
1584
1585         /* dependent device mapping */
1586         tmp = pci_find_upstream_pcie_bridge(pdev);
1587         if (!tmp)
1588                 return 0;
1589         /* Secondary interface's bus number and devfn 0 */
1590         parent = pdev->bus->self;
1591         while (parent != tmp) {
1592                 ret = domain_context_mapping_one(domain,
1593                                                  pci_domain_nr(parent->bus),
1594                                                  parent->bus->number,
1595                                                  parent->devfn, translation);
1596                 if (ret)
1597                         return ret;
1598                 parent = parent->bus->self;
1599         }
1600         if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1601                 return domain_context_mapping_one(domain,
1602                                         pci_domain_nr(tmp->subordinate),
1603                                         tmp->subordinate->number, 0,
1604                                         translation);
1605         else /* this is a legacy PCI bridge */
1606                 return domain_context_mapping_one(domain,
1607                                                   pci_domain_nr(tmp->bus),
1608                                                   tmp->bus->number,
1609                                                   tmp->devfn,
1610                                                   translation);
1611 }
1612
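/*
 * Check whether @pdev, and any upstream bridges it depends on, already
 * have context entries present.  This mirrors the walk performed by
 * domain_context_mapping() above.
 */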
1613 static int domain_context_mapped(struct pci_dev *pdev)
1614 {
1615         int ret;
1616         struct pci_dev *tmp, *parent;
1617         struct intel_iommu *iommu;
1618
1619         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1620                                 pdev->devfn);
1621         if (!iommu)
1622                 return -ENODEV;
1623
1624         ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1625         if (!ret)
1626                 return ret;
1627         /* dependent device mapping */
1628         tmp = pci_find_upstream_pcie_bridge(pdev);
1629         if (!tmp)
1630                 return ret;
1631         /* Secondary interface's bus number and devfn 0 */
1632         parent = pdev->bus->self;
1633         while (parent != tmp) {
1634                 ret = device_context_mapped(iommu, parent->bus->number,
1635                                             parent->devfn);
1636                 if (!ret)
1637                         return ret;
1638                 parent = parent->bus->self;
1639         }
1640         if (tmp->is_pcie)
1641                 return device_context_mapped(iommu, tmp->subordinate->number,
1642                                              0);
1643         else
1644                 return device_context_mapped(iommu, tmp->bus->number,
1645                                              tmp->devfn);
1646 }
1647
1648 /* Returns a number of VTD pages, but aligned to MM page size */
1649 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1650                                             size_t size)
1651 {
1652         host_addr &= ~PAGE_MASK;
1653         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1654 }
1655
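/*
 * Common worker for building page table entries over an IOVA range.
 * Exactly one of @sg and @phys_pfn is used: with a scatterlist, the
 * physical addresses come from the sg entries (whose dma_address and
 * dma_length fields are filled in along the way); otherwise @nr_pages
 * are mapped contiguously starting at @phys_pfn.
 */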
1656 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1657                             struct scatterlist *sg, unsigned long phys_pfn,
1658                             unsigned long nr_pages, int prot)
1659 {
1660         struct dma_pte *first_pte = NULL, *pte = NULL;
1661         phys_addr_t uninitialized_var(pteval);
1662         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1663         unsigned long sg_res;
1664
1665         BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1666
1667         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1668                 return -EINVAL;
1669
1670         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1671
1672         if (sg)
1673                 sg_res = 0;
1674         else {
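                /* No scatterlist: one contiguous run.  sg_res is kept
                 * above nr_pages so the sg-advance path below is never
                 * taken. */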
1675                 sg_res = nr_pages + 1;
1676                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1677         }
1678
1679         while (nr_pages--) {
1680                 uint64_t tmp;
1681
1682                 if (!sg_res) {
1683                         sg_res = aligned_nrpages(sg->offset, sg->length);
1684                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1685                         sg->dma_length = sg->length;
1686                         pteval = page_to_phys(sg_page(sg)) | prot;
1687                 }
1688                 if (!pte) {
1689                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1690                         if (!pte)
1691                                 return -ENOMEM;
1692                 }
1693                 /* We don't need a lock here; nobody else
1694                  * touches the iova range
1695                  */
1696                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1697                 if (tmp) {
1698                         static int dumps = 5;
1699                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1700                                iov_pfn, (unsigned long long)tmp, (unsigned long long)pteval);
1701                         if (dumps) {
1702                                 dumps--;
1703                                 debug_dma_dump_mappings(NULL);
1704                         }
1705                         WARN_ON(1);
1706                 }
1707                 pte++;
1708                 if (!nr_pages || first_pte_in_page(pte)) {
1709                         domain_flush_cache(domain, first_pte,
1710                                            (void *)pte - (void *)first_pte);
1711                         pte = NULL;
1712                 }
1713                 iov_pfn++;
1714                 pteval += VTD_PAGE_SIZE;
1715                 sg_res--;
1716                 if (!sg_res)
1717                         sg = sg_next(sg);
1718         }
1719         return 0;
1720 }
1721
1722 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1723                                     struct scatterlist *sg, unsigned long nr_pages,
1724                                     int prot)
1725 {
1726         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1727 }
1728
1729 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1730                                      unsigned long phys_pfn, unsigned long nr_pages,
1731                                      int prot)
1732 {
1733         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1734 }
1735
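/*
 * Tear down the context entry for one device and invalidate the
 * context-cache and IOTLB.  A global flush is used here; a
 * device-selective invalidation would presumably suffice, but this
 * path is not performance critical.
 */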
1736 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1737 {
1738         if (!iommu)
1739                 return;
1740
1741         clear_context_table(iommu, bus, devfn);
1742         iommu->flush.flush_context(iommu, 0, 0, 0,
1743                                            DMA_CCMD_GLOBAL_INVL);
1744         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1745 }
1746
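/*
 * Detach every device from @domain: unlink each device_domain_info and
 * clear the device's context entry.  The list lock is dropped around
 * the hardware teardown, presumably to avoid nesting it inside the
 * per-iommu locks taken there.
 */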
1747 static void domain_remove_dev_info(struct dmar_domain *domain)
1748 {
1749         struct device_domain_info *info;
1750         unsigned long flags;
1751         struct intel_iommu *iommu;
1752
1753         spin_lock_irqsave(&device_domain_lock, flags);
1754         while (!list_empty(&domain->devices)) {
1755                 info = list_entry(domain->devices.next,
1756                         struct device_domain_info, link);
1757                 list_del(&info->link);
1758                 list_del(&info->global);
1759                 if (info->dev)
1760                         info->dev->dev.archdata.iommu = NULL;
1761                 spin_unlock_irqrestore(&device_domain_lock, flags);
1762
1763                 iommu_disable_dev_iotlb(info);
1764                 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1765                 iommu_detach_dev(iommu, info->bus, info->devfn);
1766                 free_devinfo_mem(info);
1767
1768                 spin_lock_irqsave(&device_domain_lock, flags);
1769         }
1770         spin_unlock_irqrestore(&device_domain_lock, flags);
1771 }
1772
1773 /*
1774  * find_domain
1775  * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1776  */
1777 static struct dmar_domain *
1778 find_domain(struct pci_dev *pdev)
1779 {
1780         struct device_domain_info *info;
1781
1782         /* No lock here, assumes no domain exit in normal case */
1783         info = pdev->dev.archdata.iommu;
1784         if (info)
1785                 return info->domain;
1786         return NULL;
1787 }
1788
1789 /* Find (or create and initialize) the domain for a device */
1790 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1791 {
1792         struct dmar_domain *domain, *found = NULL;
1793         struct intel_iommu *iommu;
1794         struct dmar_drhd_unit *drhd;
1795         struct device_domain_info *info, *tmp;
1796         struct pci_dev *dev_tmp;
1797         unsigned long flags;
1798         int bus = 0, devfn = 0;
1799         int segment;
1800         int ret;
1801
1802         domain = find_domain(pdev);
1803         if (domain)
1804                 return domain;
1805
1806         segment = pci_domain_nr(pdev->bus);
1807
1808         dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1809         if (dev_tmp) {
1810                 if (dev_tmp->is_pcie) {
1811                         bus = dev_tmp->subordinate->number;
1812                         devfn = 0;
1813                 } else {
1814                         bus = dev_tmp->bus->number;
1815                         devfn = dev_tmp->devfn;
1816                 }
1817                 spin_lock_irqsave(&device_domain_lock, flags);
1818                 list_for_each_entry(info, &device_domain_list, global) {
1819                         if (info->segment == segment &&
1820                             info->bus == bus && info->devfn == devfn) {
1821                                 found = info->domain;
1822                                 break;
1823                         }
1824                 }
1825                 spin_unlock_irqrestore(&device_domain_lock, flags);
1826                 /* pcie-pci bridge already has a domain, use it */
1827                 if (found) {
1828                         domain = found;
1829                         goto found_domain;
1830                 }
1831         }
1832
1833         domain = alloc_domain();
1834         if (!domain)
1835                 goto error;
1836
1837         /* Allocate new domain for the device */
1838         drhd = dmar_find_matched_drhd_unit(pdev);
1839         if (!drhd) {
1840                 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1841                         pci_name(pdev));
1842                 return NULL;
1843         }
1844         iommu = drhd->iommu;
1845
1846         ret = iommu_attach_domain(domain, iommu);
1847         if (ret) {
1848                 domain_exit(domain);
1849                 goto error;
1850         }
1851
1852         if (domain_init(domain, gaw)) {
1853                 domain_exit(domain);
1854                 goto error;
1855         }
1856
1857         /* register pcie-to-pci device */
1858         if (dev_tmp) {
1859                 info = alloc_devinfo_mem();
1860                 if (!info) {
1861                         domain_exit(domain);
1862                         goto error;
1863                 }
1864                 info->segment = segment;
1865                 info->bus = bus;
1866                 info->devfn = devfn;
1867                 info->dev = NULL;
1868                 info->domain = domain;
1869                 /* This domain is shared by devices under p2p bridge */
1870                 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1871
1872                 /* pcie-to-pci bridge already has a domain, use it */
1873                 found = NULL;
1874                 spin_lock_irqsave(&device_domain_lock, flags);
1875                 list_for_each_entry(tmp, &device_domain_list, global) {
1876                         if (tmp->segment == segment &&
1877                             tmp->bus == bus && tmp->devfn == devfn) {
1878                                 found = tmp->domain;
1879                                 break;
1880                         }
1881                 }
1882                 if (found) {
1883                         free_devinfo_mem(info);
1884                         domain_exit(domain);
1885                         domain = found;
1886                 } else {
1887                         list_add(&info->link, &domain->devices);
1888                         list_add(&info->global, &device_domain_list);
1889                 }
1890                 spin_unlock_irqrestore(&device_domain_lock, flags);
1891         }
1892
1893 found_domain:
1894         info = alloc_devinfo_mem();
1895         if (!info)
1896                 goto error;
1897         info->segment = segment;
1898         info->bus = pdev->bus->number;
1899         info->devfn = pdev->devfn;
1900         info->dev = pdev;
1901         info->domain = domain;
1902         spin_lock_irqsave(&device_domain_lock, flags);
1903         /* somebody else raced us and registered this device first */
1904         found = find_domain(pdev);
1905         if (found != NULL) {
1906                 spin_unlock_irqrestore(&device_domain_lock, flags);
1907                 if (found != domain) {
1908                         domain_exit(domain);
1909                         domain = found;
1910                 }
1911                 free_devinfo_mem(info);
1912                 return domain;
1913         }
1914         list_add(&info->link, &domain->devices);
1915         list_add(&info->global, &device_domain_list);
1916         pdev->dev.archdata.iommu = info;
1917         spin_unlock_irqrestore(&device_domain_lock, flags);
1918         return domain;
1919 error:
1920         /* recheck it here, maybe others set it */
1921         return find_domain(pdev);
1922 }
1923
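/*
 * 0 = no identity mapping; 1 = identity-map all suitable devices;
 * 2 = identity-map graphics devices only (CONFIG_DMAR_BROKEN_GFX_WA),
 * as interpreted by iommu_should_identity_map() below.
 */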
1924 static int iommu_identity_mapping;
1925
1926 static int iommu_domain_identity_map(struct dmar_domain *domain,
1927                                      unsigned long long start,
1928                                      unsigned long long end)
1929 {
1930         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1931         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1932
1933         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1934                           dma_to_mm_pfn(last_vpfn))) {
1935                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1936                 return -ENOMEM;
1937         }
1938
1939         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1940                  start, end, domain->id);
1941         /*
1942          * RMRR range might overlap with the physical memory range;
1943          * clear it first
1944          */
1945         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1946
1947         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1948                                   last_vpfn - first_vpfn + 1,
1949                                   DMA_PTE_READ|DMA_PTE_WRITE);
1950 }
1951
1952 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1953                                       unsigned long long start,
1954                                       unsigned long long end)
1955 {
1956         struct dmar_domain *domain;
1957         int ret;
1958
1959         domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1960         if (!domain)
1961                 return -ENOMEM;
1962
1963         /* For _hardware_ passthrough, don't bother. But for software
1964            passthrough, we do it anyway -- it may indicate a memory
1965            range which is reserved in E820 and so didn't get set
1966            up to start with in si_domain */
1967         if (domain == si_domain && hw_pass_through) {
1968                 printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1969                        pci_name(pdev), start, end);
1970                 return 0;
1971         }
1972
1973         printk(KERN_INFO
1974                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1975                pci_name(pdev), start, end);
1976
1977         ret = iommu_domain_identity_map(domain, start, end);
1978         if (ret)
1979                 goto error;
1980
1981         /* context entry init */
1982         ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1983         if (ret)
1984                 goto error;
1985
1986         return 0;
1987
1988  error:
1989         domain_exit(domain);
1990         return ret;
1991 }
1992
1993 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1994         struct pci_dev *pdev)
1995 {
1996         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1997                 return 0;
1998         return iommu_prepare_identity_map(pdev, rmrr->base_address,
1999                 rmrr->end_address + 1);
2000 }
2001
2002 #ifdef CONFIG_DMAR_FLOPPY_WA
2003 static inline void iommu_prepare_isa(void)
2004 {
2005         struct pci_dev *pdev;
2006         int ret;
2007
2008         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2009         if (!pdev)
2010                 return;
2011
2012         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2013         ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2014
2015         if (ret)
2016                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2017                        "floppy might not work\n");
2018
2019 }
2020 #else
2021 static inline void iommu_prepare_isa(void)
2022 {
2023         return;
2024 }
2025 #endif /* !CONFIG_DMAR_FLOPPY_WA */
2026
2027 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2028
2029 static int __init si_domain_work_fn(unsigned long start_pfn,
2030                                     unsigned long end_pfn, void *datax)
2031 {
2032         int *ret = datax;
2033
2034         *ret = iommu_domain_identity_map(si_domain,
2035                                          (uint64_t)start_pfn << PAGE_SHIFT,
2036                                          (uint64_t)end_pfn << PAGE_SHIFT);
2037         return *ret;
2038
2039 }
2040
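/*
 * Build the static identity (si) domain: attach it to every active
 * iommu and, unless hardware pass-through is in use, 1:1 map the
 * active memory regions of every online node into it.
 */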
2041 static int si_domain_init(int hw)
2042 {
2043         struct dmar_drhd_unit *drhd;
2044         struct intel_iommu *iommu;
2045         int nid, ret = 0;
2046
2047         si_domain = alloc_domain();
2048         if (!si_domain)
2049                 return -EFAULT;
2050
2051         pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2052
2053         for_each_active_iommu(iommu, drhd) {
2054                 ret = iommu_attach_domain(si_domain, iommu);
2055                 if (ret) {
2056                         domain_exit(si_domain);
2057                         return -EFAULT;
2058                 }
2059         }
2060
2061         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2062                 domain_exit(si_domain);
2063                 return -EFAULT;
2064         }
2065
2066         si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2067
2068         if (hw)
2069                 return 0;
2070
2071         for_each_online_node(nid) {
2072                 work_with_active_regions(nid, si_domain_work_fn, &ret);
2073                 if (ret)
2074                         return ret;
2075         }
2076
2077         return 0;
2078 }
2079
2080 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2081                                           struct pci_dev *pdev);
2082 static int identity_mapping(struct pci_dev *pdev)
2083 {
2084         struct device_domain_info *info;
2085
2086         if (likely(!iommu_identity_mapping))
2087                 return 0;
2088
2089
2090         list_for_each_entry(info, &si_domain->devices, link)
2091                 if (info->dev == pdev)
2092                         return 1;
2093         return 0;
2094 }
2095
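/*
 * Attach @pdev to @domain: program its context entry with the given
 * translation type and record the device_domain_info bookkeeping, so
 * callers no longer need to call domain_context_mapping() themselves.
 */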
2096 static int domain_add_dev_info(struct dmar_domain *domain,
2097                                struct pci_dev *pdev,
2098                                int translation)
2099 {
2100         struct device_domain_info *info;
2101         unsigned long flags;
2102         int ret;
2103
2104         info = alloc_devinfo_mem();
2105         if (!info)
2106                 return -ENOMEM;
2107
2108         ret = domain_context_mapping(domain, pdev, translation);
2109         if (ret) {
2110                 free_devinfo_mem(info);
2111                 return ret;
2112         }
2113
2114         info->segment = pci_domain_nr(pdev->bus);
2115         info->bus = pdev->bus->number;
2116         info->devfn = pdev->devfn;
2117         info->dev = pdev;
2118         info->domain = domain;
2119
2120         spin_lock_irqsave(&device_domain_lock, flags);
2121         list_add(&info->link, &domain->devices);
2122         list_add(&info->global, &device_domain_list);
2123         pdev->dev.archdata.iommu = info;
2124         spin_unlock_irqrestore(&device_domain_lock, flags);
2125
2126         return 0;
2127 }
2128
2129 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2130 {
2131         if (iommu_identity_mapping == 2)
2132                 return IS_GFX_DEVICE(pdev);
2133
2134         /*
2135          * We want to start off with all devices in the 1:1 domain, and
2136          * take them out later if we find they can't access all of memory.
2137          *
2138          * However, we can't do this for PCI devices behind bridges,
2139          * because all PCI devices behind the same bridge will end up
2140          * with the same source-id on their transactions.
2141          *
2142          * Practically speaking, we can't change things around for these
2143          * devices at run-time, because we can't be sure there'll be no
2144          * DMA transactions in flight for any of their siblings.
2145          * 
2146          * So PCI devices (unless they're on the root bus) as well as
2147          * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2148          * the 1:1 domain, just in _case_ one of their siblings turns out
2149          * not to be able to map all of memory.
2150          */
2151         if (!pdev->is_pcie) {
2152                 if (!pci_is_root_bus(pdev->bus))
2153                         return 0;
2154                 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2155                         return 0;
2156         } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2157                 return 0;
2158
2159         /* 
2160          * At boot time, we don't yet know if devices will be 64-bit capable.
2161          * Assume that they will -- if they turn out not to be, then we can 
2162          * take them out of the 1:1 domain later.
2163          */
2164         if (!startup)
2165                 return pdev->dma_mask > DMA_BIT_MASK(32);
2166
2167         return 1;
2168 }
2169
2170 static int iommu_prepare_static_identity_mapping(int hw)
2171 {
2172         struct pci_dev *pdev = NULL;
2173         int ret;
2174
2175         ret = si_domain_init(hw);
2176         if (ret)
2177                 return -EFAULT;
2178
2179         for_each_pci_dev(pdev) {
2180                 if (iommu_should_identity_map(pdev, 1)) {
2181                         printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2182                                hw ? "hardware" : "software", pci_name(pdev));
2183
2184                         ret = domain_add_dev_info(si_domain, pdev,
2185                                                      hw ? CONTEXT_TT_PASS_THROUGH :
2186                                                      CONTEXT_TT_MULTI_LEVEL);
2187                         if (ret)
2188                                 return ret;
2189                 }
2190         }
2191
2192         return 0;
2193 }
2194
2195 int __init init_dmars(void)
2196 {
2197         struct dmar_drhd_unit *drhd;
2198         struct dmar_rmrr_unit *rmrr;
2199         struct pci_dev *pdev;
2200         struct intel_iommu *iommu;
2201         int i, ret;
2202
2203         /*
2204          * for each drhd
2205          *    allocate root
2206          *    initialize and program root entry to not present
2207          * endfor
2208          */
2209         for_each_drhd_unit(drhd) {
2210                 g_num_of_iommus++;
2211                 /*
2212                  * lock not needed as this is only incremented in the single
2213                  * threaded kernel __init code path all other access are read
2214                  * only
2215                  */
2216         }
2217
2218         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2219                         GFP_KERNEL);
2220         if (!g_iommus) {
2221                 printk(KERN_ERR "Allocating global iommu array failed\n");
2222                 ret = -ENOMEM;
2223                 goto error;
2224         }
2225
2226         deferred_flush = kzalloc(g_num_of_iommus *
2227                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2228         if (!deferred_flush) {
2229                 ret = -ENOMEM;
2230                 goto error;
2231         }
2232
2233         for_each_drhd_unit(drhd) {
2234                 if (drhd->ignored)
2235                         continue;
2236
2237                 iommu = drhd->iommu;
2238                 g_iommus[iommu->seq_id] = iommu;
2239
2240                 ret = iommu_init_domains(iommu);
2241                 if (ret)
2242                         goto error;
2243
2244                 /*
2245                  * TBD:
2246                  * we could share the same root & context tables
2247                  * among all IOMMUs. Need to split it later.
2248                  */
2249                 ret = iommu_alloc_root_entry(iommu);
2250                 if (ret) {
2251                         printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2252                         goto error;
2253                 }
2254                 if (!ecap_pass_through(iommu->ecap))
2255                         hw_pass_through = 0;
2256         }
2257
2258         /*
2259          * Start from a sane iommu hardware state.
2260          */
2261         for_each_drhd_unit(drhd) {
2262                 if (drhd->ignored)
2263                         continue;
2264
2265                 iommu = drhd->iommu;
2266
2267                 /*
2268                  * If the queued invalidation is already initialized by us
2269                  * (for example, while enabling interrupt-remapping) then
2270                  * things are already rolling from a sane state.
2271                  */
2272                 if (iommu->qi)
2273                         continue;
2274
2275                 /*
2276                  * Clear any previous faults.
2277                  */
2278                 dmar_fault(-1, iommu);
2279                 /*
2280                  * Disable queued invalidation if supported and already enabled
2281                  * before OS handover.
2282                  */
2283                 dmar_disable_qi(iommu);
2284         }
2285
2286         for_each_drhd_unit(drhd) {
2287                 if (drhd->ignored)
2288                         continue;
2289
2290                 iommu = drhd->iommu;
2291
2292                 if (dmar_enable_qi(iommu)) {
2293                         /*
2294                          * Queued Invalidate not enabled, use Register Based
2295                          * Invalidate
2296                          */
2297                         iommu->flush.flush_context = __iommu_flush_context;
2298                         iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2299                         printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2300                                "invalidation\n",
2301                                (unsigned long long)drhd->reg_base_addr);
2302                 } else {
2303                         iommu->flush.flush_context = qi_flush_context;
2304                         iommu->flush.flush_iotlb = qi_flush_iotlb;
2305                         printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2306                                "invalidation\n",
2307                                (unsigned long long)drhd->reg_base_addr);
2308                 }
2309         }
2310
2311         if (iommu_pass_through)
2312                 iommu_identity_mapping = 1;
2313 #ifdef CONFIG_DMAR_BROKEN_GFX_WA
2314         else
2315                 iommu_identity_mapping = 2;
2316 #endif
2317         /*
2318          * If pass through is not set or not enabled, set up context entries
2319          * for identity mappings for rmrr, gfx, and isa, and may fall back to
2320          * the static identity mapping if iommu_identity_mapping is set.
2321          */
2322         if (iommu_identity_mapping) {
2323                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2324                 if (ret) {
2325                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2326                         goto error;
2327                 }
2328         }
2329         /*
2330          * For each rmrr
2331          *   for each dev attached to rmrr
2332          *   do
2333          *     locate drhd for dev, alloc domain for dev
2334          *     allocate free domain
2335          *     allocate page table entries for rmrr
2336          *     if context not allocated for bus
2337          *           allocate and init context
2338          *           set present in root table for this bus
2339          *     init context with domain, translation etc
2340          *    endfor
2341          * endfor
2342          */
2343         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2344         for_each_rmrr_units(rmrr) {
2345                 for (i = 0; i < rmrr->devices_cnt; i++) {
2346                         pdev = rmrr->devices[i];
2347                         /*
2348                          * some BIOSes list non-existent devices in the DMAR
2349                          * table.
2350                          */
2351                         if (!pdev)
2352                                 continue;
2353                         ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2354                         if (ret)
2355                                 printk(KERN_ERR
2356                                        "IOMMU: mapping reserved region failed\n");
2357                 }
2358         }
2359
2360         iommu_prepare_isa();
2361
2362         /*
2363          * for each drhd
2364          *   enable fault log
2365          *   global invalidate context cache
2366          *   global invalidate iotlb
2367          *   enable translation
2368          */
2369         for_each_drhd_unit(drhd) {
2370                 if (drhd->ignored)
2371                         continue;
2372                 iommu = drhd->iommu;
2373
2374                 iommu_flush_write_buffer(iommu);
2375
2376                 ret = dmar_set_interrupt(iommu);
2377                 if (ret)
2378                         goto error;
2379
2380                 iommu_set_root_entry(iommu);
2381
2382                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2383                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2384                 iommu_disable_protect_mem_regions(iommu);
2385
2386                 ret = iommu_enable_translation(iommu);
2387                 if (ret)
2388                         goto error;
2389         }
2390
2391         return 0;
2392 error:
2393         for_each_drhd_unit(drhd) {
2394                 if (drhd->ignored)
2395                         continue;
2396                 iommu = drhd->iommu;
2397                 free_iommu(iommu);
2398         }
2399         kfree(g_iommus);
2400         return ret;
2401 }
2402
2403 /* This takes a number of _MM_ pages, not VTD pages */
2404 static struct iova *intel_alloc_iova(struct device *dev,
2405                                      struct dmar_domain *domain,
2406                                      unsigned long nrpages, uint64_t dma_mask)
2407 {
2408         struct pci_dev *pdev = to_pci_dev(dev);
2409         struct iova *iova = NULL;
2410
2411         /* Restrict dma_mask to the width that the iommu can handle */
2412         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2413
2414         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2415                 /*
2416                  * First try to allocate an io virtual address in
2417                  * DMA_BIT_MASK(32) and if that fails then try allocating
2418                  * from higher range
2419                  */
2420                 iova = alloc_iova(&domain->iovad, nrpages,
2421                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2422                 if (iova)
2423                         return iova;
2424         }
2425         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2426         if (unlikely(!iova)) {
2427                 printk(KERN_ERR "Allocating %lu-page iova for %s failed\n",
2428                        nrpages, pci_name(pdev));
2429                 return NULL;
2430         }
2431
2432         return iova;
2433 }
2434
2435 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2436 {
2437         struct dmar_domain *domain;
2438         int ret;
2439
2440         domain = get_domain_for_dev(pdev,
2441                         DEFAULT_DOMAIN_ADDRESS_WIDTH);
2442         if (!domain) {
2443                 printk(KERN_ERR
2444                         "Allocating domain for %s failed\n", pci_name(pdev));
2445                 return NULL;
2446         }
2447
2448         /* make sure context mapping is ok */
2449         if (unlikely(!domain_context_mapped(pdev))) {
2450                 ret = domain_context_mapping(domain, pdev,
2451                                              CONTEXT_TT_MULTI_LEVEL);
2452                 if (ret) {
2453                         printk(KERN_ERR
2454                                 "Domain context map for %s failed\n",
2455                                 pci_name(pdev));
2456                         return NULL;
2457                 }
2458         }
2459
2460         return domain;
2461 }
2462
2463 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2464 {
2465         struct device_domain_info *info;
2466
2467         /* No lock here, assumes no domain exit in normal case */
2468         info = dev->dev.archdata.iommu;
2469         if (likely(info))
2470                 return info->domain;
2471
2472         return __get_valid_domain_for_dev(dev);
2473 }
2474
2475 static int iommu_dummy(struct pci_dev *pdev)
2476 {
2477         return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2478 }
2479
2480 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2481 static int iommu_no_mapping(struct device *dev)
2482 {
2483         struct pci_dev *pdev;
2484         int found;
2485
2486         if (unlikely(dev->bus != &pci_bus_type))
2487                 return 1;
2488
2489         pdev = to_pci_dev(dev);
2490         if (iommu_dummy(pdev))
2491                 return 1;
2492
2493         if (!iommu_identity_mapping)
2494                 return 0;
2495
2496         found = identity_mapping(pdev);
2497         if (found) {
2498                 if (iommu_should_identity_map(pdev, 0))
2499                         return 1;
2500                 else {
2501                         /*
2502                          * The 32 bit DMA device is removed from si_domain
2503                          * and falls back to non-identity mapping.
2504                          */
2505                         domain_remove_one_dev_info(si_domain, pdev);
2506                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2507                                pci_name(pdev));
2508                         return 0;
2509                 }
2510         } else {
2511                 /*
2512                  * When a 64 bit DMA device is detached from a VM, the device
2513                  * is put into si_domain for identity mapping.
2514                  */
2515                 if (iommu_should_identity_map(pdev, 0)) {
2516                         int ret;
2517                         ret = domain_add_dev_info(si_domain, pdev,
2518                                                   hw_pass_through ?
2519                                                   CONTEXT_TT_PASS_THROUGH :
2520                                                   CONTEXT_TT_MULTI_LEVEL);
2521                         if (!ret) {
2522                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
2523                                        pci_name(pdev));
2524                                 return 1;
2525                         }
2526                 }
2527         }
2528
2529         return 0;
2530 }
2531
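/*
 * Map @size bytes at @paddr for DMA.  Identity-mapped devices skip
 * translation entirely; otherwise allocate an iova below @dma_mask,
 * build the page table entries, and flush the IOTLB (in caching mode)
 * or the write buffer before returning the bus address.
 */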
2532 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2533                                      size_t size, int dir, u64 dma_mask)
2534 {
2535         struct pci_dev *pdev = to_pci_dev(hwdev);
2536         struct dmar_domain *domain;
2537         phys_addr_t start_paddr;
2538         struct iova *iova;
2539         int prot = 0;
2540         int ret;
2541         struct intel_iommu *iommu;
2542         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2543
2544         BUG_ON(dir == DMA_NONE);
2545
2546         if (iommu_no_mapping(hwdev))
2547                 return paddr;
2548
2549         domain = get_valid_domain_for_dev(pdev);
2550         if (!domain)
2551                 return 0;
2552
2553         iommu = domain_get_iommu(domain);
2554         size = aligned_nrpages(paddr, size);
2555
2556         iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2557                                 pdev->dma_mask);
2558         if (!iova)
2559                 goto error;
2560
2561         /*
2562          * Check if DMAR supports zero-length reads on write only
2563          * mappings.
2564          */
2565         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2566                         !cap_zlr(iommu->cap))
2567                 prot |= DMA_PTE_READ;
2568         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2569                 prot |= DMA_PTE_WRITE;
2570         /*
2571          * paddr to (paddr + size) might span a partial page; we should map
2572          * the whole page.  Note: if two parts of one page are separately
2573          * mapped, we might have two guest_addrs mapping to the same host
2574          * paddr, but this is not a big problem
2575          */
2576         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2577                                  mm_to_dma_pfn(paddr_pfn), size, prot);
2578         if (ret)
2579                 goto error;
2580
2581         /* it's a non-present to present mapping. Only flush if caching mode */
2582         if (cap_caching_mode(iommu->cap))
2583                 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2584         else
2585                 iommu_flush_write_buffer(iommu);
2586
2587         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2588         start_paddr += paddr & ~PAGE_MASK;
2589         return start_paddr;
2590
2591 error:
2592         if (iova)
2593                 __free_iova(&domain->iovad, iova);
2594         printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2595                 pci_name(pdev), size, (unsigned long long)paddr, dir);
2596         return 0;
2597 }
2598
2599 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2600                                  unsigned long offset, size_t size,
2601                                  enum dma_data_direction dir,
2602                                  struct dma_attrs *attrs)
2603 {
2604         return __intel_map_single(dev, page_to_phys(page) + offset, size,
2605                                   dir, to_pci_dev(dev)->dma_mask);
2606 }
2607
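/*
 * Deferred unmap handling for non-strict mode: rather than flushing
 * the IOTLB on every unmap, freed iovas are batched per iommu and
 * released here, either from the unmap timer or when add_unmap() hits
 * HIGH_WATER_MARK.  The caller must hold async_umap_flush_lock.
 */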
2608 static void flush_unmaps(void)
2609 {
2610         int i, j;
2611
2612         timer_on = 0;
2613
2614         /* just flush them all */
2615         for (i = 0; i < g_num_of_iommus; i++) {
2616                 struct intel_iommu *iommu = g_iommus[i];
2617                 if (!iommu)
2618                         continue;
2619
2620                 if (!deferred_flush[i].next)
2621                         continue;
2622
2623                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2624                                          DMA_TLB_GLOBAL_FLUSH);
2625                 for (j = 0; j < deferred_flush[i].next; j++) {
2626                         unsigned long mask;
2627                         struct iova *iova = deferred_flush[i].iova[j];
2628
2629                         mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2630                         mask = ilog2(mask >> VTD_PAGE_SHIFT);
2631                         iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2632                                         iova->pfn_lo << PAGE_SHIFT, mask);
2633                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2634                 }
2635                 deferred_flush[i].next = 0;
2636         }
2637
2638         list_size = 0;
2639 }
2640
2641 static void flush_unmaps_timeout(unsigned long data)
2642 {
2643         unsigned long flags;
2644
2645         spin_lock_irqsave(&async_umap_flush_lock, flags);
2646         flush_unmaps();
2647         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2648 }
2649
2650 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2651 {
2652         unsigned long flags;
2653         int next, iommu_id;
2654         struct intel_iommu *iommu;
2655
2656         spin_lock_irqsave(&async_umap_flush_lock, flags);
2657         if (list_size == HIGH_WATER_MARK)
2658                 flush_unmaps();
2659
2660         iommu = domain_get_iommu(dom);
2661         iommu_id = iommu->seq_id;
2662
2663         next = deferred_flush[iommu_id].next;
2664         deferred_flush[iommu_id].domain[next] = dom;
2665         deferred_flush[iommu_id].iova[next] = iova;
2666         deferred_flush[iommu_id].next++;
2667
2668         if (!timer_on) {
2669                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2670                 timer_on = 1;
2671         }
2672         list_size++;
2673         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2674 }
2675
2676 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2677                              size_t size, enum dma_data_direction dir,
2678                              struct dma_attrs *attrs)
2679 {
2680         struct pci_dev *pdev = to_pci_dev(dev);
2681         struct dmar_domain *domain;
2682         unsigned long start_pfn, last_pfn;
2683         struct iova *iova;
2684         struct intel_iommu *iommu;
2685
2686         if (iommu_no_mapping(dev))
2687                 return;
2688
2689         domain = find_domain(pdev);
2690         BUG_ON(!domain);
2691
2692         iommu = domain_get_iommu(domain);
2693
2694         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2695         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2696                       (unsigned long long)dev_addr))
2697                 return;
2698
2699         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2700         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2701
2702         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2703                  pci_name(pdev), start_pfn, last_pfn);
2704
2705         /*  clear the whole page */
2706         dma_pte_clear_range(domain, start_pfn, last_pfn);
2707
2708         /* free page tables */
2709         dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2710
2711         if (intel_iommu_strict) {
2712                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2713                                       last_pfn - start_pfn + 1);
2714                 /* free iova */
2715                 __free_iova(&domain->iovad, iova);
2716         } else {
2717                 add_unmap(domain, iova);
2718                 /*
2719                  * queue up the release of the unmap to save the 1/6th of the
2720                  * cpu used up by the iotlb flush operation...
2721                  */
2722         }
2723 }
2724
2725 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2726                                   dma_addr_t *dma_handle, gfp_t flags)
2727 {
2728         void *vaddr;
2729         int order;
2730
2731         size = PAGE_ALIGN(size);
2732         order = get_order(size);
2733         flags &= ~(GFP_DMA | GFP_DMA32);
2734
2735         vaddr = (void *)__get_free_pages(flags, order);
2736         if (!vaddr)
2737                 return NULL;
2738         memset(vaddr, 0, size);
2739
2740         *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2741                                          DMA_BIDIRECTIONAL,
2742                                          hwdev->coherent_dma_mask);
2743         if (*dma_handle)
2744                 return vaddr;
2745         free_pages((unsigned long)vaddr, order);
2746         return NULL;
2747 }
2748
2749 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2750                                 dma_addr_t dma_handle)
2751 {
2752         int order;
2753
2754         size = PAGE_ALIGN(size);
2755         order = get_order(size);
2756
2757         intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2758         free_pages((unsigned long)vaddr, order);
2759 }
2760
2761 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2762                            int nelems, enum dma_data_direction dir,
2763                            struct dma_attrs *attrs)
2764 {
2765         struct pci_dev *pdev = to_pci_dev(hwdev);
2766         struct dmar_domain *domain;
2767         unsigned long start_pfn, last_pfn;
2768         struct iova *iova;
2769         struct intel_iommu *iommu;
2770
2771         if (iommu_no_mapping(hwdev))
2772                 return;
2773
2774         domain = find_domain(pdev);
2775         BUG_ON(!domain);
2776
2777         iommu = domain_get_iommu(domain);
2778
2779         iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2780         if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2781                       (unsigned long long)sglist[0].dma_address))
2782                 return;
2783
2784         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2785         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2786
2787         /*  clear the whole page */
2788         dma_pte_clear_range(domain, start_pfn, last_pfn);
2789
2790         /* free page tables */
2791         dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2792
2793         if (intel_iommu_strict) {
2794                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2795                                       last_pfn - start_pfn + 1);
2796                 /* free iova */
2797                 __free_iova(&domain->iovad, iova);
2798         } else {
2799                 add_unmap(domain, iova);
2800                 /*
2801                  * queue up the release of the unmap to save the 1/6th of the
2802                  * cpu used up by the iotlb flush operation...
2803                  */
2804         }
2805 }
2806
2807 static int intel_nontranslate_map_sg(struct device *hddev,
2808         struct scatterlist *sglist, int nelems, int dir)
2809 {
2810         int i;
2811         struct scatterlist *sg;
2812
2813         for_each_sg(sglist, sg, nelems, i) {
2814                 BUG_ON(!sg_page(sg));
2815                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2816                 sg->dma_length = sg->length;
2817         }
2818         return nelems;
2819 }
2820
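/*
 * Map a scatterlist: allocate a single iova large enough to cover
 * every element, then make one pass through __domain_mapping() to fill
 * in the page tables and the per-sg dma_address/dma_length fields.
 */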
2821 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2822                         enum dma_data_direction dir, struct dma_attrs *attrs)
2823 {
2824         int i;
2825         struct pci_dev *pdev = to_pci_dev(hwdev);
2826         struct dmar_domain *domain;
2827         size_t size = 0;
2828         int prot = 0;
2830         struct iova *iova = NULL;
2831         int ret;
2832         struct scatterlist *sg;
2833         unsigned long start_vpfn;
2834         struct intel_iommu *iommu;
2835
2836         BUG_ON(dir == DMA_NONE);
2837         if (iommu_no_mapping(hwdev))
2838                 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2839
2840         domain = get_valid_domain_for_dev(pdev);
2841         if (!domain)
2842                 return 0;
2843
2844         iommu = domain_get_iommu(domain);
2845
2846         for_each_sg(sglist, sg, nelems, i)
2847                 size += aligned_nrpages(sg->offset, sg->length);
2848
2849         iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2850                                 pdev->dma_mask);
2851         if (!iova) {
2852                 sglist->dma_length = 0;
2853                 return 0;
2854         }
2855
2856         /*
2857          * Check if DMAR supports zero-length reads on write only
2858          * mappings.
2859          */
2860         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2861                         !cap_zlr(iommu->cap))
2862                 prot |= DMA_PTE_READ;
2863         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2864                 prot |= DMA_PTE_WRITE;
2865
2866         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2867
2868         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
2869         if (unlikely(ret)) {
2870                 /*  clear the page */
2871                 dma_pte_clear_range(domain, start_vpfn,
2872                                     start_vpfn + size - 1);
2873                 /* free page tables */
2874                 dma_pte_free_pagetable(domain, start_vpfn,
2875                                        start_vpfn + size - 1);
2876                 /* free iova */
2877                 __free_iova(&domain->iovad, iova);
2878                 return 0;
2879         }
2880
2881         /* it's a non-present to present mapping. Only flush if caching mode */
2882         if (cap_caching_mode(iommu->cap))
2883                 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
2884         else
2885                 iommu_flush_write_buffer(iommu);
2886
2887         return nelems;
2888 }
2889
2890 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2891 {
2892         return !dma_addr;
2893 }
2894
2895 struct dma_map_ops intel_dma_ops = {
2896         .alloc_coherent = intel_alloc_coherent,
2897         .free_coherent = intel_free_coherent,
2898         .map_sg = intel_map_sg,
2899         .unmap_sg = intel_unmap_sg,
2900         .map_page = intel_map_page,
2901         .unmap_page = intel_unmap_page,
2902         .mapping_error = intel_mapping_error,
2903 };
2904
2905 static inline int iommu_domain_cache_init(void)
2906 {
2907         int ret = 0;
2908
2909         iommu_domain_cache = kmem_cache_create("iommu_domain",
2910                                          sizeof(struct dmar_domain),
2911                                          0,
2912                                          SLAB_HWCACHE_ALIGN,
2914                                          NULL);
2915         if (!iommu_domain_cache) {
2916                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2917                 ret = -ENOMEM;
2918         }
2919
2920         return ret;
2921 }
2922
2923 static inline int iommu_devinfo_cache_init(void)
2924 {
2925         int ret = 0;
2926
2927         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2928                                          sizeof(struct device_domain_info),
2929                                          0,
2930                                          SLAB_HWCACHE_ALIGN,
2931                                          NULL);
2932         if (!iommu_devinfo_cache) {
2933                 printk(KERN_ERR "Couldn't create devinfo cache\n");
2934                 ret = -ENOMEM;
2935         }
2936
2937         return ret;
2938 }
2939
2940 static inline int iommu_iova_cache_init(void)
2941 {
2942         int ret = 0;
2943
2944         iommu_iova_cache = kmem_cache_create("iommu_iova",
2945                                          sizeof(struct iova),
2946                                          0,
2947                                          SLAB_HWCACHE_ALIGN,
2948                                          NULL);
2949         if (!iommu_iova_cache) {
2950                 printk(KERN_ERR "Couldn't create iova cache\n");
2951                 ret = -ENOMEM;
2952         }
2953
2954         return ret;
2955 }
2956
2957 static int __init iommu_init_mempool(void)
2958 {
2959         int ret;
2960         ret = iommu_iova_cache_init();
2961         if (ret)
2962                 return ret;
2963
2964         ret = iommu_domain_cache_init();
2965         if (ret)
2966                 goto domain_error;
2967
2968         ret = iommu_devinfo_cache_init();
2969         if (!ret)
2970                 return ret;
2971
2972         kmem_cache_destroy(iommu_domain_cache);
2973 domain_error:
2974         kmem_cache_destroy(iommu_iova_cache);
2975
2976         return -ENOMEM;
2977 }
2978
2979 static void __init iommu_exit_mempool(void)
2980 {
2981         kmem_cache_destroy(iommu_devinfo_cache);
2982         kmem_cache_destroy(iommu_domain_cache);
2983         kmem_cache_destroy(iommu_iova_cache);
2984
2985 }
2986
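/*
 * Mark DRHD units that control no PCI devices as ignored and, when
 * dmar_map_gfx is clear, bypass any unit that covers only graphics
 * devices by giving those devices the dummy domain info.
 */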
2987 static void __init init_no_remapping_devices(void)
2988 {
2989         struct dmar_drhd_unit *drhd;
2990
2991         for_each_drhd_unit(drhd) {
2992                 if (!drhd->include_all) {
2993                         int i;
2994                         for (i = 0; i < drhd->devices_cnt; i++)
2995                                 if (drhd->devices[i] != NULL)
2996                                         break;
2997                         /* ignore DMAR unit if no pci devices exist */
2998                         if (i == drhd->devices_cnt)
2999                                 drhd->ignored = 1;
3000                 }
3001         }
3002
3003         if (dmar_map_gfx)
3004                 return;
3005
3006         for_each_drhd_unit(drhd) {
3007                 int i;
3008                 if (drhd->ignored || drhd->include_all)
3009                         continue;
3010
3011                 for (i = 0; i < drhd->devices_cnt; i++)
3012                         if (drhd->devices[i] &&
3013                                 !IS_GFX_DEVICE(drhd->devices[i]))
3014                                 break;
3015
3016                 if (i < drhd->devices_cnt)
3017                         continue;
3018
3019                 /* bypass IOMMU if it is just for gfx devices */
3020                 drhd->ignored = 1;
3021                 for (i = 0; i < drhd->devices_cnt; i++) {
3022                         if (!drhd->devices[i])
3023                                 continue;
3024                         drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3025                 }
3026         }
3027 }
3028
3029 #ifdef CONFIG_SUSPEND
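/*
 * Suspend/resume support: translation is disabled and the fault-event
 * registers are saved on suspend; resume re-enables queued
 * invalidation, reprograms the root entries and restores those
 * registers.
 */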
3030 static int init_iommu_hw(void)
3031 {
3032         struct dmar_drhd_unit *drhd;
3033         struct intel_iommu *iommu = NULL;
3034
3035         for_each_active_iommu(iommu, drhd)
3036                 if (iommu->qi)
3037                         dmar_reenable_qi(iommu);
3038
3039         for_each_active_iommu(iommu, drhd) {
3040                 iommu_flush_write_buffer(iommu);
3041
3042                 iommu_set_root_entry(iommu);
3043
3044                 iommu->flush.flush_context(iommu, 0, 0, 0,
3045                                            DMA_CCMD_GLOBAL_INVL);
3046                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3047                                          DMA_TLB_GLOBAL_FLUSH);
3048                 iommu_disable_protect_mem_regions(iommu);
3049                 iommu_enable_translation(iommu);
3050         }
3051
3052         return 0;
3053 }
3054
3055 static void iommu_flush_all(void)
3056 {
3057         struct dmar_drhd_unit *drhd;
3058         struct intel_iommu *iommu;
3059
3060         for_each_active_iommu(iommu, drhd) {
3061                 iommu->flush.flush_context(iommu, 0, 0, 0,
3062                                            DMA_CCMD_GLOBAL_INVL);
3063                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3064                                          DMA_TLB_GLOBAL_FLUSH);
3065         }
3066 }
3067
3068 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3069 {
3070         struct dmar_drhd_unit *drhd;
3071         struct intel_iommu *iommu = NULL;
3072         unsigned long flag;
3073
3074         for_each_active_iommu(iommu, drhd) {
3075                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3076                                                  GFP_ATOMIC);
3077                 if (!iommu->iommu_state)
3078                         goto nomem;
3079         }
3080
3081         iommu_flush_all();
3082
3083         for_each_active_iommu(iommu, drhd) {
3084                 iommu_disable_translation(iommu);
3085
3086                 spin_lock_irqsave(&iommu->register_lock, flag);
3087
3088                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3089                         readl(iommu->reg + DMAR_FECTL_REG);
3090                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3091                         readl(iommu->reg + DMAR_FEDATA_REG);
3092                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3093                         readl(iommu->reg + DMAR_FEADDR_REG);
3094                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3095                         readl(iommu->reg + DMAR_FEUADDR_REG);
3096
3097                 spin_unlock_irqrestore(&iommu->register_lock, flag);
3098         }
3099         return 0;
3100
3101 nomem:
3102         for_each_active_iommu(iommu, drhd)
3103                 kfree(iommu->iommu_state);
3104
3105         return -ENOMEM;
3106 }
3107
3108 static int iommu_resume(struct sys_device *dev)
3109 {
3110         struct dmar_drhd_unit *drhd;
3111         struct intel_iommu *iommu = NULL;
3112         unsigned long flag;
3113
3114         if (init_iommu_hw()) {
3115                 WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3116                 return -EIO;
3117         }
3118
3119         for_each_active_iommu(iommu, drhd) {
3120
3121                 spin_lock_irqsave(&iommu->register_lock, flag);
3122
3123                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3124                         iommu->reg + DMAR_FECTL_REG);
3125                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3126                         iommu->reg + DMAR_FEDATA_REG);
3127                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3128                         iommu->reg + DMAR_FEADDR_REG);
3129                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3130                         iommu->reg + DMAR_FEUADDR_REG);
3131
3132                 spin_unlock_irqrestore(&iommu->register_lock, flag);
3133         }
3134
3135         for_each_active_iommu(iommu, drhd)
3136                 kfree(iommu->iommu_state);
3137
3138         return 0;
3139 }
3140
3141 static struct sysdev_class iommu_sysclass = {
3142         .name           = "iommu",
3143         .resume         = iommu_resume,
3144         .suspend        = iommu_suspend,
3145 };
3146
3147 static struct sys_device device_iommu = {
3148         .cls    = &iommu_sysclass,
3149 };
3150
3151 static int __init init_iommu_sysfs(void)
3152 {
3153         int error;
3154
3155         error = sysdev_class_register(&iommu_sysclass);
3156         if (error)
3157                 return error;
3158
3159         error = sysdev_register(&device_iommu);
3160         if (error)
3161                 sysdev_class_unregister(&iommu_sysclass);
3162
3163         return error;
3164 }
3165
3166 #else
3167 static int __init init_iommu_sysfs(void)
3168 {
3169         return 0;
3170 }
3171 #endif  /* CONFIG_SUSPEND */
3172
3173 int __init intel_iommu_init(void)
3174 {
3175         int ret = 0;
3176
3177         if (dmar_table_init())
3178                 return  -ENODEV;
3179
3180         if (dmar_dev_scope_init())
3181                 return  -ENODEV;
3182
3183         /*
3184          * Check now whether DMA remapping itself is wanted; the table and
3185          * scope initialization above is also used by interrupt remapping.
3186          */
3187         if (no_iommu || swiotlb || dmar_disabled)
3188                 return -ENODEV;
3189
3190         iommu_init_mempool();
3191         dmar_init_reserved_ranges();
3192
3193         init_no_remapping_devices();
3194
3195         ret = init_dmars();
3196         if (ret) {
3197                 printk(KERN_ERR "IOMMU: dmar init failed\n");
3198                 put_iova_domain(&reserved_iova_list);
3199                 iommu_exit_mempool();
3200                 return ret;
3201         }
3202         printk(KERN_INFO
3203                "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3204
3205         init_timer(&unmap_timer);
3206         force_iommu = 1;
3207         dma_ops = &intel_dma_ops;
3208
3209         init_iommu_sysfs();
3210
3211         register_iommu(&intel_iommu_ops);
3212
3213         return 0;
3214 }
3215
3216 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3217                                            struct pci_dev *pdev)
3218 {
3219         struct pci_dev *tmp, *parent;
3220
3221         if (!iommu || !pdev)
3222                 return;
3223
3224         /* detach the context entries of any bridges the device depends on */
3225         tmp = pci_find_upstream_pcie_bridge(pdev);
3226         /* a PCIe-to-PCI bridge is addressed via its secondary bus, devfn 0 */
3227         if (tmp) {
3228                 parent = pdev->bus->self;
3229                 while (parent != tmp) {
3230                         iommu_detach_dev(iommu, parent->bus->number,
3231                                          parent->devfn);
3232                         parent = parent->bus->self;
3233                 }
3234                 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3235                         iommu_detach_dev(iommu,
3236                                 tmp->subordinate->number, 0);
3237                 else /* this is a legacy PCI bridge */
3238                         iommu_detach_dev(iommu, tmp->bus->number,
3239                                          tmp->devfn);
3240         }
3241 }
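/*
 * Worked example (illustrative topology, not from the source): for a
 * conventional PCI device at 02:00.0 behind a PCIe-to-PCI bridge,
 *
 *	00:1c.0 root port --- 01:00.0 PCIe-to-PCI bridge --- 02:00.0 dev
 *
 * pci_find_upstream_pcie_bridge() returns the 01:00.0 bridge, the while
 * loop detaches any bridges between the device and it, and since
 * tmp->is_pcie the final call detaches (bus 02, devfn 0), the alias
 * such bridges present when forwarding DMA for devices behind them.
 */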
3242
3243 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3244                                           struct pci_dev *pdev)
3245 {
3246         struct device_domain_info *info;
3247         struct intel_iommu *iommu;
3248         unsigned long flags;
3249         int found = 0;
3250         struct list_head *entry, *tmp;
3251
3252         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3253                                 pdev->devfn);
3254         if (!iommu)
3255                 return;
3256
3257         spin_lock_irqsave(&device_domain_lock, flags);
3258         list_for_each_safe(entry, tmp, &domain->devices) {
3259                 info = list_entry(entry, struct device_domain_info, link);
3260                 /* No need to compare PCI domain; it has to be the same */
3261                 if (info->bus == pdev->bus->number &&
3262                     info->devfn == pdev->devfn) {
3263                         list_del(&info->link);
3264                         list_del(&info->global);
3265                         if (info->dev)
3266                                 info->dev->dev.archdata.iommu = NULL;
3267                         spin_unlock_irqrestore(&device_domain_lock, flags);
3268
3269                         iommu_disable_dev_iotlb(info);
3270                         iommu_detach_dev(iommu, info->bus, info->devfn);
3271                         iommu_detach_dependent_devices(iommu, pdev);
3272                         free_devinfo_mem(info);
3273
3274                         spin_lock_irqsave(&device_domain_lock, flags);
3275
3276                         if (found)
3277                                 break;
3278                         else
3279                                 continue;
3280                 }
3281
3282                 /* if no other device under the same iommu is owned
3283                  * by this domain, clear this iommu in iommu_bmp and
3284                  * update the iommu count and coherency
3285                  */
3286                 if (iommu == device_to_iommu(info->segment, info->bus,
3287                                             info->devfn))
3288                         found = 1;
3289         }
3290
3291         if (found == 0) {
3292                 unsigned long tmp_flags;
3293                 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3294                 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3295                 domain->iommu_count--;
3296                 domain_update_iommu_cap(domain);
3297                 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3298         }
3299
3300         spin_unlock_irqrestore(&device_domain_lock, flags);
3301 }
3302
3303 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3304 {
3305         struct device_domain_info *info;
3306         struct intel_iommu *iommu;
3307         unsigned long flags1, flags2;
3308
3309         spin_lock_irqsave(&device_domain_lock, flags1);
3310         while (!list_empty(&domain->devices)) {
3311                 info = list_entry(domain->devices.next,
3312                         struct device_domain_info, link);
3313                 list_del(&info->link);
3314                 list_del(&info->global);
3315                 if (info->dev)
3316                         info->dev->dev.archdata.iommu = NULL;
3317
3318                 spin_unlock_irqrestore(&device_domain_lock, flags1);
3319
3320                 iommu_disable_dev_iotlb(info);
3321                 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3322                 iommu_detach_dev(iommu, info->bus, info->devfn);
3323                 iommu_detach_dependent_devices(iommu, info->dev);
3324
3325                 /* clear this iommu in iommu_bmp, update iommu count
3326                  * and capabilities
3327                  */
3328                 spin_lock_irqsave(&domain->iommu_lock, flags2);
3329                 if (test_and_clear_bit(iommu->seq_id,
3330                                        &domain->iommu_bmp)) {
3331                         domain->iommu_count--;
3332                         domain_update_iommu_cap(domain);
3333                 }
3334                 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3335
3336                 free_devinfo_mem(info);
3337                 spin_lock_irqsave(&device_domain_lock, flags1);
3338         }
3339         spin_unlock_irqrestore(&device_domain_lock, flags1);
3340 }
3341
3342 /* domain id for virtual machines; it is never set in a context entry */
3343 static unsigned long vm_domid;
3344
3345 static int vm_domain_min_agaw(struct dmar_domain *domain)
3346 {
3347         int i;
3348         int min_agaw = domain->agaw;
3349
3350         i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3351         for (; i < g_num_of_iommus; ) {
3352                 if (min_agaw > g_iommus[i]->agaw)
3353                         min_agaw = g_iommus[i]->agaw;
3354
3355                 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3356         }
3357
3358         return min_agaw;
3359 }
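/*
 * Worked example (illustrative; agaw_to_width() earlier in this file
 * computes width = 30 + agaw * 9): a VM domain spanning an iommu with
 * agaw 2 (48-bit) and one with agaw 1 (39-bit) gets min_agaw 1, so
 * intel_iommu_map_range() below refuses any mapping that reaches past
 * the 39-bit limit of the weaker unit.
 */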
3360
3361 static struct dmar_domain *iommu_alloc_vm_domain(void)
3362 {
3363         struct dmar_domain *domain;
3364
3365         domain = alloc_domain_mem();
3366         if (!domain)
3367                 return NULL;
3368
3369         domain->id = vm_domid++;
3370         memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3371         domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3372
3373         return domain;
3374 }
3375
3376 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3377 {
3378         int adjust_width;
3379
3380         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3381         spin_lock_init(&domain->iommu_lock);
3382
3383         domain_reserve_special_ranges(domain);
3384
3385         /* calculate AGAW */
3386         domain->gaw = guest_width;
3387         adjust_width = guestwidth_to_adjustwidth(guest_width);
3388         domain->agaw = width_to_agaw(adjust_width);
3389
3390         INIT_LIST_HEAD(&domain->devices);
3391
3392         domain->iommu_count = 0;
3393         domain->iommu_coherency = 0;
3394         domain->iommu_snooping = 0;
3395         domain->max_addr = 0;
3396
3397         /* always allocate the top pgd */
3398         domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3399         if (!domain->pgd)
3400                 return -ENOMEM;
3401         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3402         return 0;
3403 }
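/*
 * Worked example (illustrative arithmetic): with the 48-bit
 * DEFAULT_DOMAIN_ADDRESS_WIDTH passed in by intel_iommu_domain_init()
 * below, guestwidth_to_adjustwidth(48) returns 48 (48 - 12 is already a
 * multiple of the 9-bit level stride) and width_to_agaw(48) gives
 * agaw 2, i.e. a 4-level page table rooted at the pgd just allocated.
 */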
3404
3405 static void iommu_free_vm_domain(struct dmar_domain *domain)
3406 {
3407         unsigned long flags;
3408         struct dmar_drhd_unit *drhd;
3409         struct intel_iommu *iommu;
3410         unsigned long i;
3411         unsigned long ndomains;
3412
3413         for_each_drhd_unit(drhd) {
3414                 if (drhd->ignored)
3415                         continue;
3416                 iommu = drhd->iommu;
3417
3418                 ndomains = cap_ndoms(iommu->cap);
3419                 i = find_first_bit(iommu->domain_ids, ndomains);
3420                 for (; i < ndomains; ) {
3421                         if (iommu->domains[i] == domain) {
3422                                 spin_lock_irqsave(&iommu->lock, flags);
3423                                 clear_bit(i, iommu->domain_ids);
3424                                 iommu->domains[i] = NULL;
3425                                 spin_unlock_irqrestore(&iommu->lock, flags);
3426                                 break;
3427                         }
3428                         i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3429                 }
3430         }
3431 }
3432
3433 static void vm_domain_exit(struct dmar_domain *domain)
3434 {
3435         /* Domain 0 is reserved, so don't process it */
3436         if (!domain)
3437                 return;
3438
3439         vm_domain_remove_all_dev_info(domain);
3440         /* destroy iovas */
3441         put_iova_domain(&domain->iovad);
3442
3443         /* clear ptes */
3444         dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3445
3446         /* free page tables */
3447         dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3448
3449         iommu_free_vm_domain(domain);
3450         free_domain_mem(domain);
3451 }
3452
3453 static int intel_iommu_domain_init(struct iommu_domain *domain)
3454 {
3455         struct dmar_domain *dmar_domain;
3456
3457         dmar_domain = iommu_alloc_vm_domain();
3458         if (!dmar_domain) {
3459                 printk(KERN_ERR
3460                         "intel_iommu_domain_init: failed to allocate dmar_domain\n");
3461                 return -ENOMEM;
3462         }
3463         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3464                 printk(KERN_ERR
3465                         "intel_iommu_domain_init: md_domain_init() failed\n");
3466                 vm_domain_exit(dmar_domain);
3467                 return -ENOMEM;
3468         }
3469         domain->priv = dmar_domain;
3470
3471         return 0;
3472 }
3473
3474 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3475 {
3476         struct dmar_domain *dmar_domain = domain->priv;
3477
3478         domain->priv = NULL;
3479         vm_domain_exit(dmar_domain);
3480 }
3481
3482 static int intel_iommu_attach_device(struct iommu_domain *domain,
3483                                      struct device *dev)
3484 {
3485         struct dmar_domain *dmar_domain = domain->priv;
3486         struct pci_dev *pdev = to_pci_dev(dev);
3487         struct intel_iommu *iommu;
3488         int addr_width;
3489         u64 end;
3490
3491         /* normally pdev is not mapped */
3492         if (unlikely(domain_context_mapped(pdev))) {
3493                 struct dmar_domain *old_domain;
3494
3495                 old_domain = find_domain(pdev);
3496                 if (old_domain) {
3497                         if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3498                             dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3499                                 domain_remove_one_dev_info(old_domain, pdev);
3500                         else
3501                                 domain_remove_dev_info(old_domain);
3502                 }
3503         }
3504
3505         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3506                                 pdev->devfn);
3507         if (!iommu)
3508                 return -ENODEV;
3509
3510         /* check if this iommu agaw is sufficient for max mapped address */
3511         addr_width = agaw_to_width(iommu->agaw);
3512         end = DOMAIN_MAX_ADDR(addr_width);
3513         end = end & VTD_PAGE_MASK;
3514         if (end < dmar_domain->max_addr) {
3515                 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3516                        "sufficient for the mapped address (%llx)\n",
3517                        __func__, iommu->agaw, dmar_domain->max_addr);
3518                 return -EFAULT;
3519         }
3520
3521         return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3522 }
3523
3524 static void intel_iommu_detach_device(struct iommu_domain *domain,
3525                                       struct device *dev)
3526 {
3527         struct dmar_domain *dmar_domain = domain->priv;
3528         struct pci_dev *pdev = to_pci_dev(dev);
3529
3530         domain_remove_one_dev_info(dmar_domain, pdev);
3531 }
3532
3533 static int intel_iommu_map_range(struct iommu_domain *domain,
3534                                  unsigned long iova, phys_addr_t hpa,
3535                                  size_t size, int iommu_prot)
3536 {
3537         struct dmar_domain *dmar_domain = domain->priv;
3538         u64 max_addr;
3539         int addr_width;
3540         int prot = 0;
3541         int ret;
3542
3543         if (iommu_prot & IOMMU_READ)
3544                 prot |= DMA_PTE_READ;
3545         if (iommu_prot & IOMMU_WRITE)
3546                 prot |= DMA_PTE_WRITE;
3547         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3548                 prot |= DMA_PTE_SNP;
3549
3550         max_addr = iova + size;
3551         if (dmar_domain->max_addr < max_addr) {
3552                 int min_agaw;
3553                 u64 end;
3554
3555                 /* check if minimum agaw is sufficient for mapped address */
3556                 min_agaw = vm_domain_min_agaw(dmar_domain);
3557                 addr_width = agaw_to_width(min_agaw);
3558                 end = DOMAIN_MAX_ADDR(addr_width);
3559                 end = end & VTD_PAGE_MASK;
3560                 if (end < max_addr) {
3561                         printk(KERN_ERR "%s: iommu agaw (%d) is not "
3562                                "sufficient for the mapped address (%llx)\n",
3563                                __func__, min_agaw, max_addr);
3564                         return -EFAULT;
3565                 }
3566                 dmar_domain->max_addr = max_addr;
3567         }
3568         /* Convert size into a whole number of VT-d pages, rounding up
3569            when the low bits of hpa push the end onto another page */
3570         size = aligned_nrpages(hpa, size);
3571         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3572                                  hpa >> VTD_PAGE_SHIFT, size, prot);
3573         return ret;
3574 }
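/*
 * Worked example for the rounding above (illustrative numbers): with
 * hpa = 0x5ffc and size = 0x10, the range starts 0xffc bytes into one
 * 4KiB VT-d page and ends 0xc bytes into the next, so aligned_nrpages()
 * yields two pages and both are mapped even though size is well under a
 * page.
 */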
3575
3576 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3577                                     unsigned long iova, size_t size)
3578 {
3579         struct dmar_domain *dmar_domain = domain->priv;
3580
3581         if (!size)
3582                 return;
3583
3584         dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3585                             (iova + size - 1) >> VTD_PAGE_SHIFT);
3586
3587         if (dmar_domain->max_addr == iova + size)
3588                 dmar_domain->max_addr = iova;
3589 }
3590
3591 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3592                                             unsigned long iova)
3593 {
3594         struct dmar_domain *dmar_domain = domain->priv;
3595         struct dma_pte *pte;
3596         u64 phys = 0;
3597
3598         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3599         if (pte)
3600                 phys = dma_pte_addr(pte);
3601
3602         return phys;
3603 }
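/*
 * Illustrative note: the iova is shifted down before the lookup and
 * dma_pte_addr() masks with VTD_PAGE_MASK, so the value returned is
 * the page-aligned base of the mapping.  For example, iova 0x10234
 * backed by host page 0x80000 yields 0x80000; a caller wanting a byte
 * address must add the low bits back itself.
 */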
3604
3605 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3606                                       unsigned long cap)
3607 {
3608         struct dmar_domain *dmar_domain = domain->priv;
3609
3610         if (cap == IOMMU_CAP_CACHE_COHERENCY)
3611                 return dmar_domain->iommu_snooping;
3612
3613         return 0;
3614 }
3615
3616 static struct iommu_ops intel_iommu_ops = {
3617         .domain_init    = intel_iommu_domain_init,
3618         .domain_destroy = intel_iommu_domain_destroy,
3619         .attach_dev     = intel_iommu_attach_device,
3620         .detach_dev     = intel_iommu_detach_device,
3621         .map            = intel_iommu_map_range,
3622         .unmap          = intel_iommu_unmap_range,
3623         .iova_to_phys   = intel_iommu_iova_to_phys,
3624         .domain_has_cap = intel_iommu_domain_has_cap,
3625 };
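/*
 * Usage sketch (illustrative only; "dev" stands for a device the caller
 * owns): once intel_iommu_init() has called register_iommu(), a client
 * such as KVM drives these ops through the generic wrappers declared in
 * <linux/iommu.h>:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (dom && !iommu_attach_device(dom, dev)) {
 *		iommu_map_range(dom, iova, hpa, size,
 *				IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap_range(dom, iova, size);
 *		iommu_detach_device(dom, dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 */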
3626
3627 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3628 {
3629         /*
3630          * Mobile 4 Series Chipset neglects to set RWBF capability,
3631          * but needs it:
3632          */
3633         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3634         rwbf_quirk = 1;
3635 }
3636
3637 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);