#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
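
/*
 * Illustrative sketch (not part of the original file): a typical PCI
 * driver negotiates its mask in probe(), falling back from a 64-bit
 * (DAC) mask to a 32-bit (SAC) one.  "example_probe_mask" is a
 * hypothetical helper.
 */
#if 0
static int example_probe_mask(struct pci_dev *pdev)
{
        if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
            dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
                return -EIO;    /* device cannot do DMA at all */
        return 0;
}
#endif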
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * check aperture_64.c allocate_aperture() for reason about
         * using 512M as goal
         */
        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
void __init pci_iommu_alloc(void)
{
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}
#endif /* CONFIG_X86_64 */
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
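
/*
 * Illustrative command lines (derived from the options parsed above;
 * not part of the original file):
 *
 *      iommu=off               disable IOMMU usage entirely
 *      iommu=force,merge       force IOMMU usage and enable SG merging
 *      iommu=soft              use swiotlb bounce buffering (CONFIG_SWIOTLB)
 *      iommu=nodac             forbid DAC (above-4GB) bus addresses
 *      iommu=calgary           enable Calgary (CONFIG_CALGARY_IOMMU)
 */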
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
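
/*
 * Illustrative sketch (not part of the original file): a driver with
 * 64KB of device-local memory at bus address 0x80000000 could declare
 * it so that dma_alloc_coherent() is satisfied from that region.  The
 * addresses and "example_declare" are hypothetical.
 */
#if 0
static int example_declare(struct device *dev)
{
        if (dma_declare_coherent_memory(dev, 0x80000000, 0x80000000,
                                        0x10000, DMA_MEMORY_MAP)
            != DMA_MEMORY_MAP)
                return -ENOMEM;
        return 0;
}
#endif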
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

        pages >>= PAGE_SHIFT;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
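
/*
 * Illustrative sketch (not part of the original file): reserving one
 * page of a previously declared region at a fixed device address.
 * The address and "example_reserve" are hypothetical.
 */
#if 0
static void *example_reserve(struct device *dev)
{
        /* Returns ERR_PTR() on failure, so callers check with IS_ERR() */
        return dma_mark_declared_memory_occupied(dev, 0x80001000, PAGE_SIZE);
}
#endif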
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                     order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        *ret = NULL;
        }
        return (mem != NULL);
}
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                                 dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                                 dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
/* Allocate DMA memory on node near device */
static noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
        dma_addr_t bus;
        int noretry = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &fallback_dev;
                gfp |= GFP_DMA;
        }
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        /* Device not DMA able */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
        if (gfp & __GFP_DMA)
                noretry = 1;

#ifdef CONFIG_X86_64
        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                gfp |= GFP_DMA32;
                if (dma_mask < DMA_32BIT_MASK)
                        noretry = 1;
        }
#endif

 again:
        page = dma_alloc_pages(dev,
                noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
        if (page == NULL)
                return NULL;

        {
                int high, mmu;

                bus = page_to_phys(page);
                memory = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                               dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
                      (unsigned long)size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t bus)
{
        int order = get_order(size);

        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
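
/*
 * Illustrative sketch (not part of the original file): the usual
 * alloc/free pairing for a DMA descriptor ring.  "example_ring_test"
 * and the names inside it are hypothetical.
 */
#if 0
static int example_ring_test(struct device *dev)
{
        dma_addr_t ring_bus;
        void *ring;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;
        /* program ring_bus into the device, access "ring" from the CPU */
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
        return 0;
}
#endif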
static int __init pci_iommu_init(void)
{
        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif