#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
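
/*
 * On 64-bit systems a chunk of memory below 4GB is reserved from bootmem
 * early (dma32_reserve_bootmem) and released again in pci_iommu_alloc(), so
 * that the IOMMU/swiotlb setup can still find a range below 4GB once the
 * regular allocators have taken over.
 */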
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (end_pfn <= MAX_DMA32_PFN)
		return;

	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 __pa(MAX_DMA_ADDRESS));
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	int node;

	if (end_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	for_each_online_node(node)
		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
				  dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

	amd_iommu_detect();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
#endif
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */
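
/*
 * Generic entry points: dma_supported() and dma_alloc_coherent() handle the
 * common mask and zone logic here and defer the rest to whichever dma_ops
 * backend was installed at boot.
 */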
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
				 dev->bus_id);
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
				 dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;
	int noretry = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &fallback_dev;
		gfp |= GFP_DMA;
	}
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

#ifdef CONFIG_X86_64
	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
		gfp |= GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
#endif

 again:
	page = dma_alloc_pages(dev,
		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
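
/*
 * Late IOMMU initialization, run as an fs_initcall so it executes after the
 * PCI subsystem: each configured backend gets a chance to install its
 * dma_ops, with the no-IOMMU path as the final fallback.
 */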
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

	amd_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected."
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif