X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=lib%2Fswiotlb.c;h=0af497b6b9a87f26b395e281cfd049a76a72ca41;hb=4feb8f8f4589d1cb1594e344c9672ec40f627ab4;hp=a1a6d6bf87b48049c6a722aabffec5439dd0f872;hpb=de69e0f0b38a467d881e138a302b005bf31c8400;p=safe%2Fjmp%2Flinux-2.6

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a1a6d6b..0af497b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
 /*
  * Dynamic DMA mapping support.
  *
- * This implementation is for IA-64 platforms that do not support
+ * This implementation is for IA-64 and EM64T platforms that do not support
  * I/O TLBs (aka DMA address translation hardware).
  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -11,21 +11,23 @@
  * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
  * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
  *			unnecessary i-cache flushing.
- * 04/07/.. ak          Better overflow handling. Assorted fixes.
+ * 04/07/.. ak		Better overflow handling. Assorted fixes.
+ * 05/09/10 linville	Add support for syncing ranges, support syncing for
+ *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
  */
 
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 
 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/dma.h>
+#include <asm/scatterlist.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -125,7 +127,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the PCI DMA API.
+ * structures for the software IO TLB used to implement the DMA API.
  */
 void
 swiotlb_init_with_default_size (size_t default_size)
@@ -140,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -431,7 +432,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, int flags)
+		       dma_addr_t *dma_handle, gfp_t flags)
 {
 	unsigned long dev_addr;
 	void *ret;
@@ -462,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 */
 		dma_addr_t handle;
 		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(handle))
+		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
 		ret = phys_to_virt(handle);
@@ -500,24 +501,24 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
-	 * unless they check for pci_dma_mapping_error (most don't)
+	 * unless they check for dma_mapping_error (most don't)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
 	if (size > io_tlb_overflow && do_panic) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Random memory would be DMAed\n");
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Memory would be corrupted\n");
+		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Random memory would be DMAed\n");
 	}
 }
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.  The
- * PCI address to use is returned.
+ * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
@@ -604,8 +605,8 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  * after a transfer.
  *
  * If you perform a swiotlb_map_single() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
- * call this function before doing so.  At the next point you give the PCI dma
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so.  At the next point you give the dma
  * address back to the card, you must first perform a
  * swiotlb_dma_sync_for_device, and then the device again owns the buffer
  */
@@ -702,8 +703,9 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-			sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
-			if (!sg->dma_address) {
+			void *map = map_single(hwdev, addr, sg->length, dir);
+			sg->dma_address = virt_to_bus(map);
+			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
@@ -781,9 +783,9 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 }
 
 /*
- * Return whether the given PCI device DMA address mask can be supported
+ * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
 int
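
For context, the driver-side usage these renamed calls and error checks serve follows a fixed pattern: map, check for a mapping error, sync around any CPU access, unmap. The fragment below is an illustrative sketch against the generic DMA API of this kernel generation, not part of the patch; the device pointer, buffer, and function name are hypothetical, and dma_mapping_error() takes a single argument here, matching the era's swiotlb_dma_mapping_error() seen above.

/* Hypothetical driver fragment (not from the patch): the streaming-DMA
 * ownership pattern that swiotlb implements behind the generic DMA API. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map for device reads; the device owns "buf" from here on. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))	/* one-argument 2.6.1x-era form */
		return -ENOMEM;

	/* ... hand "handle" to the hardware and run the transfer ... */

	/* Give the buffer back to the CPU before inspecting it ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
	/* ... and back to the device before the hardware touches it again. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

The sync_for_cpu/sync_for_device pair is exactly the ownership hand-off the comments in the diff describe: between map and unmap the device owns the buffer, except in the window a sync_for_cpu opens for the CPU.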