#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/gfp.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <dma-coherence.h>
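+
+/*
+ * Translate a DMA/bus address back to the kernel virtual address of the
+ * buffer, via the platform's DMA-to-physical translation hook.
+ */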
+static inline unsigned long dma_addr_to_virt(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
+
+ return (unsigned long)phys_to_virt(addr);
+}
+
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
return !plat_device_is_coherent(dev) &&
- (current_cpu_data.cputype == CPU_R10000 &&
- current_cpu_data.cputype == CPU_R12000);
+ (current_cpu_type() == CPU_R10000 ||
+ current_cpu_type() == CPU_R12000);
+}
+
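+/*
+ * Pick gfp zone flags that honour the device's coherent_dma_mask,
+ * overriding whatever zone specifiers the caller passed in.
+ */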
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+ if (dev == NULL)
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
+ else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+ else
+#endif
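+ /* the lone ";" below is the body of whichever "else" survives preprocessing */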
+ ;
+
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
+
+ return gfp;
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ gfp = massage_gfp_flags(dev, gfp);
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret != NULL) {
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
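+ /* try the per-device coherent memory pool first, if one was declared */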
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+ return ret;
+
+ gfp = massage_gfp_flags(dev, gfp);
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret) {
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
+ plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
free_pages((unsigned long) vaddr, get_order(size));
}
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
unsigned long addr = (unsigned long) vaddr;
+ int order = get_order(size);
+
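+ /* buffers that came from a per-device pool must go back to that pool */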
+ if (dma_release_from_coherent(dev, order, vaddr))
+ return;
+
+ plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
if (cpu_is_noncoherent_r10000(dev))
- __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
+ __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
direction);
- plat_unmap_dma_mem(dma_addr);
+ plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
- addr = (unsigned long) page_address(sg->page);
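+ /* sg_virt() already folds in sg->offset */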
+ addr = (unsigned long) sg_virt(sg);
if (!plat_device_is_coherent(dev) && addr)
- __dma_sync(addr + sg->offset, sg->length, direction);
- sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
- sg->offset;
+ __dma_sync(addr, sg->length, direction);
+ sg->dma_address = plat_map_dma_mem(dev,
+ (void *)addr, sg->length);
}
return nents;
unsigned long addr;
addr = (unsigned long) page_address(page) + offset;
- dma_cache_wback_inv(addr, size);
+ __dma_sync(addr, size, direction);
}
return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
- unsigned long addr;
-
- addr = plat_dma_addr_to_phys(dma_address);
- dma_cache_wback_inv(addr, size);
- }
-
- plat_unmap_dma_mem(dma_address);
-}
-
-EXPORT_SYMBOL(dma_unmap_page);
-
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE) {
- addr = (unsigned long) page_address(sg->page);
+ addr = (unsigned long) sg_virt(sg);
if (addr)
- __dma_sync(addr + sg->offset, sg->length,
- direction);
+ __dma_sync(addr, sg->length, direction);
}
- plat_unmap_dma_mem(sg->dma_address);
+ plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
}
}
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dev, dma_handle);
__dma_sync(addr, size, direction);
}
}
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
- if (cpu_is_noncoherent_r10000(dev)) {
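+ /* let the platform do any extra sync it needs, e.g. draining write buffers */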
+ plat_extra_sync_for_device(dev);
+ if (!plat_device_is_coherent(dev)) {
unsigned long addr;
- addr = plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dev, dma_handle);
__dma_sync(addr, size, direction);
}
}
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dev, dma_handle);
__dma_sync(addr + offset, size, direction);
}
}
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
- if (cpu_is_noncoherent_r10000(dev)) {
+ plat_extra_sync_for_device(dev);
+ if (!plat_device_is_coherent(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dev, dma_handle);
__dma_sync(addr + offset, size, direction);
}
}
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
- if (!plat_device_is_coherent(dev))
- __dma_sync((unsigned long)page_address(sg->page),
+ if (cpu_is_noncoherent_r10000(dev))
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
- plat_unmap_dma_mem(sg->dma_address);
}
}
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
- __dma_sync((unsigned long)page_address(sg->page),
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
- plat_unmap_dma_mem(sg->dma_address);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return 0;
+ return plat_dma_mapping_error(dev, dma_addr);
}
EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < 0x00ffffff)
- return 0;
-
- return 1;
+ return plat_dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
- dma_cache_wback_inv((unsigned long)vaddr, size);
+ __dma_sync((unsigned long)vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);