include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 4a32e93..9547bc0 100644
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
 
 #include <dma-coherence.h>
 
+static inline unsigned long dma_addr_to_virt(struct device *dev,
+       dma_addr_t dma_addr)
+{
+       unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
+
+       return (unsigned long)phys_to_virt(addr);
+}
+
 /*
  * Warning on the terminology - Linux calls an uncached area coherent;
  * MIPS terminology calls memory areas with hardware maintained coherency
 static inline int cpu_is_noncoherent_r10000(struct device *dev)
 {
        return !plat_device_is_coherent(dev) &&
-              (current_cpu_data.cputype == CPU_R10000 &&
-              current_cpu_data.cputype == CPU_R12000);
+              (current_cpu_type() == CPU_R10000 ||
+              current_cpu_type() == CPU_R12000);
+}
+
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+       /* ignore region specifiers */
+       gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+       if (dev == NULL)
+               gfp |= __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+               gfp |= __GFP_DMA;
+       else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+               gfp |= __GFP_DMA32;
+       else
+#endif
+               ;
+
+       /* Don't invoke OOM killer */
+       gfp |= __GFP_NORETRY;
+
+       return gfp;
 }
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
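The new massage_gfp_flags() centralizes the zone selection that the two
allocators below used to open-code, and tightens it: the old code only
compared the coherent mask against 0xffffffff, while the helper distinguishes
a 24-bit ZONE_DMA limit from a 32-bit ZONE_DMA32 limit (a NULL device still
falls back to __GFP_DMA). The user-space sketch below only demonstrates the
thresholds; it assumes both zone config options are enabled and copies the
kernel's DMA_BIT_MASK() definition. It is an illustration, not code from
this patch:

    /* Hypothetical stand-alone model of the zone choice in
     * massage_gfp_flags(); the returned strings stand in for gfp flags. */
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static const char *pick_zone(unsigned long long coherent_dma_mask)
    {
            if (coherent_dma_mask < DMA_BIT_MASK(24))
                    return "__GFP_DMA";     /* below 16 MB: ISA-style zone */
            if (coherent_dma_mask < DMA_BIT_MASK(32))
                    return "__GFP_DMA32";   /* below 4 GB */
            return "no zone flag";          /* device reaches all memory */
    }

    int main(void)
    {
            printf("%s\n", pick_zone(DMA_BIT_MASK(24)));  /* __GFP_DMA32 */
            printf("%s\n", pick_zone(DMA_BIT_MASK(32)));  /* no zone flag */
            printf("%s\n", pick_zone(DMA_BIT_MASK(20)));  /* __GFP_DMA */
            return 0;
    }
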
@@ -37,11 +72,8 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 {
        void *ret;
 
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       gfp = massage_gfp_flags(dev, gfp);
 
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));
 
        if (ret != NULL) {
@@ -59,11 +91,11 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
        void *ret;
 
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+               return ret;
+
+       gfp = massage_gfp_flags(dev, gfp);
 
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));
 
        if (ret) {
@@ -84,6 +116,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
 {
+       plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -93,6 +126,12 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
 {
        unsigned long addr = (unsigned long) vaddr;
+       int order = get_order(size);
+
+       if (dma_release_from_coherent(dev, order, vaddr))
+               return;
+
+       plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);
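The calling convention for this pair is unchanged by the patch; only the
release path now tells the platform about the mapping. A minimal driver-side
sketch (example_coherent_buffer, BUF_SIZE and the device pointer are
placeholder names, not from this tree):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    #define BUF_SIZE 4096   /* hypothetical buffer size */

    static int example_coherent_buffer(struct device *dev)
    {
            dma_addr_t handle;
            void *cpu_addr;

            cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &handle, GFP_KERNEL);
            if (!cpu_addr)
                    return -ENOMEM;

            /* ... hand "handle" to the device and touch the buffer through
             * cpu_addr; no explicit sync is needed on this mapping ... */

            dma_free_coherent(dev, BUF_SIZE, cpu_addr, handle);
            return 0;
    }

dma_alloc_noncoherent()/dma_free_noncoherent() above take the same arguments;
the two pairs differ only in how the returned kernel mapping is cached.
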
@@ -140,10 +179,10 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
 {
        if (cpu_is_noncoherent_r10000(dev))
-               __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
+               __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
                           direction);
 
-       plat_unmap_dma_mem(dma_addr);
+       plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -158,11 +197,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;
 
-               addr = (unsigned long) page_address(sg->page);
+               addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
-                       __dma_sync(addr + sg->offset, sg->length, direction);
-               sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
-                                 sg->offset;
+                       __dma_sync(addr, sg->length, direction);
+               sg->dma_address = plat_map_dma_mem(dev,
+                                                  (void *)addr, sg->length);
        }
 
        return nents;
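The switch from sg->page/sg->offset to sg_virt() matches the scatterlist
rework that <linux/scatterlist.h> (added above) comes from, and each element
is now synced and mapped in one step. A hedged sketch of a caller in this
API generation, built with the long-standing sg_init_table()/sg_set_buf()
helpers (all names and counts are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Hypothetical: map "nents" kernel buffers for device reads. */
    static int example_map_sg(struct device *dev, struct scatterlist *sg,
                              void *bufs[], unsigned int lens[], int nents)
    {
            int i, mapped;

            sg_init_table(sg, nents);
            for (i = 0; i < nents; i++)
                    sg_set_buf(&sg[i], bufs[i], lens[i]);

            mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
            if (!mapped)
                    return -EIO;

            /* ... program the device with the dma_address/length of the
             * first "mapped" entries ... */

            dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);  /* original nents */
            return 0;
    }
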
@@ -179,7 +218,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long addr;
 
                addr = (unsigned long) page_address(page) + offset;
-               dma_cache_wback_inv(addr, size);
+               __dma_sync(addr, size, direction);
        }
 
        return plat_map_dma_mem_page(dev, page) + offset;
@@ -187,23 +226,6 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
 EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
-               unsigned long addr;
-
-               addr = plat_dma_addr_to_phys(dma_address);
-               dma_cache_wback_inv(addr, size);
-       }
-
-       plat_unmap_dma_mem(dma_address);
-}
-
-EXPORT_SYMBOL(dma_unmap_page);
-
 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
 {
@@ -215,12 +237,11 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
-                       addr = (unsigned long) page_address(sg->page);
+                       addr = (unsigned long) sg_virt(sg);
                        if (addr)
-                               __dma_sync(addr + sg->offset, sg->length,
-                                          direction);
+                               __dma_sync(addr, sg->length, direction);
                }
-               plat_unmap_dma_mem(sg->dma_address);
+               plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
 }
 
@@ -234,7 +255,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;
 
-               addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+               addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
 }
@@ -246,10 +267,11 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 {
        BUG_ON(direction == DMA_NONE);
 
-       if (cpu_is_noncoherent_r10000(dev)) {
+       plat_extra_sync_for_device(dev);
+       if (!plat_device_is_coherent(dev)) {
                unsigned long addr;
 
-               addr = plat_dma_addr_to_phys(dma_handle);
+               addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
 }
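Note the asymmetry this hunk introduces: the _for_cpu side still syncs only
on the speculating R10000/R12000 parts, while the _for_device side now syncs
on every non-coherent platform and gains a plat_extra_sync_for_device()
hook. For orientation, the conventional round trip around these two calls
(function and buffer names are placeholders):

    #include <linux/dma-mapping.h>

    /* Hypothetical streaming-DMA round trip over one buffer. */
    static void example_streaming_sync(struct device *dev, void *buf,
                                       size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len,
                                               DMA_FROM_DEVICE);

            /* ... device DMAs into the buffer ... */

            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
            /* the CPU may now read buf */
            dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
            /* the device may DMA into the buffer again */

            dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
    }
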
@@ -264,7 +286,7 @@ void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;
 
-               addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+               addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
 }
@@ -276,10 +298,11 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 {
        BUG_ON(direction == DMA_NONE);
 
-       if (cpu_is_noncoherent_r10000(dev)) {
+       plat_extra_sync_for_device(dev);
+       if (!plat_device_is_coherent(dev)) {
                unsigned long addr;
 
-               addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+               addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
 }
@@ -295,10 +318,9 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 
        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
-               if (!plat_device_is_coherent(dev))
-                       __dma_sync((unsigned long)page_address(sg->page),
+               if (cpu_is_noncoherent_r10000(dev))
+                       __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
-               plat_unmap_dma_mem(sg->dma_address);
        }
 }
 
@@ -314,32 +336,23 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
-                       __dma_sync((unsigned long)page_address(sg->page),
+                       __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
-               plat_unmap_dma_mem(sg->dma_address);
        }
 }
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return 0;
+       return plat_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_mapping_error);
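With the device argument added, the platform hook can fail mappings
per-device instead of the old unconditional "return 0". A caller checks the
result in the usual way (sketch; names are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical: map one buffer and bail out cleanly on failure. */
    static int example_checked_map(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;         /* nothing was mapped */

            /* ... use the mapping ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
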
 
 int dma_supported(struct device *dev, u64 mask)
 {
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < 0x00ffffff)
-               return 0;
-
-       return 1;
+       return plat_dma_supported(dev, mask);
 }
 
 EXPORT_SYMBOL(dma_supported);
@@ -356,8 +369,9 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
        BUG_ON(direction == DMA_NONE);
 
+       plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
-               dma_cache_wback_inv((unsigned long)vaddr, size);
+               __dma_sync((unsigned long)vaddr, size, direction);
 }
 
 EXPORT_SYMBOL(dma_cache_sync);