microblaze: Fix sg_dma_len() regression
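
The scatterlist consolidation made the dma_length field of struct scatterlist conditional on CONFIG_NEED_SG_DMA_LENGTH, which microblaze does not select, so dma_direct_map_sg() has to stop writing sg->dma_length and rely on sg->length instead. Roughly, the generic accessor of that era reads as follows (a sketch of asm-generic/scatterlist.h, shown for context only, not part of this patch):

        #ifdef CONFIG_NEED_SG_DMA_LENGTH
        #define sg_dma_len(sg)	((sg)->dma_length)
        #else
        #define sg_dma_len(sg)	((sg)->length)
        #endif

With the assignment removed, sg_dma_len() again reports the mapped length through sg->length.
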
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 64bc39f..79c7465 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -8,6 +8,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
 #include <linux/dma-debug.h>
 #include <asm/bug.h>
 #include <asm/cacheflush.h>
@@ -20,18 +21,15 @@
  * can set archdata.dma_data to an unsigned long holding the offset. By
  * default the offset is PCI_DRAM_OFFSET.
  */
-
-static inline void __dma_sync_page(void *vaddr, unsigned long offset,
+static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
                                size_t size, enum dma_data_direction direction)
 {
-       unsigned long start = virt_to_phys(vaddr);
-
        switch (direction) {
        case DMA_TO_DEVICE:
-               flush_dcache_range(start + offset, start + offset + size);
+               flush_dcache_range(paddr + offset, paddr + offset + size);
                break;
        case DMA_FROM_DEVICE:
-               invalidate_dcache_range(start + offset, start + offset + size);
+               invalidate_dcache_range(paddr + offset, paddr + offset + size);
                break;
        default:
                BUG();
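
__dma_sync_page() now takes the physical address directly instead of deriving it from a kernel virtual address, so each caller resolves the paddr itself. Illustrative call sites under that assumption (not part of the patch; page, vaddr, offset and size are placeholders):

        /* a struct page, as the streaming mapping paths below do */
        __dma_sync_page(page_to_phys(page), offset, size, DMA_TO_DEVICE);
        /* a kmalloc'ed buffer, with the paddr taken via virt_to_phys() */
        __dma_sync_page(virt_to_phys(vaddr), 0, size, DMA_FROM_DEVICE);
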
@@ -40,15 +38,20 @@ static inline void __dma_sync_page(void *vaddr, unsigned long offset,
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-       if (dev)
+       if (likely(dev))
                return (unsigned long)dev->archdata.dma_data;
 
        return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
 }
 
-void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+#define NOT_COHERENT_CACHE
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
 {
+#ifdef NOT_COHERENT_CACHE
+       return consistent_alloc(flag, size, dma_handle);
+#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);
@@ -64,12 +67,17 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
        *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
 
        return ret;
+#endif
 }
 
-void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef NOT_COHERENT_CACHE
+       consistent_free(size, vaddr);
+#else
        free_pages((unsigned long)vaddr, get_order(size));
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -79,10 +87,11 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        struct scatterlist *sg;
        int i;
 
+       /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
-               sg->dma_length = sg->length;
-               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+               __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+                                                       sg->length, direction);
        }
 
        return nents;
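
dma_direct_map_sg() now fills only sg->dma_address and syncs each page; a driver consuming the mapping still goes through the accessors for both address and length, roughly like this (illustrative only; dev, sgl and nents are placeholders, and program_descriptor() stands in for a device-specific helper):

        int i, mapped;
        struct scatterlist *sg;

        mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        for_each_sg(sgl, sg, mapped, i)
                program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
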
@@ -106,8 +115,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
 {
-       BUG_ON(direction == DMA_NONE);
-       __dma_sync_page(page, offset, size, direction);
+       __dma_sync_page(page_to_phys(page), offset, size, direction);
        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
 }
 
@@ -117,8 +125,12 @@ static inline void dma_direct_unmap_page(struct device *dev,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
 {
-/* There is not necessary to do cache cleanup */
-       /* __dma_sync_page(dma_address, 0 , size, direction); */
+/* A full cache cleanup is not strictly necessary here.
+ *
+ * dma_address is already a physical address, so it can be passed to
+ * __dma_sync_page() directly now that the helper takes a paddr rather
+ * than a virtual address.
+ */
+       __dma_sync_page(dma_address, 0, size, direction);
 }
 
 struct dma_map_ops dma_direct_ops = {
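
For reference, the coherent callbacks above are reached through the usual DMA API; a minimal consumer would look roughly like this (illustrative only, assuming dev is a valid, DMA-capable struct device):

        void *buf;
        dma_addr_t handle;

        buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
        if (buf) {
                /* the device uses 'handle', the CPU uses 'buf' */
                dma_free_coherent(dev, PAGE_SIZE, buf, handle);
        }

On a non-coherent configuration (NOT_COHERENT_CACHE) these calls end up in consistent_alloc()/consistent_free() rather than the page allocator.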