[MIPS] Make dma_map_sg handle sg elements which are longer than one page
arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

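/*
 * Note (added for clarity): R10000 and R12000 cores can speculatively
 * execute loads, which may pull cache lines back in while a DMA transfer is
 * still in flight.  On non-coherent platforms built around these CPUs the
 * caches therefore also have to be maintained when the CPU regains
 * ownership of a buffer (unmap and sync-for-cpu), not only when the buffer
 * is handed to the device.
 */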
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_data.cputype == CPU_R10000 ||
                current_cpu_data.cputype == CPU_R12000);
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

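/*
 * Usage sketch for dma_alloc_noncoherent() (illustrative only, not part of
 * this file): a driver that mostly touches a buffer from the CPU can
 * allocate it non-coherent and write it back explicitly before starting the
 * transfer.  "pdev", "ioaddr", "cmd", "cmd_len" and "REG_DMA_BASE" are
 * hypothetical driver names.
 *
 *      dma_addr_t handle;
 *      void *buf;
 *
 *      buf = dma_alloc_noncoherent(&pdev->dev, BUF_SIZE, &handle, GFP_KERNEL);
 *      if (buf == NULL)
 *              return -ENOMEM;
 *
 *      memcpy(buf, cmd, cmd_len);
 *      dma_cache_sync(&pdev->dev, buf, cmd_len, DMA_TO_DEVICE);
 *      writel(handle, ioaddr + REG_DMA_BASE);
 */
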
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

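/*
 * Usage sketch for dma_alloc_coherent() (illustrative only): coherent memory
 * is the usual choice for descriptor rings that both CPU and device update;
 * on non-coherent platforms the pointer returned above is an uncached
 * (UNCAC_ADDR) mapping, so no explicit flushing is needed.  "pdev",
 * "struct my_desc" and "NUM_DESC" are hypothetical.
 *
 *      dma_addr_t ring_dma;
 *      struct my_desc *ring;
 *
 *      ring = dma_alloc_coherent(&pdev->dev, NUM_DESC * sizeof(*ring),
 *                                &ring_dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, NUM_DESC * sizeof(*ring), ring, ring_dma);
 */
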
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

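/*
 * Note (added for clarity): pick the cache maintenance operation that
 * matches the transfer direction - writeback before the device reads
 * (DMA_TO_DEVICE), invalidate before the CPU reads what the device wrote
 * (DMA_FROM_DEVICE), and both when the buffer moves in both directions
 * (DMA_BIDIRECTIONAL).
 */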
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

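/*
 * Usage sketch for dma_map_single()/dma_unmap_single() (illustrative only):
 * streaming mappings wrap a single transfer; the buffer belongs to the
 * device between map and unmap.  "pdev", "skb", "ioaddr" and "REG_TX_ADDR"
 * are hypothetical.
 *
 *      dma_addr_t mapping;
 *
 *      mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
 *                               DMA_TO_DEVICE);
 *      writel(mapping, ioaddr + REG_TX_ADDR);
 *      ...
 *      dma_unmap_single(&pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
 */
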
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)(addr + sg->offset),
                                                   sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

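/*
 * Usage sketch for dma_map_sg() (illustrative only): since the mapping above
 * uses sg->length, a scatterlist entry may describe a physically contiguous
 * region larger than one page, e.g. a multi-page kmalloc() buffer.  "pdev",
 * "buf" and "len" (with len possibly > PAGE_SIZE) are hypothetical.
 *
 *      struct scatterlist sg;
 *      void *buf = kmalloc(len, GFP_KERNEL);
 *
 *      sg_init_one(&sg, buf, len);
 *      if (dma_map_sg(&pdev->dev, &sg, 1, DMA_FROM_DEVICE) == 0)
 *              return -EIO;
 *      ...
 *      dma_unmap_sg(&pdev->dev, &sg, 1, DMA_FROM_DEVICE);
 */
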
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) page_address(sg->page);
                        if (addr)
                                __dma_sync(addr + sg->offset, sg->length,
                                           direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

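/*
 * Usage sketch for the sync calls (illustrative only): when a streaming
 * mapping is reused instead of being unmapped, ownership is bounced between
 * CPU and device with dma_sync_single_for_cpu()/_for_device().  "pdev",
 * "mapping", "buf", "size" and process_rx_data() are hypothetical.
 *
 *      dma_sync_single_for_cpu(&pdev->dev, mapping, size, DMA_FROM_DEVICE);
 *      process_rx_data(buf, size);
 *      dma_sync_single_for_device(&pdev->dev, mapping, size, DMA_FROM_DEVICE);
 */
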
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

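/*
 * Note (added for clarity): on this implementation translating a buffer to a
 * bus address cannot fail, so there is never a mapping error to report.
 */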
int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

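/*
 * Usage sketch (illustrative only): drivers normally don't call
 * dma_supported() directly; they go through dma_set_mask(), which uses it to
 * validate the requested mask.  "pdev" is hypothetical.
 *
 *      if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -ENODEV;
 */
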
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);