/*
 * include cleanup: update gfp.h and slab.h includes to prepare for
 * breaking the implicit slab.h inclusion from percpu.h
 * [safe/jmp/linux-2.6] drivers/dma/coh901318_lli.c
 */
1 /*
 * drivers/dma/coh901318_lli.c
3  *
4  * Copyright (C) 2007-2009 ST-Ericsson
5  * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling LLIs (linked list items) for DMA
7  * Author: Per Friden <per.friden@stericsson.com>
8  */
9
10 #include <linux/dma-mapping.h>
11 #include <linux/spinlock.h>
12 #include <linux/dmapool.h>
13 #include <linux/memory.h>
14 #include <linux/gfp.h>
15 #include <mach/coh901318.h>
16
17 #include "coh901318_lli.h"
18
19 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
20 #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
21 #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
22 #else
23 #define DEBUGFS_POOL_COUNTER_RESET(pool)
24 #define DEBUGFS_POOL_COUNTER_ADD(pool, add)
25 #endif
26
27 static struct coh901318_lli *
28 coh901318_lli_next(struct coh901318_lli *data)
29 {
30         if (data == NULL || data->link_addr == 0)
31                 return NULL;
32
33         return (struct coh901318_lli *) data->virt_link_addr;
34 }
35
36 int coh901318_pool_create(struct coh901318_pool *pool,
37                           struct device *dev,
38                           size_t size, size_t align)
39 {
40         spin_lock_init(&pool->lock);
41         pool->dev = dev;
42         pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
43
44         DEBUGFS_POOL_COUNTER_RESET(pool);
45         return 0;
46 }
47
48 int coh901318_pool_destroy(struct coh901318_pool *pool)
49 {
50
51         dma_pool_destroy(pool->dmapool);
52         return 0;
53 }
54
55 struct coh901318_lli *
56 coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
57 {
58         int i;
59         struct coh901318_lli *head;
60         struct coh901318_lli *lli;
61         struct coh901318_lli *lli_prev;
62         dma_addr_t phy;
63
64         if (len == 0)
65                 goto err;
66
67         spin_lock(&pool->lock);
68
69         head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
70
71         if (head == NULL)
72                 goto err;
73
74         DEBUGFS_POOL_COUNTER_ADD(pool, 1);
75
76         lli = head;
77         lli->phy_this = phy;
78         lli->link_addr = 0x00000000;
79         lli->virt_link_addr = 0x00000000U;
80
81         for (i = 1; i < len; i++) {
82                 lli_prev = lli;
83
84                 lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
85
86                 if (lli == NULL)
87                         goto err_clean_up;
88
89                 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
90                 lli->phy_this = phy;
91                 lli->link_addr = 0x00000000;
92                 lli->virt_link_addr = 0x00000000U;
93
94                 lli_prev->link_addr = phy;
95                 lli_prev->virt_link_addr = lli;
96         }
97
98         spin_unlock(&pool->lock);
99
100         return head;
101
102  err:
103         spin_unlock(&pool->lock);
104         return NULL;
105
106  err_clean_up:
107         lli_prev->link_addr = 0x00000000U;
108         spin_unlock(&pool->lock);
109         coh901318_lli_free(pool, &head);
110         return NULL;
111 }
112
113 void coh901318_lli_free(struct coh901318_pool *pool,
114                         struct coh901318_lli **lli)
115 {
116         struct coh901318_lli *l;
117         struct coh901318_lli *next;
118
119         if (lli == NULL)
120                 return;
121
122         l = *lli;
123
124         if (l == NULL)
125                 return;
126
127         spin_lock(&pool->lock);
128
129         while (l->link_addr) {
130                 next = l->virt_link_addr;
131                 dma_pool_free(pool->dmapool, l, l->phy_this);
132                 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
133                 l = next;
134         }
135         dma_pool_free(pool->dmapool, l, l->phy_this);
136         DEBUGFS_POOL_COUNTER_ADD(pool, -1);
137
138         spin_unlock(&pool->lock);
139         *lli = NULL;
140 }
141
142 int
143 coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
144                           struct coh901318_lli *lli,
145                           dma_addr_t source, unsigned int size,
146                           dma_addr_t destination, u32 ctrl_chained,
147                           u32 ctrl_eom)
148 {
149         int s = size;
150         dma_addr_t src = source;
151         dma_addr_t dst = destination;
152
153         lli->src_addr = src;
154         lli->dst_addr = dst;
155
156         while (lli->link_addr) {
157                 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
158                 lli->src_addr = src;
159                 lli->dst_addr = dst;
160
161                 s -= MAX_DMA_PACKET_SIZE;
162                 lli = coh901318_lli_next(lli);
163
164                 src += MAX_DMA_PACKET_SIZE;
165                 dst += MAX_DMA_PACKET_SIZE;
166         }
167
168         lli->control = ctrl_eom | s;
169         lli->src_addr = src;
170         lli->dst_addr = dst;
171
172         return 0;
173 }
174
175 int
176 coh901318_lli_fill_single(struct coh901318_pool *pool,
177                           struct coh901318_lli *lli,
178                           dma_addr_t buf, unsigned int size,
179                           dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
180                           enum dma_data_direction dir)
181 {
182         int s = size;
183         dma_addr_t src;
184         dma_addr_t dst;
185
186
187         if (dir == DMA_TO_DEVICE) {
188                 src = buf;
189                 dst = dev_addr;
190
191         } else if (dir == DMA_FROM_DEVICE) {
192
193                 src = dev_addr;
194                 dst = buf;
195         } else {
196                 return -EINVAL;
197         }
198
199         while (lli->link_addr) {
200                 size_t block_size = MAX_DMA_PACKET_SIZE;
201                 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
202
203                 /* If we are on the next-to-final block and there will
204                  * be less than half a DMA packet left for the last
205                  * block, then we want to make this block a little
206                  * smaller to balance the sizes. This is meant to
207                  * avoid too small transfers if the buffer size is
208                  * (MAX_DMA_PACKET_SIZE*N + 1) */
209                 if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
210                         block_size = MAX_DMA_PACKET_SIZE/2;
211
212                 s -= block_size;
213                 lli->src_addr = src;
214                 lli->dst_addr = dst;
215
216                 lli = coh901318_lli_next(lli);
217
218                 if (dir == DMA_TO_DEVICE)
219                         src += block_size;
220                 else if (dir == DMA_FROM_DEVICE)
221                         dst += block_size;
222         }
223
224         lli->control = ctrl_eom | s;
225         lli->src_addr = src;
226         lli->dst_addr = dst;
227
228         return 0;
229 }
230
231 int
232 coh901318_lli_fill_sg(struct coh901318_pool *pool,
233                       struct coh901318_lli *lli,
234                       struct scatterlist *sgl, unsigned int nents,
235                       dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
236                       u32 ctrl_last,
237                       enum dma_data_direction dir, u32 ctrl_irq_mask)
238 {
239         int i;
240         struct scatterlist *sg;
241         u32 ctrl_sg;
242         dma_addr_t src = 0;
243         dma_addr_t dst = 0;
244         u32 bytes_to_transfer;
245         u32 elem_size;
246
247         if (lli == NULL)
248                 goto err;
249
250         spin_lock(&pool->lock);
251
252         if (dir == DMA_TO_DEVICE)
253                 dst = dev_addr;
254         else if (dir == DMA_FROM_DEVICE)
255                 src = dev_addr;
256         else
257                 goto err;
258
259         for_each_sg(sgl, sg, nents, i) {
260                 if (sg_is_chain(sg)) {
261                         /* sg continues to the next sg-element don't
262                          * send ctrl_finish until the last
263                          * sg-element in the chain
264                          */
265                         ctrl_sg = ctrl_chained;
266                 } else if (i == nents - 1)
267                         ctrl_sg = ctrl_last;
268                 else
269                         ctrl_sg = ctrl ? ctrl : ctrl_last;
270
271
272                 if (dir == DMA_TO_DEVICE)
273                         /* increment source address */
274                         src = sg_phys(sg);
275                 else
276                         /* increment destination address */
277                         dst =  sg_phys(sg);
278
279                 bytes_to_transfer = sg_dma_len(sg);
280
281                 while (bytes_to_transfer) {
282                         u32 val;
283
284                         if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
285                                 elem_size = MAX_DMA_PACKET_SIZE;
286                                 val = ctrl_chained;
287                         } else {
288                                 elem_size = bytes_to_transfer;
289                                 val = ctrl_sg;
290                         }
291
292                         lli->control = val | elem_size;
293                         lli->src_addr = src;
294                         lli->dst_addr = dst;
295
296                         if (dir == DMA_FROM_DEVICE)
297                                 dst += elem_size;
298                         else
299                                 src += elem_size;
300
301                         BUG_ON(lli->link_addr & 3);
302
303                         bytes_to_transfer -= elem_size;
304                         lli = coh901318_lli_next(lli);
305                 }
306
307         }
308         spin_unlock(&pool->lock);
309
310         return 0;
311  err:
312         spin_unlock(&pool->lock);
313         return -EINVAL;
314 }