davinci: EDMA: multiple CCs, channel mapping and API changes
[safe/jmp/linux-2.6] / arch / arm / mach-davinci / dma.c
1 /*
2  * EDMA3 support for DaVinci
3  *
4  * Copyright (C) 2006-2009 Texas Instruments.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/compiler.h>
28 #include <linux/io.h>
29
30 #include <mach/cputype.h>
31 #include <mach/memory.h>
32 #include <mach/hardware.h>
33 #include <mach/irqs.h>
34 #include <mach/edma.h>
35 #include <mach/mux.h>
36
37
38 /* Offsets matching "struct edmacc_param" */
39 #define PARM_OPT                0x00
40 #define PARM_SRC                0x04
41 #define PARM_A_B_CNT            0x08
42 #define PARM_DST                0x0c
43 #define PARM_SRC_DST_BIDX       0x10
44 #define PARM_LINK_BCNTRLD       0x14
45 #define PARM_SRC_DST_CIDX       0x18
46 #define PARM_CCNT               0x1c
47
48 #define PARM_SIZE               0x20
49
50 /* Offsets for EDMA CC global channel registers and their shadows */
51 #define SH_ER           0x00    /* 64 bits */
52 #define SH_ECR          0x08    /* 64 bits */
53 #define SH_ESR          0x10    /* 64 bits */
54 #define SH_CER          0x18    /* 64 bits */
55 #define SH_EER          0x20    /* 64 bits */
56 #define SH_EECR         0x28    /* 64 bits */
57 #define SH_EESR         0x30    /* 64 bits */
58 #define SH_SER          0x38    /* 64 bits */
59 #define SH_SECR         0x40    /* 64 bits */
60 #define SH_IER          0x50    /* 64 bits */
61 #define SH_IECR         0x58    /* 64 bits */
62 #define SH_IESR         0x60    /* 64 bits */
63 #define SH_IPR          0x68    /* 64 bits */
64 #define SH_ICR          0x70    /* 64 bits */
65 #define SH_IEVAL        0x78
66 #define SH_QER          0x80
67 #define SH_QEER         0x84
68 #define SH_QEECR        0x88
69 #define SH_QEESR        0x8c
70 #define SH_QSER         0x90
71 #define SH_QSECR        0x94
72 #define SH_SIZE         0x200
73
74 /* Offsets for EDMA CC global registers */
75 #define EDMA_REV        0x0000
76 #define EDMA_CCCFG      0x0004
77 #define EDMA_QCHMAP     0x0200  /* 8 registers */
78 #define EDMA_DMAQNUM    0x0240  /* 8 registers (4 on OMAP-L1xx) */
79 #define EDMA_QDMAQNUM   0x0260
80 #define EDMA_QUETCMAP   0x0280
81 #define EDMA_QUEPRI     0x0284
82 #define EDMA_EMR        0x0300  /* 64 bits */
83 #define EDMA_EMCR       0x0308  /* 64 bits */
84 #define EDMA_QEMR       0x0310
85 #define EDMA_QEMCR      0x0314
86 #define EDMA_CCERR      0x0318
87 #define EDMA_CCERRCLR   0x031c
88 #define EDMA_EEVAL      0x0320
89 #define EDMA_DRAE       0x0340  /* 4 x 64 bits*/
90 #define EDMA_QRAE       0x0380  /* 4 registers */
91 #define EDMA_QUEEVTENTRY        0x0400  /* 2 x 16 registers */
92 #define EDMA_QSTAT      0x0600  /* 2 registers */
93 #define EDMA_QWMTHRA    0x0620
94 #define EDMA_QWMTHRB    0x0624
95 #define EDMA_CCSTAT     0x0640
96
97 #define EDMA_M          0x1000  /* global channel registers */
98 #define EDMA_ECR        0x1008
99 #define EDMA_ECRH       0x100C
100 #define EDMA_SHADOW0    0x2000  /* 4 regions shadowing global channels */
101 #define EDMA_PARM       0x4000  /* 128 param entries */
102
103 #define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
104
105 #define EDMA_DCHMAP     0x0100  /* 64 registers */
106 #define CHMAP_EXIST     BIT(24)
107
108 #define EDMA_MAX_DMACH           64
109 #define EDMA_MAX_PARAMENTRY     512
110 #define EDMA_MAX_CC               2
111
112
113 /*****************************************************************************/
114
115 static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
116
117 static inline unsigned int edma_read(unsigned ctlr, int offset)
118 {
119         return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
120 }
121
122 static inline void edma_write(unsigned ctlr, int offset, int val)
123 {
124         __raw_writel(val, edmacc_regs_base[ctlr] + offset);
125 }
126 static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
127                 unsigned or)
128 {
129         unsigned val = edma_read(ctlr, offset);
130         val &= and;
131         val |= or;
132         edma_write(ctlr, offset, val);
133 }
134 static inline void edma_and(unsigned ctlr, int offset, unsigned and)
135 {
136         unsigned val = edma_read(ctlr, offset);
137         val &= and;
138         edma_write(ctlr, offset, val);
139 }
140 static inline void edma_or(unsigned ctlr, int offset, unsigned or)
141 {
142         unsigned val = edma_read(ctlr, offset);
143         val |= or;
144         edma_write(ctlr, offset, val);
145 }
/* Array-register accessors: @i selects one 32-bit register from a
 * contiguous bank (e.g. the low/high words of a 64-bit event register),
 * hence the (i << 2) byte scaling.
 */
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
/* Two-dimensional variants: @i picks a 64-bit register pair and @j (0 or 1)
 * selects its low or high 32-bit word.
 */
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
/* Shadow region 0 accessors: same layout as the global channel registers
 * (SH_* offsets), reached through the region 0 alias at EDMA_SHADOW0.
 */
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
/* PaRAM accessors: @offset is a PARM_* field offset inside the 32-byte
 * parameter RAM entry @param_no (hence param_no << 5).
 */
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
217
218 /*****************************************************************************/
219
/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_noevent bit for each channel is clear unless
	 * it doesn't trigger DMA events on this platform.  It uses a
	 * bit of SOC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);

	/* interrupt number range owned by this controller; used by
	 * irq2ctlr() to map an irq back to its controller index
	 */
	unsigned	irq_res_start;
	unsigned	irq_res_end;

	/* per-channel completion/error callback and its opaque context */
	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};
252
/* per-controller state; entries stay NULL for controllers not present */
static struct edma *edma_info[EDMA_MAX_CC];

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,	/* 0xffff link address terminates linking */
	.ccnt = 1,
};
260
261 /*****************************************************************************/
262
/*
 * map_dmach_queue - assign DMA channel @ch_no to event queue @queue_no
 *
 * Each DMAQNUM register holds the queue numbers of eight channels,
 * four bits per channel; (ch_no >> 3) picks the register and @bit the
 * nibble within it.
 */
static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = EVENTQ_1;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}
276
/* map_queue_tc - route event queue @queue_no to transfer controller @tc_no
 * (QUETCMAP holds one 4-bit field per queue)
 */
static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}
282
/* assign_priority_to_queue - set the system-bus priority of event queue
 * @queue_no to @priority (QUEPRI holds one 4-bit field per queue)
 */
static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}
290
291 /**
292  * map_dmach_param - Maps channel number to param entry number
293  *
294  * This maps the dma channel number to param entry numberter. In
295  * other words using the DMA channel mapping registers a param entry
296  * can be mapped to any channel
297  *
298  * Callers are responsible for ensuring the channel mapping logic is
299  * included in that particular EDMA variant (Eg : dm646x)
300  *
301  */
302 static void __init map_dmach_param(unsigned ctlr)
303 {
304         int i;
305         for (i = 0; i < EDMA_MAX_DMACH; i++)
306                 edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
307 }
308
/*
 * setup_dma_interrupt - (un)register a completion callback for a channel
 * @lch: controller-qualified channel number (EDMA_CTLR_CHAN encoding)
 * @callback: handler invoked from the CC completion interrupt, or NULL
 *	to disable completion interrupts for this channel
 * @data: opaque pointer passed back to @callback
 *
 * A NULL @callback masks the channel's interrupt (IECR); otherwise any
 * stale pending interrupt is cleared (ICR) and the interrupt enabled
 * (IESR).  All accesses go through shadow region 0.
 */
static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback) {
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				(1 << (lch & 0x1f)));
	}

	edma_info[ctlr]->intr_data[lch].callback = callback;
	edma_info[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				(1 << (lch & 0x1f)));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				(1 << (lch & 0x1f)));
	}
}
334
335 static int irq2ctlr(int irq)
336 {
337         if (irq >= edma_info[0]->irq_res_start &&
338                 irq <= edma_info[0]->irq_res_end)
339                 return 0;
340         else if (irq >= edma_info[1]->irq_res_start &&
341                 irq <= edma_info[1]->irq_res_end)
342                 return 1;
343
344         return -1;
345 }
346
347 /******************************************************************************
348  *
349  * DMA interrupt handler
350  *
351  *****************************************************************************/
352 static irqreturn_t dma_irq_handler(int irq, void *data)
353 {
354         int i;
355         unsigned ctlr;
356         unsigned int cnt = 0;
357
358         ctlr = irq2ctlr(irq);
359
360         dev_dbg(data, "dma_irq_handler\n");
361
362         if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
363             && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
364                 return IRQ_NONE;
365
366         while (1) {
367                 int j;
368                 if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
369                         j = 0;
370                 else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
371                         j = 1;
372                 else
373                         break;
374                 dev_dbg(data, "IPR%d %08x\n", j,
375                                 edma_shadow0_read_array(ctlr, SH_IPR, j));
376                 for (i = 0; i < 32; i++) {
377                         int k = (j << 5) + i;
378                         if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
379                                                         (1 << i)) {
380                                 /* Clear the corresponding IPR bits */
381                                 edma_shadow0_write_array(ctlr, SH_ICR, j,
382                                                         (1 << i));
383                                 if (edma_info[ctlr]->intr_data[k].callback) {
384                                         edma_info[ctlr]->intr_data[k].callback(
385                                                 k, DMA_COMPLETE,
386                                                 edma_info[ctlr]->intr_data[k].
387                                                 data);
388                                 }
389                         }
390                 }
391                 cnt++;
392                 if (cnt > 10)
393                         break;
394         }
395         edma_shadow0_write(ctlr, SH_IEVAL, 1);
396         return IRQ_HANDLED;
397 }
398
399 /******************************************************************************
400  *
401  * DMA error interrupt handler
402  *
403  *****************************************************************************/
404 static irqreturn_t dma_ccerr_handler(int irq, void *data)
405 {
406         int i;
407         unsigned ctlr;
408         unsigned int cnt = 0;
409
410         ctlr = irq2ctlr(irq);
411
412         dev_dbg(data, "dma_ccerr_handler\n");
413
414         if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
415             (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
416             (edma_read(ctlr, EDMA_QEMR) == 0) &&
417             (edma_read(ctlr, EDMA_CCERR) == 0))
418                 return IRQ_NONE;
419
420         while (1) {
421                 int j = -1;
422                 if (edma_read_array(ctlr, EDMA_EMR, 0))
423                         j = 0;
424                 else if (edma_read_array(ctlr, EDMA_EMR, 1))
425                         j = 1;
426                 if (j >= 0) {
427                         dev_dbg(data, "EMR%d %08x\n", j,
428                                         edma_read_array(ctlr, EDMA_EMR, j));
429                         for (i = 0; i < 32; i++) {
430                                 int k = (j << 5) + i;
431                                 if (edma_read_array(ctlr, EDMA_EMR, j) &
432                                                         (1 << i)) {
433                                         /* Clear the corresponding EMR bits */
434                                         edma_write_array(ctlr, EDMA_EMCR, j,
435                                                         1 << i);
436                                         /* Clear any SER */
437                                         edma_shadow0_write_array(ctlr, SH_SECR,
438                                                                 j, (1 << i));
439                                         if (edma_info[ctlr]->intr_data[k].
440                                                                 callback) {
441                                                 edma_info[ctlr]->intr_data[k].
442                                                 callback(k,
443                                                 DMA_CC_ERROR,
444                                                 edma_info[ctlr]->intr_data
445                                                 [k].data);
446                                         }
447                                 }
448                         }
449                 } else if (edma_read(ctlr, EDMA_QEMR)) {
450                         dev_dbg(data, "QEMR %02x\n",
451                                 edma_read(ctlr, EDMA_QEMR));
452                         for (i = 0; i < 8; i++) {
453                                 if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
454                                         /* Clear the corresponding IPR bits */
455                                         edma_write(ctlr, EDMA_QEMCR, 1 << i);
456                                         edma_shadow0_write(ctlr, SH_QSECR,
457                                                                 (1 << i));
458
459                                         /* NOTE:  not reported!! */
460                                 }
461                         }
462                 } else if (edma_read(ctlr, EDMA_CCERR)) {
463                         dev_dbg(data, "CCERR %08x\n",
464                                 edma_read(ctlr, EDMA_CCERR));
465                         /* FIXME:  CCERR.BIT(16) ignored!  much better
466                          * to just write CCERRCLR with CCERR value...
467                          */
468                         for (i = 0; i < 8; i++) {
469                                 if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
470                                         /* Clear the corresponding IPR bits */
471                                         edma_write(ctlr, EDMA_CCERRCLR, 1 << i);
472
473                                         /* NOTE:  not reported!! */
474                                 }
475                         }
476                 }
477                 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
478                     && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
479                     && (edma_read(ctlr, EDMA_QEMR) == 0)
480                     && (edma_read(ctlr, EDMA_CCERR) == 0)) {
481                         break;
482                 }
483                 cnt++;
484                 if (cnt > 10)
485                         break;
486         }
487         edma_write(ctlr, EDMA_EEVAL, 1);
488         return IRQ_HANDLED;
489 }
490
/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled false	/* disabled as long as they're NOPs */

/* TC0 error interrupt: currently a no-op beyond the debug print */
static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

/* TC1 error interrupt: currently a no-op beyond the debug print */
static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}
510
511 /*-----------------------------------------------------------------------*/
512
513 /* Resource alloc/free:  dma channels, parameter RAM slots */
514
515 /**
516  * edma_alloc_channel - allocate DMA channel and paired parameter RAM
517  * @channel: specific channel to allocate; negative for "any unmapped channel"
518  * @callback: optional; to be issued on DMA completion or errors
519  * @data: passed to callback
520  * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
521  *      Controller (TC) executes requests using this channel.  Use
522  *      EVENTQ_DEFAULT unless you really need a high priority queue.
523  *
524  * This allocates a DMA channel and its associated parameter RAM slot.
525  * The parameter RAM is initialized to hold a dummy transfer.
526  *
527  * Normal use is to pass a specific channel number as @channel, to make
528  * use of hardware events mapped to that channel.  When the channel will
529  * be used only for software triggering or event chaining, channels not
530  * mapped to hardware events (or mapped to unused events) are preferable.
531  *
532  * DMA transfers start from a channel using edma_start(), or by
533  * chaining.  When the transfer described in that channel's parameter RAM
534  * slot completes, that slot's data may be reloaded through a link.
535  *
536  * DMA errors are only reported to the @callback associated with the
537  * channel driving that transfer, but transfer completion callbacks can
538  * be sent to another channel under control of the TCC field in
539  * the option word of the transfer's parameter RAM set.  Drivers must not
540  * use DMA transfer completion callbacks for channels they did not allocate.
541  * (The same applies to TCC codes used in transfer chaining.)
542  *
543  * Returns the number of the channel, else negative errno.
544  */
545 int edma_alloc_channel(int channel,
546                 void (*callback)(unsigned channel, u16 ch_status, void *data),
547                 void *data,
548                 enum dma_event_q eventq_no)
549 {
550         unsigned i, done, ctlr = 0;
551
552         if (channel >= 0) {
553                 ctlr = EDMA_CTLR(channel);
554                 channel = EDMA_CHAN_SLOT(channel);
555         }
556
557         if (channel < 0) {
558                 for (i = 0; i < EDMA_MAX_CC; i++) {
559                         channel = 0;
560                         for (;;) {
561                                 channel = find_next_bit(edma_info[i]->
562                                                 edma_noevent,
563                                                 edma_info[i]->num_channels,
564                                                 channel);
565                                 if (channel == edma_info[i]->num_channels)
566                                         return -ENOMEM;
567                                 if (!test_and_set_bit(channel,
568                                                 edma_info[i]->edma_inuse)) {
569                                         done = 1;
570                                         ctlr = i;
571                                         break;
572                                 }
573                                 channel++;
574                         }
575                         if (done)
576                                 break;
577                 }
578         } else if (channel >= edma_info[ctlr]->num_channels) {
579                 return -EINVAL;
580         } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
581                 return -EBUSY;
582         }
583
584         /* ensure access through shadow region 0 */
585         edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
586
587         /* ensure no events are pending */
588         edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
589         memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
590                         &dummy_paramset, PARM_SIZE);
591
592         if (callback)
593                 setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
594                                         callback, data);
595
596         map_dmach_queue(ctlr, channel, eventq_no);
597
598         return channel;
599 }
600 EXPORT_SYMBOL(edma_alloc_channel);
601
602
603 /**
604  * edma_free_channel - deallocate DMA channel
605  * @channel: dma channel returned from edma_alloc_channel()
606  *
607  * This deallocates the DMA channel and associated parameter RAM slot
608  * allocated by edma_alloc_channel().
609  *
610  * Callers are responsible for ensuring the channel is inactive, and
611  * will not be reactivated by linking, chaining, or software calls to
612  * edma_start().
613  */
614 void edma_free_channel(unsigned channel)
615 {
616         unsigned ctlr;
617
618         ctlr = EDMA_CTLR(channel);
619         channel = EDMA_CHAN_SLOT(channel);
620
621         if (channel >= edma_info[ctlr]->num_channels)
622                 return;
623
624         setup_dma_interrupt(channel, NULL, NULL);
625         /* REVISIT should probably take out of shadow region 0 */
626
627         memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
628                         &dummy_paramset, PARM_SIZE);
629         clear_bit(channel, edma_info[ctlr]->edma_inuse);
630 }
631 EXPORT_SYMBOL(edma_free_channel);
632
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ctlr: channel controller on which to allocate the slot
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the controller-qualified number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		/* search only above the channel-mapped slots */
		slot = edma_info[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
					edma_info[ctlr]->num_slots, slot);
			if (slot == edma_info[ctlr]->num_slots)
				return -ENOMEM;
			/* atomic claim; retry if someone raced us to it */
			if (!test_and_set_bit(slot,
						edma_info[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_info[ctlr]->num_channels ||
			slot >= edma_info[ctlr]->num_slots) {
		/* specific slots below num_channels belong to channels */
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* start from a clean, do-nothing transfer description */
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
676
677 /**
678  * edma_free_slot - deallocate DMA parameter RAM
679  * @slot: parameter RAM slot returned from edma_alloc_slot()
680  *
681  * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
682  * Callers are responsible for ensuring the slot is inactive, and will
683  * not be activated.
684  */
685 void edma_free_slot(unsigned slot)
686 {
687         unsigned ctlr;
688
689         ctlr = EDMA_CTLR(slot);
690         slot = EDMA_CHAN_SLOT(slot);
691
692         if (slot < edma_info[ctlr]->num_channels ||
693                 slot >= edma_info[ctlr]->num_slots)
694                 return;
695
696         memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
697                         &dummy_paramset, PARM_SIZE);
698         clear_bit(slot, edma_info[ctlr]->edma_inuse);
699 }
700 EXPORT_SYMBOL(edma_free_slot);
701
702 /*-----------------------------------------------------------------------*/
703
704 /* Parameter RAM operations (i) -- read/write partial slots */
705
706 /**
707  * edma_set_src - set initial DMA source address in parameter RAM slot
708  * @slot: parameter RAM slot being configured
709  * @src_port: physical address of source (memory, controller FIFO, etc)
710  * @addressMode: INCR, except in very rare cases
711  * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
712  *      width to use when addressing the fifo (e.g. W8BIT, W32BIT)
713  *
714  * Note that the source address is modified during the DMA transfer
715  * according to edma_set_src_index().
716  */
717 void edma_set_src(unsigned slot, dma_addr_t src_port,
718                                 enum address_mode mode, enum fifo_width width)
719 {
720         unsigned ctlr;
721
722         ctlr = EDMA_CTLR(slot);
723         slot = EDMA_CHAN_SLOT(slot);
724
725         if (slot < edma_info[ctlr]->num_slots) {
726                 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
727
728                 if (mode) {
729                         /* set SAM and program FWID */
730                         i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
731                 } else {
732                         /* clear SAM */
733                         i &= ~SAM;
734                 }
735                 edma_parm_write(ctlr, PARM_OPT, slot, i);
736
737                 /* set the source port address
738                    in source register of param structure */
739                 edma_parm_write(ctlr, PARM_SRC, slot, src_port);
740         }
741 }
742 EXPORT_SYMBOL(edma_set_src);
743
744 /**
745  * edma_set_dest - set initial DMA destination address in parameter RAM slot
746  * @slot: parameter RAM slot being configured
747  * @dest_port: physical address of destination (memory, controller FIFO, etc)
748  * @addressMode: INCR, except in very rare cases
749  * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
750  *      width to use when addressing the fifo (e.g. W8BIT, W32BIT)
751  *
752  * Note that the destination address is modified during the DMA transfer
753  * according to edma_set_dest_index().
754  */
755 void edma_set_dest(unsigned slot, dma_addr_t dest_port,
756                                  enum address_mode mode, enum fifo_width width)
757 {
758         unsigned ctlr;
759
760         ctlr = EDMA_CTLR(slot);
761         slot = EDMA_CHAN_SLOT(slot);
762
763         if (slot < edma_info[ctlr]->num_slots) {
764                 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
765
766                 if (mode) {
767                         /* set DAM and program FWID */
768                         i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
769                 } else {
770                         /* clear DAM */
771                         i &= ~DAM;
772                 }
773                 edma_parm_write(ctlr, PARM_OPT, slot, i);
774                 /* set the destination port address
775                    in dest register of param structure */
776                 edma_parm_write(ctlr, PARM_DST, slot, dest_port);
777         }
778 }
779 EXPORT_SYMBOL(edma_set_dest);
780
781 /**
782  * edma_get_position - returns the current transfer points
783  * @slot: parameter RAM slot being examined
784  * @src: pointer to source port position
785  * @dst: pointer to destination port position
786  *
787  * Returns current source and destination addresses for a particular
788  * parameter RAM slot.  Its channel should not be active when this is called.
789  */
790 void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
791 {
792         struct edmacc_param temp;
793         unsigned ctlr;
794
795         ctlr = EDMA_CTLR(slot);
796         slot = EDMA_CHAN_SLOT(slot);
797
798         edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
799         if (src != NULL)
800                 *src = temp.src;
801         if (dst != NULL)
802                 *dst = temp.dst;
803 }
804 EXPORT_SYMBOL(edma_get_position);
805
806 /**
807  * edma_set_src_index - configure DMA source address indexing
808  * @slot: parameter RAM slot being configured
809  * @src_bidx: byte offset between source arrays in a frame
810  * @src_cidx: byte offset between source frames in a block
811  *
812  * Offsets are specified to support either contiguous or discontiguous
813  * memory transfers, or repeated access to a hardware register, as needed.
814  * When accessing hardware registers, both offsets are normally zero.
815  */
816 void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
817 {
818         unsigned ctlr;
819
820         ctlr = EDMA_CTLR(slot);
821         slot = EDMA_CHAN_SLOT(slot);
822
823         if (slot < edma_info[ctlr]->num_slots) {
824                 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
825                                 0xffff0000, src_bidx);
826                 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
827                                 0xffff0000, src_cidx);
828         }
829 }
830 EXPORT_SYMBOL(edma_set_src_index);
831
832 /**
833  * edma_set_dest_index - configure DMA destination address indexing
834  * @slot: parameter RAM slot being configured
835  * @dest_bidx: byte offset between destination arrays in a frame
836  * @dest_cidx: byte offset between destination frames in a block
837  *
838  * Offsets are specified to support either contiguous or discontiguous
839  * memory transfers, or repeated access to a hardware register, as needed.
840  * When accessing hardware registers, both offsets are normally zero.
841  */
842 void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
843 {
844         unsigned ctlr;
845
846         ctlr = EDMA_CTLR(slot);
847         slot = EDMA_CHAN_SLOT(slot);
848
849         if (slot < edma_info[ctlr]->num_slots) {
850                 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
851                                 0x0000ffff, dest_bidx << 16);
852                 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
853                                 0x0000ffff, dest_cidx << 16);
854         }
855 }
856 EXPORT_SYMBOL(edma_set_dest_index);
857
858 /**
859  * edma_set_transfer_params - configure DMA transfer parameters
860  * @slot: parameter RAM slot being configured
861  * @acnt: how many bytes per array (at least one)
862  * @bcnt: how many arrays per frame (at least one)
863  * @ccnt: how many frames per block (at least one)
864  * @bcnt_rld: used only for A-Synchronized transfers; this specifies
865  *      the value to reload into bcnt when it decrements to zero
866  * @sync_mode: ASYNC or ABSYNC
867  *
868  * See the EDMA3 documentation to understand how to configure and link
869  * transfers using the fields in PaRAM slots.  If you are not doing it
870  * all at once with edma_write_slot(), you will use this routine
871  * plus two calls each for source and destination, setting the initial
872  * address and saying how to index that address.
873  *
874  * An example of an A-Synchronized transfer is a serial link using a
875  * single word shift register.  In that case, @acnt would be equal to
876  * that word size; the serial controller issues a DMA synchronization
877  * event to transfer each word, and memory access by the DMA transfer
878  * controller will be word-at-a-time.
879  *
880  * An example of an AB-Synchronized transfer is a device using a FIFO.
881  * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
882  * The controller with the FIFO issues DMA synchronization events when
883  * the FIFO threshold is reached, and the DMA transfer controller will
884  * transfer one frame to (or from) the FIFO.  It will probably use
885  * efficient burst modes to access memory.
886  */
887 void edma_set_transfer_params(unsigned slot,
888                 u16 acnt, u16 bcnt, u16 ccnt,
889                 u16 bcnt_rld, enum sync_dimension sync_mode)
890 {
891         unsigned ctlr;
892
893         ctlr = EDMA_CTLR(slot);
894         slot = EDMA_CHAN_SLOT(slot);
895
896         if (slot < edma_info[ctlr]->num_slots) {
897                 edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
898                                 0x0000ffff, bcnt_rld << 16);
899                 if (sync_mode == ASYNC)
900                         edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
901                 else
902                         edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
903                 /* Set the acount, bcount, ccount registers */
904                 edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
905                 edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
906         }
907 }
908 EXPORT_SYMBOL(edma_set_transfer_params);
909
910 /**
911  * edma_link - link one parameter RAM slot to another
912  * @from: parameter RAM slot originating the link
913  * @to: parameter RAM slot which is the link target
914  *
915  * The originating slot should not be part of any active DMA transfer.
916  */
917 void edma_link(unsigned from, unsigned to)
918 {
919         unsigned ctlr_from, ctlr_to;
920
921         ctlr_from = EDMA_CTLR(from);
922         from = EDMA_CHAN_SLOT(from);
923         ctlr_to = EDMA_CTLR(to);
924         to = EDMA_CHAN_SLOT(to);
925
926         if (from >= edma_info[ctlr_from]->num_slots)
927                 return;
928         if (to >= edma_info[ctlr_to]->num_slots)
929                 return;
930         edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
931                                 PARM_OFFSET(to));
932 }
933 EXPORT_SYMBOL(edma_link);
934
935 /**
936  * edma_unlink - cut link from one parameter RAM slot
937  * @from: parameter RAM slot originating the link
938  *
939  * The originating slot should not be part of any active DMA transfer.
940  * Its link is set to 0xffff.
941  */
942 void edma_unlink(unsigned from)
943 {
944         unsigned ctlr;
945
946         ctlr = EDMA_CTLR(from);
947         from = EDMA_CHAN_SLOT(from);
948
949         if (from >= edma_info[ctlr]->num_slots)
950                 return;
951         edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
952 }
953 EXPORT_SYMBOL(edma_unlink);
954
955 /*-----------------------------------------------------------------------*/
956
957 /* Parameter RAM operations (ii) -- read/write whole parameter sets */
958
959 /**
960  * edma_write_slot - write parameter RAM data for slot
961  * @slot: number of parameter RAM slot being modified
962  * @param: data to be written into parameter RAM slot
963  *
964  * Use this to assign all parameters of a transfer at once.  This
965  * allows more efficient setup of transfers than issuing multiple
966  * calls to set up those parameters in small pieces, and provides
967  * complete control over all transfer options.
968  */
969 void edma_write_slot(unsigned slot, const struct edmacc_param *param)
970 {
971         unsigned ctlr;
972
973         ctlr = EDMA_CTLR(slot);
974         slot = EDMA_CHAN_SLOT(slot);
975
976         if (slot >= edma_info[ctlr]->num_slots)
977                 return;
978         memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
979                         PARM_SIZE);
980 }
981 EXPORT_SYMBOL(edma_write_slot);
982
983 /**
984  * edma_read_slot - read parameter RAM data from slot
985  * @slot: number of parameter RAM slot being copied
986  * @param: where to store copy of parameter RAM data
987  *
988  * Use this to read data from a parameter RAM slot, perhaps to
989  * save them as a template for later reuse.
990  */
991 void edma_read_slot(unsigned slot, struct edmacc_param *param)
992 {
993         unsigned ctlr;
994
995         ctlr = EDMA_CTLR(slot);
996         slot = EDMA_CHAN_SLOT(slot);
997
998         if (slot >= edma_info[ctlr]->num_slots)
999                 return;
1000         memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
1001                         PARM_SIZE);
1002 }
1003 EXPORT_SYMBOL(edma_read_slot);
1004
1005 /*-----------------------------------------------------------------------*/
1006
1007 /* Various EDMA channel control operations */
1008
1009 /**
1010  * edma_pause - pause dma on a channel
1011  * @channel: on which edma_start() has been called
1012  *
1013  * This temporarily disables EDMA hardware events on the specified channel,
1014  * preventing them from triggering new transfers on its behalf
1015  */
1016 void edma_pause(unsigned channel)
1017 {
1018         unsigned ctlr;
1019
1020         ctlr = EDMA_CTLR(channel);
1021         channel = EDMA_CHAN_SLOT(channel);
1022
1023         if (channel < edma_info[ctlr]->num_channels) {
1024                 unsigned int mask = (1 << (channel & 0x1f));
1025
1026                 edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
1027         }
1028 }
1029 EXPORT_SYMBOL(edma_pause);
1030
1031 /**
1032  * edma_resume - resumes dma on a paused channel
1033  * @channel: on which edma_pause() has been called
1034  *
1035  * This re-enables EDMA hardware events on the specified channel.
1036  */
1037 void edma_resume(unsigned channel)
1038 {
1039         unsigned ctlr;
1040
1041         ctlr = EDMA_CTLR(channel);
1042         channel = EDMA_CHAN_SLOT(channel);
1043
1044         if (channel < edma_info[ctlr]->num_channels) {
1045                 unsigned int mask = (1 << (channel & 0x1f));
1046
1047                 edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
1048         }
1049 }
1050 EXPORT_SYMBOL(edma_resume);
1051
1052 /**
1053  * edma_start - start dma on a channel
1054  * @channel: channel being activated
1055  *
1056  * Channels with event associations will be triggered by their hardware
1057  * events, and channels without such associations will be triggered by
1058  * software.  (At this writing there is no interface for using software
1059  * triggers except with channels that don't support hardware triggers.)
1060  *
1061  * Returns zero on success, else negative errno.
1062  */
1063 int edma_start(unsigned channel)
1064 {
1065         unsigned ctlr;
1066
1067         ctlr = EDMA_CTLR(channel);
1068         channel = EDMA_CHAN_SLOT(channel);
1069
1070         if (channel < edma_info[ctlr]->num_channels) {
1071                 int j = channel >> 5;
1072                 unsigned int mask = (1 << (channel & 0x1f));
1073
1074                 /* EDMA channels without event association */
1075                 if (test_bit(channel, edma_info[ctlr]->edma_noevent)) {
1076                         pr_debug("EDMA: ESR%d %08x\n", j,
1077                                 edma_shadow0_read_array(ctlr, SH_ESR, j));
1078                         edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
1079                         return 0;
1080                 }
1081
1082                 /* EDMA channel with event association */
1083                 pr_debug("EDMA: ER%d %08x\n", j,
1084                         edma_shadow0_read_array(ctlr, SH_ER, j));
1085                 /* Clear any pending error */
1086                 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1087                 /* Clear any SER */
1088                 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1089                 edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
1090                 pr_debug("EDMA: EER%d %08x\n", j,
1091                         edma_shadow0_read_array(ctlr, SH_EER, j));
1092                 return 0;
1093         }
1094
1095         return -EINVAL;
1096 }
1097 EXPORT_SYMBOL(edma_start);
1098
1099 /**
1100  * edma_stop - stops dma on the channel passed
1101  * @channel: channel being deactivated
1102  *
1103  * When @lch is a channel, any active transfer is paused and
1104  * all pending hardware events are cleared.  The current transfer
1105  * may not be resumed, and the channel's Parameter RAM should be
1106  * reinitialized before being reused.
1107  */
1108 void edma_stop(unsigned channel)
1109 {
1110         unsigned ctlr;
1111
1112         ctlr = EDMA_CTLR(channel);
1113         channel = EDMA_CHAN_SLOT(channel);
1114
1115         if (channel < edma_info[ctlr]->num_channels) {
1116                 int j = channel >> 5;
1117                 unsigned int mask = (1 << (channel & 0x1f));
1118
1119                 edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
1120                 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1121                 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1122                 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1123
1124                 pr_debug("EDMA: EER%d %08x\n", j,
1125                                 edma_shadow0_read_array(ctlr, SH_EER, j));
1126
1127                 /* REVISIT:  consider guarding against inappropriate event
1128                  * chaining by overwriting with dummy_paramset.
1129                  */
1130         }
1131 }
1132 EXPORT_SYMBOL(edma_stop);
1133
/**
 * edma_clean_channel - bring back EDMA to initial state
 * @channel: channel being cleaned up
 *
 * It cleans the ParamEntry and brings EDMA back to its initial state
 * if the media has been removed before EDMA has finished.  It is useful
 * for removable media.
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 */
void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_info[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = 1 << (channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		/* Clear any pending event */
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		/* Clear CC error status (queue 0/1 threshold + TCC error bit) */
		edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
	}
}
EXPORT_SYMBOL(edma_clean_channel);
1169
1170 /*
1171  * edma_clear_event - clear an outstanding event on the DMA channel
1172  * Arguments:
1173  *      channel - channel number
1174  */
1175 void edma_clear_event(unsigned channel)
1176 {
1177         unsigned ctlr;
1178
1179         ctlr = EDMA_CTLR(channel);
1180         channel = EDMA_CHAN_SLOT(channel);
1181
1182         if (channel >= edma_info[ctlr]->num_channels)
1183                 return;
1184         if (channel < 32)
1185                 edma_write(ctlr, EDMA_ECR, 1 << channel);
1186         else
1187                 edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
1188 }
1189 EXPORT_SYMBOL(edma_clear_event);
1190
1191 /*-----------------------------------------------------------------------*/
1192
/*
 * edma_probe - discover and initialize every EDMA channel controller (CC)
 * @pdev: platform device carrying "edma_cc<N>" memory resources,
 *	"edma<N>"/"edma<N>_err" IRQ resources, and edma_soc_info platform data
 *
 * For each CC found: maps its registers, allocates its bookkeeping struct,
 * resets all PaRAM slots, records no-event channels, requests its transfer
 * and error interrupts, and programs the default queue/TC/priority mappings.
 * Returns zero on success, else a negative errno; on failure all resources
 * acquired so far are released via the goto cleanup chain.
 */
static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	*info = pdev->dev.platform_data;
	const s8		(*queue_priority_mapping)[2];
	const s8		(*queue_tc_mapping)[2];
	int			i, j, found = 0;
	int			status = -1;
	const s8		*noevent;
	int			irq[EDMA_MAX_CC] = {0, 0};
	int			err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource		*r[EDMA_MAX_CC] = {NULL};
	resource_size_t		len[EDMA_MAX_CC];
	char			res_name[10];
	char			irq_name[10];

	/* Platform data is mandatory: it describes each CC's geometry. */
	if (!info)
		return -ENODEV;

	for (j = 0; j < EDMA_MAX_CC; j++) {
		/* A missing "edma_cc<j>" resource ends the scan, but at
		 * least one CC must exist or the probe fails outright.
		 */
		sprintf(res_name, "edma_cc%d", j);
		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						res_name);
		if (!r[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else
			found = 1;

		len[j] = resource_size(r[j]);

		r[j] = request_mem_region(r[j]->start, len[j],
			dev_name(&pdev->dev));
		if (!r[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
		if (!edmacc_regs_base[j]) {
			status = -EBUSY;
			goto fail1;
		}

		/* Zeroed allocation (kmalloc + memset; kzalloc would be
		 * equivalent) so all bitmaps/counters start clear.
		 */
		edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
		if (!edma_info[j]) {
			status = -ENOMEM;
			goto fail1;
		}
		memset(edma_info[j], 0, sizeof(struct edma));

		/* Clamp platform-supplied counts to the driver maximums. */
		edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
							EDMA_MAX_DMACH);
		edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
							EDMA_MAX_CC);

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		/* Reset every PaRAM slot to the dummy (no-op) parameter set. */
		for (i = 0; i < edma_info[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Record which channels have no hardware event association;
		 * the list is terminated by -1.
		 */
		noevent = info[j].noevent;
		if (noevent) {
			while (*noevent != -1)
				set_bit(*noevent++, edma_info[j]->edma_noevent);
		}

		/* NOTE(review): platform_get_irq_byname() can return a
		 * negative errno, which is passed unchecked to request_irq()
		 * below — confirm the platform always provides these IRQs.
		 */
		sprintf(irq_name, "edma%d", j);
		irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_info[j]->irq_res_start = irq[j];
		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				irq[j], status);
			goto fail;
		}

		sprintf(irq_name, "edma%d_err", j);
		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_info[j]->irq_res_end = err_irq[j];
		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				err_irq[j], status);
			goto fail;
		}

		/* Everything lives on transfer controller 1 until otherwise
		 * specified. This way, long transfers on the low priority queue
		 * started by the codec engine will not cause audio defects.
		 */
		for (i = 0; i < edma_info[j]->num_channels; i++)
			map_dmach_queue(j, i, EVENTQ_1);

		queue_tc_mapping = info[j].queue_tc_mapping;
		queue_priority_mapping = info[j].queue_priority_mapping;

		/* Event queue to TC mapping (table terminated by -1) */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping (table terminated by -1) */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exist
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		/* Start with no region access rights; they are granted
		 * per-channel later via edma_alloc_channel().
		 */
		for (i = 0; i < info[j].n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
	}

	/* Transfer-controller error IRQs are platform-wide, not per-CC. */
	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	return 0;

	/* NOTE(review): if request_irq() failed for index j, irq[j] or
	 * err_irq[j] is already nonzero and is passed to free_irq() here
	 * even though it was never successfully requested — verify.
	 */
fail:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (err_irq[i])
			free_irq(err_irq[i], &pdev->dev);
		if (irq[i])
			free_irq(irq[i], &pdev->dev);
	}
fail1:
	/* len[i] is only read when r[i] is set, so it is always valid here. */
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (r[i])
			release_mem_region(r[i]->start, len[i]);
		if (edmacc_regs_base[i])
			iounmap(edmacc_regs_base[i]);
		kfree(edma_info[i]);
	}
	return status;
}
1357
1358
/* Only probe is provided; registration goes through
 * platform_driver_probe() below so edma_probe() can stay in __init.
 */
static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
/* arch_initcall: EDMA must be ready before device drivers that use it. */
arch_initcall(edma_init);
1368