/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8
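
/* Each hardware descriptor is 8 bytes; see td_fill_desc() for the layout. */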

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used for mutual exclusion */
	dma_cookie_t		last_completed_cookie;
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_data_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};
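
/* chan2dev() returns the device of the DMA channel itself (used for
 * logging), while chan2dmadev() walks up to the parent of the channel's
 * parent, which is the device the streaming DMA mappings below are made
 * against.
 */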
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}
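
/* The channels are a flexible array at the end of struct timb_dma, so the
 * owning struct timb_dma can be recovered from a channel pointer by stepping
 * back over the preceding channels and the struct header.
 */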
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}
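
/* Decode the bus address and length back out of one hardware descriptor and
 * unmap the client buffer it described.
 */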
static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
}

static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}
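
/* td_fill_desc() packs one scatterlist element into an 8-byte hardware
 * descriptor: bus address in bytes 4-7 and length in bytes 2-3, both little
 * endian, with the control flags in byte 0 and bit 0x02 marking the last
 * element of the chain.
 */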
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHORT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
		dma_desc, (void *)(int)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_FROM_DEVICE) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);

		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);

		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}
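
/* Complete the descriptor at the head of the active list: stop an RX
 * transfer, record the completed cookie, move the descriptor back to the
 * free list, unmap the client buffers unless the submitter asked to skip
 * that, and finally run the completion callback.
 * Called with the channel lock held.
 */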
static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd;
	struct timb_dma_desc		*td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_FROM_DEVICE)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	td_chan->last_completed_cookie = txd->cookie;
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}
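
/* Build the interrupt-enable mask: one bit per channel that has an ongoing
 * transfer whose active descriptor asked for an interrupt on completion.
 */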
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}
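
/* tx_submit hook: assign the next cookie and either start the transfer at
 * once if the channel is idle, or park the descriptor on the queue.
 */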
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}
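
/* Allocate one software descriptor together with its list of hardware
 * descriptors, and map that list so the controller can fetch it.
 */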
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}
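
/* dmaengine callback: pre-allocate the pool of descriptors this channel was
 * configured for (td_chan->descs) and put them on the free list.
 */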
static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	td_chan->last_completed_cookie = 1;
	chan->cookie = 1;
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	last_complete = td_chan->last_completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	dev_dbg(chan2dev(chan),
		"%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
		__func__, ret, last_complete, last_used);

	return ret;
}
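
/* dmaengine callback: ack a finished transfer if one is pending, then start
 * the next queued descriptor on an otherwise idle channel.
 */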
static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}
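
/* Prepare a slave transfer: one 8-byte hardware descriptor is filled in per
 * scatterlist element, and the finished list is synced to the device before
 * the descriptor is handed back to the client.
 */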
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}

static void td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);
}
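
/* Tasklet: finish the active descriptor of every channel whose interrupt
 * fired, start anything still queued on those channels, and then re-enable
 * interrupts for the channels that still want them.
 */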
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
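
/* Hard interrupt handler: mask the DMA interrupts and defer the actual
 * completion handling to the tasklet.
 */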
static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int __devinit td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_is_tx_complete = td_is_tx_complete;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_tasklet_kill;
		}

		td_chan->chan.device = &td->dma;
		td_chan->chan.cookie = 1;
		td_chan->chan.chan_id = i;
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int __devexit td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= __devexit_p(td_remove),
};

static int __init td_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_init);

static void __exit td_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);