of/dma: mpc512x_dma.c: Fix build failures
[safe/jmp/linux-2.6] / drivers / dma / at_hdmac.c
index 64dbf0c..bd5250e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include "at_hdmac_regs.h"
 
@@ -87,6 +88,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
+               INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
@@ -98,7 +100,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
 }
 
 /**
- * atc_desc_get - get a unsused descriptor from free_list
+ * atc_desc_get - get an unused descriptor from free_list
  * @atchan: channel we want a new descriptor for
  */
 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
@@ -150,11 +152,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
                struct at_desc *child;
 
                spin_lock_bh(&atchan->lock);
-               list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+               list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                        "moving child desc %p to freelist\n",
                                        child);
-               list_splice_init(&desc->txd.tx_list, &atchan->free_list);
+               list_splice_init(&desc->tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
@@ -247,30 +249,33 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
        param = txd->callback_param;
 
        /* move children to free_list */
-       list_splice_init(&txd->tx_list, &atchan->free_list);
+       list_splice_init(&desc->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);
 
        /* unmap dma addresses */
-       if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-               if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-                       dma_unmap_single(chan2parent(&atchan->chan_common),
-                                       desc->lli.daddr,
-                                       desc->len, DMA_FROM_DEVICE);
-               else
-                       dma_unmap_page(chan2parent(&atchan->chan_common),
-                                       desc->lli.daddr,
-                                       desc->len, DMA_FROM_DEVICE);
-       }
-       if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-               if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-                       dma_unmap_single(chan2parent(&atchan->chan_common),
-                                       desc->lli.saddr,
-                                       desc->len, DMA_TO_DEVICE);
-               else
-                       dma_unmap_page(chan2parent(&atchan->chan_common),
-                                       desc->lli.saddr,
-                                       desc->len, DMA_TO_DEVICE);
+       if (!atchan->chan_common.private) {
+               struct device *parent = chan2parent(&atchan->chan_common);
+               if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+                       if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+                               dma_unmap_single(parent,
+                                               desc->lli.daddr,
+                                               desc->len, DMA_FROM_DEVICE);
+                       else
+                               dma_unmap_page(parent,
+                                               desc->lli.daddr,
+                                               desc->len, DMA_FROM_DEVICE);
+               }
+               if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+                       if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+                               dma_unmap_single(parent,
+                                               desc->lli.saddr,
+                                               desc->len, DMA_TO_DEVICE);
+                       else
+                               dma_unmap_page(parent,
+                                               desc->lli.saddr,
+                                               desc->len, DMA_TO_DEVICE);
+               }
        }
 
        /*
@@ -334,7 +339,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
                        /* This one is currently in progress */
                        return;
 
-               list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+               list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (!(child->lli.ctrla & ATC_DONE))
                                /* Currently in progress */
                                return;
@@ -407,7 +412,7 @@ static void atc_handle_error(struct at_dma_chan *atchan)
        dev_crit(chan2dev(&atchan->chan_common),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
-       list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+       list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);
 
        /* Pretend the descriptor completed successfully */
@@ -587,7 +592,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                        prev->lli.dscr = desc->txd.phys;
                        /* insert the link descriptor to the LD ring */
                        list_add_tail(&desc->desc_node,
-                                       &first->txd.tx_list);
+                                       &first->tx_list);
                }
                prev = desc;
        }
@@ -608,31 +613,213 @@ err_desc_get:
        return NULL;
 }
 
+
 /**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned long flags)
+{
+       struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+       struct at_dma_slave     *atslave = chan->private;
+       struct at_desc          *first = NULL;
+       struct at_desc          *prev = NULL;
+       u32                     ctrla;
+       u32                     ctrlb;
+       dma_addr_t              reg;
+       unsigned int            reg_width;
+       unsigned int            mem_width;
+       unsigned int            i;
+       struct scatterlist      *sg;
+       size_t                  total_len = 0;
+
+       dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+                       direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+                       flags);
+
+       if (unlikely(!atslave || !sg_len)) {
+               dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+               return NULL;
+       }
+
+       reg_width = atslave->reg_width;
+
+       ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+       ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+
+       switch (direction) {
+       case DMA_TO_DEVICE:
+               ctrla |=  ATC_DST_WIDTH(reg_width);
+               ctrlb |=  ATC_DST_ADDR_MODE_FIXED
+                       | ATC_SRC_ADDR_MODE_INCR
+                       | ATC_FC_MEM2PER;
+               reg = atslave->tx_reg;
+               for_each_sg(sgl, sg, sg_len, i) {
+                       struct at_desc  *desc;
+                       u32             len;
+                       u32             mem;
+
+                       desc = atc_desc_get(atchan);
+                       if (!desc)
+                               goto err_desc_get;
+
+                       mem = sg_phys(sg);
+                       len = sg_dma_len(sg);
+                       mem_width = 2;
+                       if (unlikely(mem & 3 || len & 3))
+                               mem_width = 0;
+
+                       desc->lli.saddr = mem;
+                       desc->lli.daddr = reg;
+                       desc->lli.ctrla = ctrla
+                                       | ATC_SRC_WIDTH(mem_width)
+                                       | len >> mem_width;
+                       desc->lli.ctrlb = ctrlb;
+
+                       if (!first) {
+                               first = desc;
+                       } else {
+                               /* inform the HW lli about chaining */
+                               prev->lli.dscr = desc->txd.phys;
+                               /* insert the link descriptor to the LD ring */
+                               list_add_tail(&desc->desc_node,
+                                               &first->tx_list);
+                       }
+                       prev = desc;
+                       total_len += len;
+               }
+               break;
+       case DMA_FROM_DEVICE:
+               ctrla |=  ATC_SRC_WIDTH(reg_width);
+               ctrlb |=  ATC_DST_ADDR_MODE_INCR
+                       | ATC_SRC_ADDR_MODE_FIXED
+                       | ATC_FC_PER2MEM;
+
+               reg = atslave->rx_reg;
+               for_each_sg(sgl, sg, sg_len, i) {
+                       struct at_desc  *desc;
+                       u32             len;
+                       u32             mem;
+
+                       desc = atc_desc_get(atchan);
+                       if (!desc)
+                               goto err_desc_get;
+
+                       mem = sg_phys(sg);
+                       len = sg_dma_len(sg);
+                       mem_width = 2;
+                       if (unlikely(mem & 3 || len & 3))
+                               mem_width = 0;
+
+                       desc->lli.saddr = reg;
+                       desc->lli.daddr = mem;
+                       desc->lli.ctrla = ctrla
+                                       | ATC_DST_WIDTH(mem_width)
+                                       | len >> mem_width;
+                       desc->lli.ctrlb = ctrlb;
+
+                       if (!first) {
+                               first = desc;
+                       } else {
+                               /* inform the HW lli about chaining */
+                               prev->lli.dscr = desc->txd.phys;
+                               /* insert the link descriptor to the LD ring */
+                               list_add_tail(&desc->desc_node,
+                                               &first->tx_list);
+                       }
+                       prev = desc;
+                       total_len += len;
+               }
+               break;
+       default:
+               return NULL;
+       }
+
+       /* set end-of-link to the last link descriptor of the list */
+       set_desc_eol(prev);
+
+       /* First descriptor of the chain embeds additional information */
+       first->txd.cookie = -EBUSY;
+       first->len = total_len;
+
+       /* last link descriptor of the list is responsible for the flags */
+       prev->txd.flags = flags; /* client is in control of this ack */
+
+       return &first->txd;
+
+err_desc_get:
+       dev_err(chan2dev(chan), "not enough descriptors available\n");
+       atc_desc_put(atchan, first);
+       return NULL;
+}
+
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                      unsigned long arg)
+{
+       struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+       struct at_dma           *atdma = to_at_dma(chan->device);
+       struct at_desc          *desc, *_desc;
+       LIST_HEAD(list);
+
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
+       /*
+        * This is only called when something went wrong elsewhere, so
+        * we don't really care about the data. Just disable the
+        * channel. We still have to poll the channel enable bit due
+        * to AHB/HSB limitations.
+        */
+       spin_lock_bh(&atchan->lock);
+
+       dma_writel(atdma, CHDR, atchan->mask);
+
+       /* confirm that this channel is disabled */
+       while (dma_readl(atdma, CHSR) & atchan->mask)
+               cpu_relax();
+
+       /* active_list entries will end up before queued entries */
+       list_splice_init(&atchan->queue, &list);
+       list_splice_init(&atchan->active_list, &list);
+
+       spin_unlock_bh(&atchan->lock);
+
+       /* Flush all pending and queued descriptors */
+       list_for_each_entry_safe(desc, _desc, &list, desc_node)
+               atc_chain_complete(atchan, desc);
+
+       return 0;
+}
+
+/**
+ * atc_tx_status - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL updated with transaction state
  *
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
 static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie,
-               dma_cookie_t *done, dma_cookie_t *used)
+               struct dma_tx_state *txstate)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        dma_cookie_t            last_used;
        dma_cookie_t            last_complete;
        enum dma_status         ret;
 
-       dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
-                       cookie, done ? *done : 0, used ? *used : 0);
-
-       spin_lock_bh(atchan->lock);
+       spin_lock_bh(&atchan->lock);
 
        last_complete = atchan->completed_cookie;
        last_used = chan->cookie;
@@ -647,12 +834,12 @@ atc_is_tx_complete(struct dma_chan *chan,
                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }
 
-       spin_unlock_bh(atchan->lock);
+       spin_unlock_bh(&atchan->lock);
 
-       if (done)
-               *done = last_complete;
-       if (used)
-               *used = last_used;
+       dma_set_tx_state(txstate, last_complete, last_used, 0);
+       dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+                cookie, last_complete ? last_complete : 0,
+                last_used ? last_used : 0);
 
        return ret;
 }
@@ -686,7 +873,9 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc;
+       struct at_dma_slave     *atslave;
        int                     i;
+       u32                     cfg;
        LIST_HEAD(tmp_list);
 
        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -697,7 +886,23 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
                return -EIO;
        }
 
-       /* have we already been set up? */
+       cfg = ATC_DEFAULT_CFG;
+
+       atslave = chan->private;
+       if (atslave) {
+               /*
+                * We need controller-specific data to set up slave
+                * transfers.
+                */
+               BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
+
+               /* if a cfg configuration is specified, take it instead of the default */
+               if (atslave->cfg)
+                       cfg = atslave->cfg;
+       }
+
+       /* have we already been set up?
+        * reconfigure the channel, but there is no need to reallocate descriptors */
        if (!list_empty(&atchan->free_list))
                return atchan->descs_allocated;
 
@@ -719,7 +924,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
        spin_unlock_bh(&atchan->lock);
 
        /* channel parameters */
-       channel_writel(atchan, CFG, ATC_DEFAULT_CFG);
+       channel_writel(atchan, CFG, cfg);
 
        dev_dbg(chan2dev(chan),
                "alloc_chan_resources: allocated %d descriptors\n",
@@ -880,7 +1085,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
        /* set base routines */
        atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
        atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
-       atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+       atdma->dma_common.device_tx_status = atc_tx_status;
        atdma->dma_common.device_issue_pending = atc_issue_pending;
        atdma->dma_common.dev = &pdev->dev;
 
@@ -888,6 +1093,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
        if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
                atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
+       if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+               atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+               atdma->dma_common.device_control = atc_control;
+       }
+
        dma_writel(atdma, EN, AT_DMA_ENABLE);
 
        dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
@@ -962,32 +1172,37 @@ static void at_dma_shutdown(struct platform_device *pdev)
        clk_disable(atdma->clk);
 }
 
-static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+static int at_dma_suspend_noirq(struct device *dev)
 {
-       struct at_dma   *atdma = platform_get_drvdata(pdev);
+       struct platform_device *pdev = to_platform_device(dev);
+       struct at_dma *atdma = platform_get_drvdata(pdev);
 
        at_dma_off(platform_get_drvdata(pdev));
        clk_disable(atdma->clk);
        return 0;
 }
 
-static int at_dma_resume_early(struct platform_device *pdev)
+static int at_dma_resume_noirq(struct device *dev)
 {
-       struct at_dma   *atdma = platform_get_drvdata(pdev);
+       struct platform_device *pdev = to_platform_device(dev);
+       struct at_dma *atdma = platform_get_drvdata(pdev);
 
        clk_enable(atdma->clk);
        dma_writel(atdma, EN, AT_DMA_ENABLE);
        return 0;
-
 }
 
+static const struct dev_pm_ops at_dma_dev_pm_ops = {
+       .suspend_noirq = at_dma_suspend_noirq,
+       .resume_noirq = at_dma_resume_noirq,
+};
+
 static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
-       .suspend_late   = at_dma_suspend_late,
-       .resume_early   = at_dma_resume_early,
        .driver = {
                .name   = "at_hdmac",
+               .pm     = &at_dma_dev_pm_ops,
        },
 };