DMAENGINE: DMA40 support paused channel status
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 9a1e5fb..93ed99c 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -87,6 +87,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
+               INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
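The INIT_LIST_HEAD() added above goes with a companion change to at_hdmac_regs.h (not shown in this diff) that gives the driver descriptor its own child list instead of reusing dma_async_tx_descriptor's tx_list. A rough sketch of the assumed struct layout, for orientation only:

	#include <linux/dmaengine.h>
	#include <linux/list.h>

	/* Sketch of struct at_desc after the companion header change;
	 * field set and ordering are assumptions inferred from this diff. */
	struct at_desc {
		struct at_lli			lli;		/* hardware descriptor, must come first */

		struct list_head		tx_list;	/* children of a multi-lli chain */
		struct dma_async_tx_descriptor	txd;
		struct list_head		desc_node;	/* node in active/queue/free lists */
		size_t				len;
	};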
@@ -98,7 +99,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
 }
 
 /**
- * atc_desc_get - get a unsused descriptor from free_list
+ * atc_desc_get - get an unused descriptor from free_list
  * @atchan: channel we want a new descriptor for
  */
 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
@@ -150,11 +151,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
                struct at_desc *child;
 
                spin_lock_bh(&atchan->lock);
-               list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+               list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                        "moving child desc %p to freelist\n",
                                        child);
-               list_splice_init(&desc->txd.tx_list, &atchan->free_list);
+               list_splice_init(&desc->tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
@@ -247,30 +248,33 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
        param = txd->callback_param;
 
        /* move children to free_list */
-       list_splice_init(&txd->tx_list, &atchan->free_list);
+       list_splice_init(&desc->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);
 
        /* unmap dma addresses */
-       if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-               if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-                       dma_unmap_single(chan2parent(&atchan->chan_common),
-                                       desc->lli.daddr,
-                                       desc->len, DMA_FROM_DEVICE);
-               else
-                       dma_unmap_page(chan2parent(&atchan->chan_common),
-                                       desc->lli.daddr,
-                                       desc->len, DMA_FROM_DEVICE);
-       }
-       if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-               if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-                       dma_unmap_single(chan2parent(&atchan->chan_common),
-                                       desc->lli.saddr,
-                                       desc->len, DMA_TO_DEVICE);
-               else
-                       dma_unmap_page(chan2parent(&atchan->chan_common),
-                                       desc->lli.saddr,
-                                       desc->len, DMA_TO_DEVICE);
+       if (!atchan->chan_common.private) {
+               struct device *parent = chan2parent(&atchan->chan_common);
+               if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+                       if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+                               dma_unmap_single(parent,
+                                               desc->lli.daddr,
+                                               desc->len, DMA_FROM_DEVICE);
+                       else
+                               dma_unmap_page(parent,
+                                               desc->lli.daddr,
+                                               desc->len, DMA_FROM_DEVICE);
+               }
+               if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+                       if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+                               dma_unmap_single(parent,
+                                               desc->lli.saddr,
+                                               desc->len, DMA_TO_DEVICE);
+                       else
+                               dma_unmap_page(parent,
+                                               desc->lli.saddr,
+                                               desc->len, DMA_TO_DEVICE);
+               }
        }
 
        /*
@@ -334,7 +338,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
                        /* This one is currently in progress */
                        return;
 
-               list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+               list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (!(child->lli.ctrla & ATC_DONE))
                                /* Currently in progress */
                                return;
@@ -407,7 +411,7 @@ static void atc_handle_error(struct at_dma_chan *atchan)
        dev_crit(chan2dev(&atchan->chan_common),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
-       list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+       list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);
 
        /* Pretend the descriptor completed successfully */
@@ -587,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                        prev->lli.dscr = desc->txd.phys;
                        /* insert the link descriptor to the LD ring */
                        list_add_tail(&desc->desc_node,
-                                       &first->txd.tx_list);
+                                       &first->tx_list);
                }
                prev = desc;
        }
@@ -646,8 +650,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        reg_width = atslave->reg_width;
 
-       sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
-
        ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
        ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
 
@@ -687,7 +689,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                prev->lli.dscr = desc->txd.phys;
                                /* insert the link descriptor to the LD ring */
                                list_add_tail(&desc->desc_node,
-                                               &first->txd.tx_list);
+                                               &first->tx_list);
                        }
                        prev = desc;
                        total_len += len;
@@ -729,7 +731,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                prev->lli.dscr = desc->txd.phys;
                                /* insert the link descriptor to the LD ring */
                                list_add_tail(&desc->desc_node,
-                                               &first->txd.tx_list);
+                                               &first->tx_list);
                        }
                        prev = desc;
                        total_len += len;
@@ -757,13 +759,17 @@ err_desc_get:
        return NULL;
 }
 
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc, *_desc;
        LIST_HEAD(list);
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        /*
         * This is only called when something went wrong elsewhere, so
         * we don't really care about the data. Just disable the
@@ -787,33 +793,31 @@ static void atc_terminate_all(struct dma_chan *chan)
        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
+
+       return 0;
 }
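With device_terminate_all replaced by the multiplexed hook, a slave client now aborts a channel through device_control(). A minimal caller-side sketch, assuming the two-argument device_control() form installed by this patch (the function name my_client_abort() is hypothetical):

	#include <linux/dmaengine.h>

	/* Abort all pending and active descriptors on a slave channel. */
	static void my_client_abort(struct dma_chan *chan)
	{
		int ret;

		ret = chan->device->device_control(chan, DMA_TERMINATE_ALL);
		if (ret)
			dev_warn(chan->device->dev,
				 "DMA_TERMINATE_ALL failed: %d\n", ret);
	}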
 
 /**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_tx_status - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL, updated with transaction state
  *
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
 static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie,
-               dma_cookie_t *done, dma_cookie_t *used)
+               struct dma_tx_state *txstate)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        dma_cookie_t            last_used;
        dma_cookie_t            last_complete;
        enum dma_status         ret;
 
-       dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
-                       cookie, done ? *done : 0, used ? *used : 0);
-
-       spin_lock_bh(atchan->lock);
+       spin_lock_bh(&atchan->lock);
 
        last_complete = atchan->completed_cookie;
        last_used = chan->cookie;
@@ -828,12 +832,12 @@ atc_is_tx_complete(struct dma_chan *chan,
                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }
 
-       spin_unlock_bh(atchan->lock);
+       spin_unlock_bh(&atchan->lock);
 
-       if (done)
-               *done = last_complete;
-       if (used)
-               *used = last_used;
+       dma_set_tx_state(txstate, last_complete, last_used, 0);
+       dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+                cookie, last_complete ? last_complete : 0,
+                last_used ? last_used : 0);
 
        return ret;
 }
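On the client side, the old is_tx_complete(cookie, &done, &used) pattern becomes a single device_tx_status() call that fills a struct dma_tx_state. A minimal polling sketch against the new hook (my_poll_done() is hypothetical; DMA_SUCCESS is the completion value of this kernel generation):

	#include <linux/dmaengine.h>

	/* Returns true once the transfer identified by @cookie has completed. */
	static bool my_poll_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;
		enum dma_status status;

		status = chan->device->device_tx_status(chan, cookie, &state);

		/* state.last/state.used mirror the old done/used out-parameters;
		 * at_hdmac reports no residue, so state.residue is left at 0. */
		return status == DMA_SUCCESS;
	}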
@@ -1079,7 +1083,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
        /* set base routines */
        atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
        atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
-       atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+       atdma->dma_common.device_tx_status = atc_tx_status;
        atdma->dma_common.device_issue_pending = atc_issue_pending;
        atdma->dma_common.dev = &pdev->dev;
 
@@ -1089,7 +1093,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
                atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-               atdma->dma_common.device_terminate_all = atc_terminate_all;
+               atdma->dma_common.device_control = atc_control;
        }
 
        dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1166,32 +1170,37 @@ static void at_dma_shutdown(struct platform_device *pdev)
        clk_disable(atdma->clk);
 }
 
-static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+static int at_dma_suspend_noirq(struct device *dev)
 {
-       struct at_dma   *atdma = platform_get_drvdata(pdev);
+       struct platform_device *pdev = to_platform_device(dev);
+       struct at_dma *atdma = platform_get_drvdata(pdev);
 
        at_dma_off(platform_get_drvdata(pdev));
        clk_disable(atdma->clk);
        return 0;
 }
 
-static int at_dma_resume_early(struct platform_device *pdev)
+static int at_dma_resume_noirq(struct device *dev)
 {
-       struct at_dma   *atdma = platform_get_drvdata(pdev);
+       struct platform_device *pdev = to_platform_device(dev);
+       struct at_dma *atdma = platform_get_drvdata(pdev);
 
        clk_enable(atdma->clk);
        dma_writel(atdma, EN, AT_DMA_ENABLE);
        return 0;
-
 }
 
+static const struct dev_pm_ops at_dma_dev_pm_ops = {
+       .suspend_noirq = at_dma_suspend_noirq,
+       .resume_noirq = at_dma_resume_noirq,
+};
+
 static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
-       .suspend_late   = at_dma_suspend_late,
-       .resume_early   = at_dma_resume_early,
        .driver = {
                .name   = "at_hdmac",
+               .pm     = &at_dma_dev_pm_ops,
        },
 };