/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each device registered. When the
 * device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device unregisters each channel's device, does a kref_put
 * for the first reference, and then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 */
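
/*
 * Example (illustrative sketch, not part of this file): a typical
 * public-channel client becomes a client of the subsystem, looks up a
 * channel per operation type, and drops its reference when done.  The
 * dst/src/len names below are placeholders.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *		status = dma_sync_wait(chan, cookie);
 *	} else
 *		memcpy(dst, src, len);
 *	dmaengine_put();
 */
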
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);

	return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);

	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= dma_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);

	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);

	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
subsys_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
{
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dev_name(&chan->dev));
			continue;
		}
		ret = chan;
		break;
	}

	return ret;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to accept or reject available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	enum dma_state_client ack;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device);
		if (!chan)
			continue;

		if (fn)
			ack = fn(chan, fn_param);
		else
			ack = DMA_ACK;

		if (ack == DMA_ACK) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dev_name(&chan->dev));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
			else
				break;
		} else if (ack == DMA_DUP) {
			pr_debug("%s: %s filter said DMA_DUP\n",
				 __func__, dev_name(&chan->dev));
		} else if (ack == DMA_NAK) {
			pr_debug("%s: %s filter said DMA_NAK\n",
				 __func__, dev_name(&chan->dev));
		} else
			WARN_ONCE(1, "filter_fn: unknown response?\n");
		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dev_name(&chan->dev) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

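/*
 * Example (illustrative sketch): a driver that needs an exclusive channel
 * builds a capability mask, optionally supplies a filter callback, and
 * releases the channel when it is finished.  The dma_request_channel()
 * wrapper lives in dmaengine.h; my_filter and my_dev below are assumed
 * names used only for illustration.
 *
 *	static enum dma_state_client my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param ? DMA_ACK : DMA_DUP;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		use the channel exclusively, then:
 *		dma_release_channel(chan);
 *	}
 */
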
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan* chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		dev_set_name(&chan->dev, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

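/*
 * Example (illustrative sketch): offloading a copy between two kernel
 * buffers without blocking inside the DMA core.  The dst/src/len names are
 * placeholders; a real caller must keep both buffers resident until the
 * transaction completes.
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		memcpy(dst, src, len);
 *	else {
 *		dma_async_issue_pending(chan);
 *		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *		       DMA_IN_PROGRESS)
 *			cpu_relax();
 *	}
 */
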
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc., which ensures that tx is "in-flight" (prepped
 * and submitted). Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
		  " %s\n", __func__, dev_name(&tx->chan->dev));

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		do {
			parent = iter->parent;
			if (!parent)
				break;
			else
				iter = parent;
		} while (parent);

		/* there is a small window for ->parent == NULL and
		 * ->cookie == -EBUSY
		 */
		while (iter->cookie == -EBUSY)
			cpu_relax();

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

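/*
 * Example (illustrative sketch): a DMA driver's descriptor cleanup path
 * would typically kick dependent operations and then run the client
 * callback once a descriptor is known to be complete.  The desc/txd names
 * below are modelled on existing drivers and are assumptions, not
 * requirements of this API.
 *
 *	dma_run_dependencies(&desc->txd);
 *	if (desc->txd.callback)
 *		desc->txd.callback(desc->txd.callback_param);
 */
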
static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);