/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state);

static struct dma_client async_tx_dma = {
	.event_callback = dma_channel_add_remove,
	/* .cap_mask == 0 defaults to all channels */
};

/**
 * async_tx_lock - protect modification of async_tx_master_list and serialize
 *	rebalance operations
 */
static DEFINE_SPINLOCK(async_tx_lock);

static LIST_HEAD(async_tx_master_list);

static void
free_dma_chan_ref(struct rcu_head *rcu)
{
	struct dma_chan_ref *ref;
	ref = container_of(rcu, struct dma_chan_ref, rcu);
	kfree(ref);
}

static void
init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&ref->node);
	INIT_RCU_HEAD(&ref->rcu);
	ref->chan = chan;
	atomic_set(&ref->count, 0);
}

static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state)
{
	unsigned long found, flags;
	struct dma_chan_ref *master_ref, *ref;
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		found = 0;
		rcu_read_lock();
		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				found = 1;
				break;
			}
		rcu_read_unlock();

		pr_debug("async_tx: dma resource available [%s]\n",
			 found ? "old" : "new");

		if (!found)
			ack = DMA_ACK;
		else
			break;

		/* add the channel to the generic management list */
		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
		if (master_ref) {
			init_dma_chan_ref(master_ref, chan);
			spin_lock_irqsave(&async_tx_lock, flags);
			list_add_tail_rcu(&master_ref->node,
				&async_tx_master_list);
			spin_unlock_irqrestore(&async_tx_lock,
				flags);
		} else {
			printk(KERN_WARNING "async_tx: unable to create"
				" new master entry in response to"
				" a DMA_RESOURCE_ADDED event"
				" (-ENOMEM)\n");
			return 0;
		}
		break;
	case DMA_RESOURCE_REMOVED:
		found = 0;
		spin_lock_irqsave(&async_tx_lock, flags);
		list_for_each_entry(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				list_del_rcu(&ref->node);
				call_rcu(&ref->rcu, free_dma_chan_ref);
				found = 1;
				break;
			}
		spin_unlock_irqrestore(&async_tx_lock, flags);

		pr_debug("async_tx: dma resource removed [%s]\n",
			 found ? "ours" : "not ours");

		if (found)
			ack = DMA_ACK;
		break;
	case DMA_RESOURCE_SUSPEND:
	case DMA_RESOURCE_RESUME:
		printk(KERN_WARNING "async_tx: does not support dma channel"
			" suspend/resume\n");
		break;
	default:
		BUG();
	}

	return ack;
}

static int __init async_tx_init(void)
{
	dma_async_client_register(&async_tx_dma);
	dma_async_client_chan_request(&async_tx_dma);

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	dma_async_client_unregister(&async_tx_dma);
}

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
	enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
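
/*
 * Usage sketch (illustrative only, not compiled as part of this file): the
 * async_* front ends are the expected callers of async_tx_find_channel().
 * The identifiers dma_dest, dma_src, len, flags, depend_tx, cb_fn and
 * cb_param below are placeholders for values the caller has already set up.
 *
 *	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
 *	struct dma_async_tx_descriptor *tx = NULL;
 *
 *	if (chan)
 *		tx = chan->device->device_prep_dma_memcpy(chan, dma_dest,
 *							  dma_src, len, 0);
 *	if (tx)
 *		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 *	else
 *		pr_debug("no channel, fall back to a synchronous copy\n");
 */
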
#else
static int __init async_tx_init(void)
{
	printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
	return 0;
}

static void __exit async_tx_exit(void)
{
}
#endif

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
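
/*
 * Note (editorial sketch): after a successful channel switch the dependency
 * chain looks like
 *
 *	depend_tx (old chan) --> intr_tx (old chan) --> tx (new chan)
 *
 * so tx only starts once the interrupt descriptor, and therefore depend_tx,
 * has completed.  Without DMA_INTERRUPT support the code above polls
 * depend_tx to completion and then submits tx directly.
 */
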
/**
 * submit_disposition - while holding depend_tx->lock we must avoid submitting
 *	new operations to prevent a circular locking dependency with
 *	drivers that already hold a channel lock when calling
 *	async_tx_run_dependencies.
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	tx->callback = cb_fn;
	tx->callback_param = cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
		       tx->parent);

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			/* we have a parent so we cannot submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				tx->parent = depend_tx;
				depend_tx->next = tx;
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		spin_unlock_bh(&depend_tx->lock);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			tx->parent = NULL;
			tx->tx_submit(tx);
			break;
		}
	} else {
		tx->parent = NULL;
		tx->tx_submit(tx);
	}

	if (flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
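
/*
 * Summary (editorial sketch) of the disposition selected above:
 *
 *	depend_tx->parent	depend_tx->chan == chan		action
 *	set			yes				append under depend_tx->lock
 *	set			no				async_tx_channel_switch()
 *	NULL			yes				direct tx_submit()
 *	NULL			no				async_tx_channel_switch()
 */
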
/**
 * async_trigger_callback - schedules the callback function to be run after
 *	any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
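
/*
 * Example (illustrative sketch): a caller that needs to sleep until a chain
 * of async_tx operations completes can attach a completion through
 * async_trigger_callback().  'tx' stands for the descriptor returned by the
 * last operation in the chain.
 *
 *	static void chain_complete(void *param)
 *	{
 *		struct completion *cmp = param;
 *
 *		complete(cmp);
 *	}
 *
 *	struct completion cmp;
 *
 *	init_completion(&cmp);
 *	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
 *			       chain_complete, &cmp);
 *	wait_for_completion(&cmp);
 */
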
/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
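
/*
 * Example (illustrative sketch): the synchronous fallback paths in the
 * async_* front ends quiesce their dependency before touching the buffers
 * with the cpu, e.g. for a copy, where dest, src and len are placeholders
 * for the caller's buffers:
 *
 *	async_tx_quiesce(&depend_tx);
 *	memcpy(dest, src, len);
 *	async_tx_sync_epilog(cb_fn, cb_param);
 */
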
module_init(async_tx_init);
module_exit(async_tx_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");