/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
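
/*
 * Example (illustrative sketch, not part of the original header): a cookie
 * returned by a submission helper such as dma_async_memcpy_buf_to_buf()
 * should be screened with dma_submit_error() before it is polled; "chan",
 * "dst", "src" and "len" are assumed to be supplied by the caller.
 *
 *	dma_cookie_t cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (dma_submit_error(cookie))
 *		return -ENOMEM;
 */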

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *  (if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as
 *  single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};
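
/*
 * Example (illustrative sketch, not part of the original header): these
 * flags are OR'd together and passed to a device_prep_* method to request
 * a completion interrupt on an acknowledged descriptor; "chan", "dest",
 * "src" and "len" are assumed to be set up by the caller.
 *
 *	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
 */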

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};
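
/*
 * Example (illustrative sketch): a driver stores the validation result in
 * the sum_check_flags word handed to device_prep_dma_pq_val(); callers
 * then test the individual bits.  "pqres" is assumed to have been filled
 * in by a completed validation, and the two handlers are hypothetical.
 *
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		handle_p_mismatch();
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		handle_q_mismatch();
 */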

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan - driver channel device
 * @device - sysfs device
 * @dev_id - parent dma_device dev_id
 * @idr_ref - reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  Here 'suitable' means a non-busy channel that
 * satisfies the given capability mask.  The routine returns 'true' to
 * indicate that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
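
/*
 * Example (illustrative sketch): a filter that only accepts channels
 * belonging to a specific dma_device, passed through the filter_param
 * pointer of dma_request_channel().  "my_dev_filter" and "wanted" are
 * names invented for this sketch.
 *
 *	static bool my_dev_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct dma_device *wanted = filter_param;
 *
 *		return chan->device == wanted;
 *	}
 */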

typedef void (*dma_async_tx_callback)(void *dma_async_param);

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};
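
/*
 * Example (illustrative sketch): the usual descriptor life cycle is
 * prepare, attach a completion callback, then submit.  "my_completion_fn"
 * and "my_ctx" are assumptions of this sketch, not part of the API.
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_completion_fn;
 *	tx->callback_param = my_ctx;
 *	cookie = tx->tx_submit(tx);
 */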

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}
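
/*
 * Example (illustrative sketch): a driver whose engine can take 8 PQ
 * sources but cannot natively continue a PQ operation would advertise
 * that limit at probe time; "my_dma_dev" is a name assumed for this
 * sketch.
 *
 *	dma_set_maxpq(&my_dma_dev, 8, 0);
 */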

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}
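
/*
 * Worked example (not part of the original header): with max_pq == 8 and
 * no native continuation support, a continued PQ operation
 * (DMA_PREP_CONTINUE set, P enabled) burns 3 source slots on the {00}*P,
 * {01}*Q and {00}*Q terms above, so dma_maxpq() returns 8 - 3 = 5 usable
 * sources; with P disabled only the {01}*Q term is needed, and it
 * returns 8 - 1 = 7.
 */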

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask); \
		(cap) < DMA_TX_TYPE_END; \
		(cap) = next_dma_cap((cap), (mask)))
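
/*
 * Example (illustrative sketch): walking every capability advertised by a
 * device with the helpers above; "device" is assumed to be a valid
 * struct dma_device pointer.
 *
 *	enum dma_transaction_type cap;
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		printk(KERN_INFO "capability %d supported\n", cap);
 */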

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)
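
/*
 * Example (illustrative sketch): a simple busy-wait on a single cookie,
 * combining dma_async_memcpy_issue_pending() with the completion poll;
 * production code would sleep or use a completion callback instead of
 * spinning.
 *
 *	dma_async_memcpy_issue_pending(chan);
 *	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */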

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
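
/*
 * Example (illustrative sketch): requesting an exclusive memcpy-capable
 * channel with the capability-mask helpers above and releasing it when
 * done; the filter function argument is optional and NULL here.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		dma_release_channel(chan);
 */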

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */