2 * offload engine driver for the Intel XScale series of I/O processors
3 * Copyright © 2006, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * This driver supports the asynchronous DMA copy and RAID engines available
22 * on the Intel XScale(R) family of I/O Processors (IOP 32x, 33x, 134x)
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/async_tx.h>
28 #include <linux/delay.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/spinlock.h>
31 #include <linux/interrupt.h>
32 #include <linux/platform_device.h>
33 #include <linux/memory.h>
34 #include <linux/ioport.h>
36 #include <asm/arch/adma.h>
38 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
39 #define to_iop_adma_device(dev) \
40 container_of(dev, struct iop_adma_device, common)
41 #define tx_to_iop_adma_slot(tx) \
42 container_of(tx, struct iop_adma_desc_slot, async_tx)
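/*
 * Minimal usage sketch for the helpers above (illustrative, assuming a valid
 * struct dma_chan *chan and struct dma_async_tx_descriptor *tx handed in by
 * the dmaengine core; 'common' is the embedded generic member in each case):
 *
 *	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 *	struct iop_adma_device *adev = iop_chan->device;
 *	struct iop_adma_desc_slot *slot = tx_to_iop_adma_slot(tx);
 */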
45 * iop_adma_free_slots - flags descriptor slots for reuse
47 * Caller must hold &iop_chan->lock while calling this function
49 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
51 int stride = slot->slots_per_op;
54 slot->slots_per_op = 0;
55 slot = list_entry(slot->slot_node.next,
56 struct iop_adma_desc_slot,
62 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
63 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
65 BUG_ON(desc->async_tx.cookie < 0);
66 if (desc->async_tx.cookie > 0) {
67 cookie = desc->async_tx.cookie;
68 desc->async_tx.cookie = 0;
70 /* call the callback (must not sleep or submit new
71 * operations to this channel)
73 if (desc->async_tx.callback)
74 desc->async_tx.callback(
75 desc->async_tx.callback_param);
77 /* unmap dma addresses
78 * (unmap_single vs unmap_page?)
80 if (desc->group_head && desc->unmap_len) {
81 struct iop_adma_desc_slot *unmap = desc->group_head;
83 &iop_chan->device->pdev->dev;
84 u32 len = unmap->unmap_len;
85 u32 src_cnt = unmap->unmap_src_cnt;
86 dma_addr_t addr = iop_desc_get_dest_addr(unmap,
89 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
91 addr = iop_desc_get_src_addr(unmap,
94 dma_unmap_page(dev, addr, len,
97 desc->group_head = NULL;
101 /* run dependent operations */
102 async_tx_run_dependencies(&desc->async_tx);
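/*
 * The callback invoked above runs from the cleanup tasklet, so a client
 * completion routine must not sleep or submit new operations to this
 * channel.  A hypothetical client callback (not part of this file) might
 * simply signal a completion:
 *
 *	static void my_copy_done(void *param)
 *	{
 *		complete((struct completion *) param);
 *	}
 *
 * assigned via tx->callback and tx->callback_param before the descriptor
 * is submitted.
 */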
108 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
109 struct iop_adma_chan *iop_chan)
111 /* the client is allowed to attach dependent operations until 'ack' is set
114 if (!desc->async_tx.ack)
117 /* leave the last descriptor in the chain
118 * so we can append to it
120 if (desc->chain_node.next == &iop_chan->chain)
123 dev_dbg(iop_chan->device->common.dev,
124 "\tfree slot: %d slots_per_op: %d\n",
125 desc->idx, desc->slots_per_op);
127 list_del(&desc->chain_node);
128 iop_adma_free_slots(desc);
133 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
135 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
136 dma_cookie_t cookie = 0;
137 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
138 int busy = iop_chan_is_busy(iop_chan);
139 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
141 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
142 /* free completed slots from the chain starting with
143 * the oldest descriptor
145 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
147 pr_debug("\tcookie: %d slot: %d busy: %d "
148 "this_desc: %#x next_desc: %#x ack: %d\n",
149 iter->async_tx.cookie, iter->idx, busy,
150 iter->async_tx.phys, iop_desc_get_next_desc(iter),
153 prefetch(&_iter->async_tx);
155 /* do not advance past the current descriptor loaded into the
156 * hardware channel, subsequent descriptors are either in
157 * process or have not been submitted
162 /* stop the search if we reach the current descriptor and the
163 * channel is busy, or if it appears that the current descriptor
164 * needs to be re-read (i.e. has been appended to)
166 if (iter->async_tx.phys == current_desc) {
167 BUG_ON(seen_current++);
168 if (busy || iop_desc_get_next_desc(iter))
172 /* detect the start of a group transaction */
173 if (!slot_cnt && !slots_per_op) {
174 slot_cnt = iter->slot_cnt;
175 slots_per_op = iter->slots_per_op;
176 if (slot_cnt <= slots_per_op) {
183 pr_debug("\tgroup++\n");
186 slot_cnt -= slots_per_op;
189 /* all the members of a group are complete */
190 if (slots_per_op != 0 && slot_cnt == 0) {
191 struct iop_adma_desc_slot *grp_iter, *_grp_iter;
192 int end_of_chain = 0;
193 pr_debug("\tgroup end\n");
195 /* collect the total results */
196 if (grp_start->xor_check_result) {
197 u32 zero_sum_result = 0;
198 slot_cnt = grp_start->slot_cnt;
199 grp_iter = grp_start;
201 list_for_each_entry_from(grp_iter,
202 &iop_chan->chain, chain_node) {
204 iop_desc_get_zero_result(grp_iter);
205 pr_debug("\titer%d result: %d\n",
206 grp_iter->idx, zero_sum_result);
207 slot_cnt -= slots_per_op;
211 pr_debug("\tgrp_start->xor_check_result: %p\n",
212 grp_start->xor_check_result);
213 *grp_start->xor_check_result = zero_sum_result;
216 /* clean up the group */
217 slot_cnt = grp_start->slot_cnt;
218 grp_iter = grp_start;
219 list_for_each_entry_safe_from(grp_iter, _grp_iter,
220 &iop_chan->chain, chain_node) {
221 cookie = iop_adma_run_tx_complete_actions(
222 grp_iter, iop_chan, cookie);
224 slot_cnt -= slots_per_op;
225 end_of_chain = iop_adma_clean_slot(grp_iter,
228 if (slot_cnt == 0 || end_of_chain)
232 /* the group should be complete at this point */
241 } else if (slots_per_op) /* wait for group completion */
244 /* write back zero sum results (single descriptor case) */
245 if (iter->xor_check_result && iter->async_tx.cookie)
246 *iter->xor_check_result =
247 iop_desc_get_zero_result(iter);
249 cookie = iop_adma_run_tx_complete_actions(
250 iter, iop_chan, cookie);
252 if (iop_adma_clean_slot(iter, iop_chan))
256 BUG_ON(!seen_current);
259 iop_chan->completed_cookie = cookie;
260 pr_debug("\tcompleted cookie %d\n", cookie);
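/*
 * Cookie bookkeeping sketch (illustrative): iop_chan->common.cookie tracks
 * the most recently assigned cookie, while iop_chan->completed_cookie,
 * updated above, tracks the last descriptor retired by this cleanup pass.
 * A client polls roughly as the self-tests below do:
 *
 *	if (iop_adma_is_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS)
 *		... the transaction identified by 'cookie' has completed ...
 */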
265 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
267 spin_lock_bh(&iop_chan->lock);
268 __iop_adma_slot_cleanup(iop_chan);
269 spin_unlock_bh(&iop_chan->lock);
272 static void iop_adma_tasklet(unsigned long data)
274 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
276 spin_lock(&iop_chan->lock);
277 __iop_adma_slot_cleanup(iop_chan);
278 spin_unlock(&iop_chan->lock);
281 static struct iop_adma_desc_slot *
282 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
285 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
287 int slots_found, retry = 0;
289 /* start search from the last allocated descriptor;
290 * if a contiguous allocation cannot be found, start searching
291 * from the beginning of the list
296 iter = iop_chan->last_used;
298 iter = list_entry(&iop_chan->all_slots,
299 struct iop_adma_desc_slot,
302 list_for_each_entry_safe_continue(
303 iter, _iter, &iop_chan->all_slots, slot_node) {
305 prefetch(&_iter->async_tx);
306 if (iter->slots_per_op) {
307 /* give up after finding the first busy slot
308 * on the second pass through the list
317 /* start the allocation if the slot is correctly aligned */
318 if (!slots_found++) {
319 if (iop_desc_is_aligned(iter, slots_per_op))
327 if (slots_found == num_slots) {
328 struct iop_adma_desc_slot *alloc_tail = NULL;
329 struct iop_adma_desc_slot *last_used = NULL;
333 dev_dbg(iop_chan->device->common.dev,
334 "allocated slot: %d "
335 "(desc %p phys: %#x) slots_per_op %d\n",
336 iter->idx, iter->hw_desc,
337 iter->async_tx.phys, slots_per_op);
339 /* pre-ack all but the last descriptor */
340 if (num_slots != slots_per_op)
341 iter->async_tx.ack = 1;
343 iter->async_tx.ack = 0;
345 list_add_tail(&iter->chain_node, &chain);
347 iter->async_tx.cookie = 0;
348 iter->slot_cnt = num_slots;
349 iter->xor_check_result = NULL;
350 for (i = 0; i < slots_per_op; i++) {
351 iter->slots_per_op = slots_per_op - i;
353 iter = list_entry(iter->slot_node.next,
354 struct iop_adma_desc_slot,
357 num_slots -= slots_per_op;
359 alloc_tail->group_head = alloc_start;
360 alloc_tail->async_tx.cookie = -EBUSY;
361 list_splice(&chain, &alloc_tail->async_tx.tx_list);
362 iop_chan->last_used = last_used;
363 iop_desc_clear_next_desc(alloc_start);
364 iop_desc_clear_next_desc(alloc_tail);
371 /* try to free some slots if the allocation fails */
372 tasklet_schedule(&iop_chan->irq_tasklet);
378 iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
379 struct iop_adma_desc_slot *desc)
381 dma_cookie_t cookie = iop_chan->common.cookie;
385 iop_chan->common.cookie = desc->async_tx.cookie = cookie;
389 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
391 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
394 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
395 iop_chan->pending = 0;
396 iop_chan_append(iop_chan);
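/*
 * Batching sketch (illustrative; mirrors the self-test further below):
 * submitted descriptors only reach the hardware once 'pending' crosses
 * IOP_ADMA_THRESHOLD or the client explicitly kicks the channel:
 *
 *	tx = iop_adma_prep_dma_memcpy(chan, dest_dma, src_dma, len, 1);
 *	cookie = iop_adma_tx_submit(tx);
 *	iop_adma_issue_pending(chan);
 */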
401 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
403 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
404 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
405 struct iop_adma_desc_slot *grp_start, *old_chain_tail;
410 grp_start = sw_desc->group_head;
411 slot_cnt = grp_start->slot_cnt;
412 slots_per_op = grp_start->slots_per_op;
414 spin_lock_bh(&iop_chan->lock);
415 cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
417 old_chain_tail = list_entry(iop_chan->chain.prev,
418 struct iop_adma_desc_slot, chain_node);
419 list_splice_init(&sw_desc->async_tx.tx_list,
420 &old_chain_tail->chain_node);
422 /* fix up the hardware chain */
423 iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
425 /* 1/ don't add pre-chained descriptors
426 * 2/ dummy read to flush next_desc write
428 BUG_ON(iop_desc_get_next_desc(sw_desc));
430 /* increment the pending count by the number of slots;
431 * memcpy operations have a 1:1 (slot:operation) relation,
432 * other operations are heavier and will pop the threshold more often
435 iop_chan->pending += slot_cnt;
436 iop_adma_check_threshold(iop_chan);
437 spin_unlock_bh(&iop_chan->lock);
439 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
440 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
445 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
446 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
448 /* returns the number of allocated descriptors */
449 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
453 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
454 struct iop_adma_desc_slot *slot = NULL;
455 int init = iop_chan->slots_allocated ? 0 : 1;
456 struct iop_adma_platform_data *plat_data =
457 iop_chan->device->pdev->dev.platform_data;
458 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
460 /* Allocate descriptor slots */
462 idx = iop_chan->slots_allocated;
463 if (idx == num_descs_in_pool)
466 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
468 printk(KERN_INFO "IOP ADMA Channel only initialized"
469 " %d descriptor slots\n", idx);
472 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
473 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
475 dma_async_tx_descriptor_init(&slot->async_tx, chan);
476 slot->async_tx.tx_submit = iop_adma_tx_submit;
477 INIT_LIST_HEAD(&slot->chain_node);
478 INIT_LIST_HEAD(&slot->slot_node);
479 INIT_LIST_HEAD(&slot->async_tx.tx_list);
480 hw_desc = (char *) iop_chan->device->dma_desc_pool;
481 slot->async_tx.phys =
482 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
485 spin_lock_bh(&iop_chan->lock);
486 iop_chan->slots_allocated++;
487 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
488 spin_unlock_bh(&iop_chan->lock);
489 } while (iop_chan->slots_allocated < num_descs_in_pool);
491 if (idx && !iop_chan->last_used)
492 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
493 struct iop_adma_desc_slot,
496 dev_dbg(iop_chan->device->common.dev,
497 "allocated %d descriptor slots last_used: %p\n",
498 iop_chan->slots_allocated, iop_chan->last_used);
500 /* initialize the channel and the chain with a null operation */
502 if (dma_has_cap(DMA_MEMCPY,
503 iop_chan->device->common.cap_mask))
504 iop_chan_start_null_memcpy(iop_chan);
505 else if (dma_has_cap(DMA_XOR,
506 iop_chan->device->common.cap_mask))
507 iop_chan_start_null_xor(iop_chan);
512 return (idx > 0) ? idx : -ENOMEM;
515 static struct dma_async_tx_descriptor *
516 iop_adma_prep_dma_interrupt(struct dma_chan *chan)
518 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
519 struct iop_adma_desc_slot *sw_desc, *grp_start;
520 int slot_cnt, slots_per_op;
522 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
524 spin_lock_bh(&iop_chan->lock);
525 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
526 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
528 grp_start = sw_desc->group_head;
529 iop_desc_init_interrupt(grp_start, iop_chan);
530 grp_start->unmap_len = 0;
532 spin_unlock_bh(&iop_chan->lock);
534 return sw_desc ? &sw_desc->async_tx : NULL;
537 static struct dma_async_tx_descriptor *
538 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
539 dma_addr_t dma_src, size_t len, unsigned long flags)
541 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
542 struct iop_adma_desc_slot *sw_desc, *grp_start;
543 int slot_cnt, slots_per_op;
547 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
549 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
552 spin_lock_bh(&iop_chan->lock);
553 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
554 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
556 grp_start = sw_desc->group_head;
557 iop_desc_init_memcpy(grp_start, flags);
558 iop_desc_set_byte_count(grp_start, iop_chan, len);
559 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
560 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
561 sw_desc->unmap_src_cnt = 1;
562 sw_desc->unmap_len = len;
564 spin_unlock_bh(&iop_chan->lock);
566 return sw_desc ? &sw_desc->async_tx : NULL;
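/*
 * The unmap_src_cnt/unmap_len values recorded above are consumed by
 * iop_adma_run_tx_complete_actions(), which dma_unmap_page()s the source
 * and destination once the descriptor retires; as the self-tests below
 * assume, the cleanup path performs that unmapping rather than the caller.
 */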
569 static struct dma_async_tx_descriptor *
570 iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
571 int value, size_t len, unsigned long flags)
573 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
574 struct iop_adma_desc_slot *sw_desc, *grp_start;
575 int slot_cnt, slots_per_op;
579 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
581 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
584 spin_lock_bh(&iop_chan->lock);
585 slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
586 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
588 grp_start = sw_desc->group_head;
589 iop_desc_init_memset(grp_start, flags);
590 iop_desc_set_byte_count(grp_start, iop_chan, len);
591 iop_desc_set_block_fill_val(grp_start, value);
592 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
593 sw_desc->unmap_src_cnt = 1;
594 sw_desc->unmap_len = len;
596 spin_unlock_bh(&iop_chan->lock);
598 return sw_desc ? &sw_desc->async_tx : NULL;
601 static struct dma_async_tx_descriptor *
602 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
603 dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
606 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
607 struct iop_adma_desc_slot *sw_desc, *grp_start;
608 int slot_cnt, slots_per_op;
612 BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
614 dev_dbg(iop_chan->device->common.dev,
615 "%s src_cnt: %d len: %u flags: %lx\n",
616 __func__, src_cnt, len, flags);
618 spin_lock_bh(&iop_chan->lock);
619 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
620 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
622 grp_start = sw_desc->group_head;
623 iop_desc_init_xor(grp_start, src_cnt, flags);
624 iop_desc_set_byte_count(grp_start, iop_chan, len);
625 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
626 sw_desc->unmap_src_cnt = src_cnt;
627 sw_desc->unmap_len = len;
629 iop_desc_set_xor_src_addr(grp_start, src_cnt,
632 spin_unlock_bh(&iop_chan->lock);
634 return sw_desc ? &sw_desc->async_tx : NULL;
637 static struct dma_async_tx_descriptor *
638 iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
639 unsigned int src_cnt, size_t len, u32 *result,
642 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
643 struct iop_adma_desc_slot *sw_desc, *grp_start;
644 int slot_cnt, slots_per_op;
649 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
650 __func__, src_cnt, len);
652 spin_lock_bh(&iop_chan->lock);
653 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
654 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
656 grp_start = sw_desc->group_head;
657 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
658 iop_desc_set_zero_sum_byte_count(grp_start, len);
659 grp_start->xor_check_result = result;
660 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
661 __func__, grp_start->xor_check_result);
662 sw_desc->unmap_src_cnt = src_cnt;
663 sw_desc->unmap_len = len;
665 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
668 spin_unlock_bh(&iop_chan->lock);
670 return sw_desc ? &sw_desc->async_tx : NULL;
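/*
 * The u32 pointed to by 'result' is written back during slot cleanup
 * (see __iop_adma_slot_cleanup above): it ends up 0 when every source
 * XORs to zero and non-zero otherwise, which is exactly what the zero-sum
 * self-test below checks for.
 */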
673 static void iop_adma_free_chan_resources(struct dma_chan *chan)
675 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
676 struct iop_adma_desc_slot *iter, *_iter;
677 int in_use_descs = 0;
679 iop_adma_slot_cleanup(iop_chan);
681 spin_lock_bh(&iop_chan->lock);
682 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
685 list_del(&iter->chain_node);
687 list_for_each_entry_safe_reverse(
688 iter, _iter, &iop_chan->all_slots, slot_node) {
689 list_del(&iter->slot_node);
691 iop_chan->slots_allocated--;
693 iop_chan->last_used = NULL;
695 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
696 __func__, iop_chan->slots_allocated);
697 spin_unlock_bh(&iop_chan->lock);
699 /* one is ok since we left it on there on purpose */
700 if (in_use_descs > 1)
701 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
706 * iop_adma_is_complete - poll the status of an ADMA transaction
707 * @chan: ADMA channel handle
708 * @cookie: ADMA transaction identifier
710 static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
715 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
716 dma_cookie_t last_used;
717 dma_cookie_t last_complete;
720 last_used = chan->cookie;
721 last_complete = iop_chan->completed_cookie;
724 *done = last_complete;
728 ret = dma_async_is_complete(cookie, last_complete, last_used);
729 if (ret == DMA_SUCCESS)
732 iop_adma_slot_cleanup(iop_chan);
734 last_used = chan->cookie;
735 last_complete = iop_chan->completed_cookie;
738 *done = last_complete;
742 return dma_async_is_complete(cookie, last_complete, last_used);
745 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
747 struct iop_adma_chan *chan = data;
749 dev_dbg(chan->device->common.dev, "%s\n", __func__);
751 tasklet_schedule(&chan->irq_tasklet);
753 iop_adma_device_clear_eot_status(chan);
758 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
760 struct iop_adma_chan *chan = data;
762 dev_dbg(chan->device->common.dev, "%s\n", __func__);
764 tasklet_schedule(&chan->irq_tasklet);
766 iop_adma_device_clear_eoc_status(chan);
771 static irqreturn_t iop_adma_err_handler(int irq, void *data)
773 struct iop_adma_chan *chan = data;
774 unsigned long status = iop_chan_get_status(chan);
776 dev_printk(KERN_ERR, chan->device->common.dev,
777 "error ( %s%s%s%s%s%s%s)\n",
778 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
779 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
780 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
781 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
782 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
783 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
784 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
786 iop_adma_device_clear_err_status(chan);
793 static void iop_adma_issue_pending(struct dma_chan *chan)
795 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
797 if (iop_chan->pending) {
798 iop_chan->pending = 0;
799 iop_chan_append(iop_chan);
804 * Perform a transaction to verify the HW works.
806 #define IOP_ADMA_TEST_SIZE 2000
808 static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
812 dma_addr_t src_dma, dest_dma;
813 struct dma_chan *dma_chan;
815 struct dma_async_tx_descriptor *tx;
817 struct iop_adma_chan *iop_chan;
819 dev_dbg(device->common.dev, "%s\n", __func__);
821 src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
824 dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
830 /* Fill in src buffer */
831 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
832 ((u8 *) src)[i] = (u8)i;
834 memset(dest, 0, IOP_ADMA_TEST_SIZE);
836 /* Start copy, using first DMA channel */
837 dma_chan = container_of(device->common.channels.next,
840 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
845 dest_dma = dma_map_single(dma_chan->device->dev, dest,
846 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
847 src_dma = dma_map_single(dma_chan->device->dev, src,
848 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
849 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
850 IOP_ADMA_TEST_SIZE, 1);
852 cookie = iop_adma_tx_submit(tx);
853 iop_adma_issue_pending(dma_chan);
857 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
859 dev_printk(KERN_ERR, dma_chan->device->dev,
860 "Self-test copy timed out, disabling\n");
865 iop_chan = to_iop_adma_chan(dma_chan);
866 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
867 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
868 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
869 dev_printk(KERN_ERR, dma_chan->device->dev,
870 "Self-test copy failed compare, disabling\n");
876 iop_adma_free_chan_resources(dma_chan);
883 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
885 iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
889 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
890 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
891 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
892 dma_addr_t dma_addr, dest_dma;
893 struct dma_async_tx_descriptor *tx;
894 struct dma_chan *dma_chan;
900 struct iop_adma_chan *iop_chan;
902 dev_dbg(device->common.dev, "%s\n", __func__);
904 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
905 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
906 if (!xor_srcs[src_idx])
908 __free_page(xor_srcs[src_idx]);
913 dest = alloc_page(GFP_KERNEL);
916 __free_page(xor_srcs[src_idx]);
920 /* Fill in src buffers */
921 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
922 u8 *ptr = page_address(xor_srcs[src_idx]);
923 for (i = 0; i < PAGE_SIZE; i++)
924 ptr[i] = (1 << src_idx);
927 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
928 cmp_byte ^= (u8) (1 << src_idx);
930 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
931 (cmp_byte << 8) | cmp_byte;
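/*
 * Worked example: with IOP_ADMA_NUM_SRC_TEST == 4 the source pages are
 * filled with 0x01, 0x02, 0x04 and 0x08 respectively, so
 * cmp_byte = 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f and cmp_word = 0x0f0f0f0f,
 * the value every 32-bit word of the XOR destination must hold.
 */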
933 memset(page_address(dest), 0, PAGE_SIZE);
935 dma_chan = container_of(device->common.channels.next,
938 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
944 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
945 PAGE_SIZE, DMA_FROM_DEVICE);
946 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
947 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
948 0, PAGE_SIZE, DMA_TO_DEVICE);
949 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
950 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
952 cookie = iop_adma_tx_submit(tx);
953 iop_adma_issue_pending(dma_chan);
957 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
959 dev_printk(KERN_ERR, dma_chan->device->dev,
960 "Self-test xor timed out, disabling\n");
965 iop_chan = to_iop_adma_chan(dma_chan);
966 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
967 PAGE_SIZE, DMA_FROM_DEVICE);
968 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
969 u32 *ptr = page_address(dest);
970 if (ptr[i] != cmp_word) {
971 dev_printk(KERN_ERR, dma_chan->device->dev,
972 "Self-test xor failed compare, disabling\n");
977 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
978 PAGE_SIZE, DMA_TO_DEVICE);
980 /* skip zero sum if the capability is not present */
981 if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
984 /* zero sum the sources with the destination page */
985 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
986 zero_sum_srcs[i] = xor_srcs[i];
987 zero_sum_srcs[i] = dest;
991 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
992 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
993 zero_sum_srcs[i], 0, PAGE_SIZE,
995 tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
996 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
997 &zero_sum_result, 1);
999 cookie = iop_adma_tx_submit(tx);
1000 iop_adma_issue_pending(dma_chan);
1004 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
1005 dev_printk(KERN_ERR, dma_chan->device->dev,
1006 "Self-test zero sum timed out, disabling\n");
1008 goto free_resources;
1011 if (zero_sum_result != 0) {
1012 dev_printk(KERN_ERR, dma_chan->device->dev,
1013 "Self-test zero sum failed compare, disabling\n");
1015 goto free_resources;
1019 dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
1020 PAGE_SIZE, DMA_FROM_DEVICE);
1021 tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
1023 cookie = iop_adma_tx_submit(tx);
1024 iop_adma_issue_pending(dma_chan);
1028 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
1029 dev_printk(KERN_ERR, dma_chan->device->dev,
1030 "Self-test memset timed out, disabling\n");
1032 goto free_resources;
1035 for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
1036 u32 *ptr = page_address(dest);
1038 dev_printk(KERN_ERR, dma_chan->device->dev,
1039 "Self-test memset failed compare, disabling\n");
1041 goto free_resources;
1045 /* test for non-zero parity sum */
1046 zero_sum_result = 0;
1047 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1048 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1049 zero_sum_srcs[i], 0, PAGE_SIZE,
1051 tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
1052 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1053 &zero_sum_result, 1);
1055 cookie = iop_adma_tx_submit(tx);
1056 iop_adma_issue_pending(dma_chan);
1060 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
1061 dev_printk(KERN_ERR, dma_chan->device->dev,
1062 "Self-test non-zero sum timed out, disabling\n");
1064 goto free_resources;
1067 if (zero_sum_result != 1) {
1068 dev_printk(KERN_ERR, dma_chan->device->dev,
1069 "Self-test non-zero sum failed compare, disabling\n");
1071 goto free_resources;
1075 iop_adma_free_chan_resources(dma_chan);
1077 src_idx = IOP_ADMA_NUM_SRC_TEST;
1079 __free_page(xor_srcs[src_idx]);
1084 static int __devexit iop_adma_remove(struct platform_device *dev)
1086 struct iop_adma_device *device = platform_get_drvdata(dev);
1087 struct dma_chan *chan, *_chan;
1088 struct iop_adma_chan *iop_chan;
1090 struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1092 dma_async_device_unregister(&device->common);
1094 for (i = 0; i < 3; i++) {
1096 irq = platform_get_irq(dev, i);
1097 free_irq(irq, device);
1100 dma_free_coherent(&dev->dev, plat_data->pool_size,
1101 device->dma_desc_pool_virt, device->dma_desc_pool);
1104 struct resource *res;
1105 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1106 release_mem_region(res->start, res->end - res->start + 1);
1109 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1111 iop_chan = to_iop_adma_chan(chan);
1112 list_del(&chan->device_node);
1120 static int __devinit iop_adma_probe(struct platform_device *pdev)
1122 struct resource *res;
1124 struct iop_adma_device *adev;
1125 struct iop_adma_chan *iop_chan;
1126 struct dma_device *dma_dev;
1127 struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
1129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1133 if (!devm_request_mem_region(&pdev->dev, res->start,
1134 res->end - res->start + 1, pdev->name))
1137 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1140 dma_dev = &adev->common;
1142 /* allocate coherent memory for hardware descriptors
1143 * note: writecombine gives slightly better performance, but
1144 * requires that we explicitly flush the writes
1146 if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1147 plat_data->pool_size,
1148 &adev->dma_desc_pool,
1149 GFP_KERNEL)) == NULL) {
1154 dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1155 __func__, adev->dma_desc_pool_virt,
1156 (void *) adev->dma_desc_pool);
1158 adev->id = plat_data->hw_id;
1160 /* discover transaction capabilities from the platform data */
1161 dma_dev->cap_mask = plat_data->cap_mask;
1164 platform_set_drvdata(pdev, adev);
1166 INIT_LIST_HEAD(&dma_dev->channels);
1168 /* set base routines */
1169 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1170 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1171 dma_dev->device_is_tx_complete = iop_adma_is_complete;
1172 dma_dev->device_issue_pending = iop_adma_issue_pending;
1173 dma_dev->dev = &pdev->dev;
1175 /* set prep routines based on capability */
1176 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1177 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1178 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1179 dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
1180 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1181 dma_dev->max_xor = iop_adma_get_max_xor();
1182 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1184 if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
1185 dma_dev->device_prep_dma_zero_sum =
1186 iop_adma_prep_dma_zero_sum;
1187 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1188 dma_dev->device_prep_dma_interrupt =
1189 iop_adma_prep_dma_interrupt;
1191 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1196 iop_chan->device = adev;
1198 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1199 res->end - res->start + 1);
1200 if (!iop_chan->mmr_base) {
1202 goto err_free_iop_chan;
1204 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1207 /* clear errors before enabling interrupts */
1208 iop_adma_device_clear_err_status(iop_chan);
1210 for (i = 0; i < 3; i++) {
1211 irq_handler_t handler[] = { iop_adma_eot_handler,
1212 iop_adma_eoc_handler,
1213 iop_adma_err_handler };
1214 int irq = platform_get_irq(pdev, i);
1217 goto err_free_iop_chan;
1219 ret = devm_request_irq(&pdev->dev, irq,
1220 handler[i], 0, pdev->name, iop_chan);
1222 goto err_free_iop_chan;
1226 spin_lock_init(&iop_chan->lock);
1227 INIT_LIST_HEAD(&iop_chan->chain);
1228 INIT_LIST_HEAD(&iop_chan->all_slots);
1229 INIT_RCU_HEAD(&iop_chan->common.rcu);
1230 iop_chan->common.device = dma_dev;
1231 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1233 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1234 ret = iop_adma_memcpy_self_test(adev);
1235 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1237 goto err_free_iop_chan;
1240 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
1241 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1242 ret = iop_adma_xor_zero_sum_self_test(adev);
1243 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1245 goto err_free_iop_chan;
1248 dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
1249 "( %s%s%s%s%s%s%s%s%s%s)\n",
1250 dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
1251 dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
1252 dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
1253 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1254 dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
1255 dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
1256 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1257 dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
1258 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1259 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1261 dma_async_device_register(dma_dev);
1267 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1268 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1275 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1277 struct iop_adma_desc_slot *sw_desc, *grp_start;
1278 dma_cookie_t cookie;
1279 int slot_cnt, slots_per_op;
1281 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1283 spin_lock_bh(&iop_chan->lock);
1284 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1285 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1287 grp_start = sw_desc->group_head;
1289 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
1290 sw_desc->async_tx.ack = 1;
1291 iop_desc_init_memcpy(grp_start, 0);
1292 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1293 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1294 iop_desc_set_memcpy_src_addr(grp_start, 0);
1296 cookie = iop_chan->common.cookie;
1301 /* initialize the completed cookie to be less than
1302 * the most recently used cookie
1304 iop_chan->completed_cookie = cookie - 1;
1305 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1307 /* channel should not be busy */
1308 BUG_ON(iop_chan_is_busy(iop_chan));
1310 /* clear any prior error-status bits */
1311 iop_adma_device_clear_err_status(iop_chan);
1313 /* disable operation */
1314 iop_chan_disable(iop_chan);
1316 /* set the descriptor address */
1317 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1319 /* 1/ don't add pre-chained descriptors
1320 * 2/ dummy read to flush next_desc write
1322 BUG_ON(iop_desc_get_next_desc(sw_desc));
1324 /* run the descriptor */
1325 iop_chan_enable(iop_chan);
1327 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1328 "failed to allocate null descriptor\n");
1329 spin_unlock_bh(&iop_chan->lock);
1332 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1334 struct iop_adma_desc_slot *sw_desc, *grp_start;
1335 dma_cookie_t cookie;
1336 int slot_cnt, slots_per_op;
1338 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1340 spin_lock_bh(&iop_chan->lock);
1341 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1342 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1344 grp_start = sw_desc->group_head;
1345 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
1346 sw_desc->async_tx.ack = 1;
1347 iop_desc_init_null_xor(grp_start, 2, 0);
1348 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1349 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1350 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1351 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1353 cookie = iop_chan->common.cookie;
1358 /* initialize the completed cookie to be less than
1359 * the most recently used cookie
1361 iop_chan->completed_cookie = cookie - 1;
1362 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1364 /* channel should not be busy */
1365 BUG_ON(iop_chan_is_busy(iop_chan));
1367 /* clear any prior error-status bits */
1368 iop_adma_device_clear_err_status(iop_chan);
1370 /* disable operation */
1371 iop_chan_disable(iop_chan);
1373 /* set the descriptor address */
1374 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1376 /* 1/ don't add pre-chained descriptors
1377 * 2/ dummy read to flush next_desc write
1379 BUG_ON(iop_desc_get_next_desc(sw_desc));
1381 /* run the descriptor */
1382 iop_chan_enable(iop_chan);
1384 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1385 "failed to allocate null descriptor\n");
1386 spin_unlock_bh(&iop_chan->lock);
1389 static struct platform_driver iop_adma_driver = {
1390 .probe = iop_adma_probe,
1391 .remove = iop_adma_remove,
1393 .owner = THIS_MODULE,
1398 static int __init iop_adma_init(void)
1400 return platform_driver_register(&iop_adma_driver);
1403 /* it's currently unsafe to unload this module */
1405 static void __exit iop_adma_exit(void)
1407 platform_driver_unregister(&iop_adma_driver);
1410 module_exit(iop_adma_exit);
1413 module_init(iop_adma_init);
1415 MODULE_AUTHOR("Intel Corporation");
1416 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1417 MODULE_LICENSE("GPL");