/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
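/*
 * Note: in vector-per-channel mode each channel owns a dedicated MSI-X
 * vector, so unlike ioat_dma_do_interrupt() above there is no need to
 * read ATTNSTATUS and fan out; the channel comes straight from the irq
 * cookie registered alongside the vector.
 */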
static void ioat1_cleanup_tasklet(unsigned long data);
/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       work_func_t work_fn, void (*tasklet)(unsigned long),
		       unsigned long tasklet_data)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	INIT_DELAYED_WORK(&chan->work, work_fn);
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	tasklet_init(&chan->cleanup_task, tasklet, tasklet_data);
	tasklet_disable(&chan->cleanup_task);
}
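/*
 * Register layout note: each channel owns a 0x80-byte register bank
 * immediately after the 0x80-byte device-global bank, hence the
 * (0x80 * (idx + 1)) offset above; channel 0 lives at reg_base + 0x80,
 * channel 1 at reg_base + 0x100, and so on.
 */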
static void ioat1_reset_part2(struct work_struct *work);
/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_reset_part2,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
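/*
 * xfercap example: a scale of 20 read from IOAT_XFERCAP_OFFSET yields
 * 1UL << 20 = 1MB per hardware descriptor; a scale of 0 is special-cased
 * above to -1, i.e. effectively unlimited. Copies larger than xfercap
 * are split across multiple descriptors in ioat1_dma_prep_memcpy().
 */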
/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}
static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
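/*
 * Sketch of a dmaengine client driving this path (illustrative only;
 * my_callback/my_arg are hypothetical, error handling is elided, and
 * "chan" is a struct dma_chan obtained from the dmaengine core):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	tx->callback = my_callback;
 *	tx->callback_param = my_arg;
 *	cookie = tx->tx_submit(tx);		  // lands in ioat1_tx_submit()
 *	chan->device->device_issue_pending(chan); // rings the APPEND doorbell
 */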
/**
 * ioat1_reset_part2 - reinit the channel after a reset
 */
static void ioat1_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat_dma_chan *ioat;
	struct ioat_desc_sw *desc;
	int dmacount;
	bool start_null = false;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat_dma_chan, base);
	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->desc_lock);

	*chan->completion = 0;
	ioat->pending = 0;

	/* count the descriptors waiting */
	dmacount = 0;
	if (ioat->used_desc.prev) {
		desc = to_ioat_desc(ioat->used_desc.prev);
		do {
			dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat->used_desc.next);
	}

	if (dmacount) {
		/*
		 * write the new starting descriptor address
		 * this puts channel engine into ARMED state
		 */
		desc = to_ioat_desc(ioat->used_desc.prev);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, chan->reg_base
			+ IOAT_CHANCMD_OFFSET(chan->device->version));
	} else
		start_null = true;
	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_err(to_dev(chan),
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(chan), dmacount, ioat->desccount);

	if (start_null)
		ioat1_dma_start_null_desc(ioat);
}
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	if (!ioat->used_desc.prev)
		return;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */
	spin_lock_bh(&ioat->desc_lock);
	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&chan->work, RESET_DELAY);
}
/**
 * ioat1_chan_watchdog - watch for stuck channels
 */
static void ioat1_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat;
	struct ioat_chan_common *chan;
	int i;
	u64 completion;
	u32 completion_low;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat_dma_chan, base);

		if (/* have we started processing anything yet */
		    chan->last_completion
		    /* have we completed any since last watchdog cycle? */
		    && (chan->last_completion == chan->watchdog_completion)
		    /* has TCP stuck on one cookie since last watchdog? */
		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
		    /* is there something in the chain to be processed? */
		    /* CB1 chain always has at least the last one processed */
		    && (ioat->used_desc.prev != ioat->used_desc.next)
		    && ioat->pending == 0) {

			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * it is not garbage
			 * and it has changed since the last watchdog
			 * we can assume that channel
			 * is still working correctly
			 * and the problem is in completion writeback.
			 * update completion writeback
			 * with actual CHANSTS value
			 * else
			 * try resetting the channel
			 */

			/* we need to read the low address first as this
			 * causes the chipset to latch the upper bits
			 * for the subsequent read
			 */
			completion_low = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
			completion = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
			completion <<= 32;
			completion |= completion_low;
			compl_desc_addr_hw = completion &
					IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != chan->watchdog_completion)
			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				*chan->completion = completion;
			} else {
				ioat1_reset_channel(ioat);
				chan->watchdog_completion = 0;
				chan->last_compl_desc_addr_hw = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}

		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
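/*
 * Cookie note: dmaengine cookies are positive; the increment above wraps
 * back to 1 rather than going negative because negative cookie values are
 * reserved for error reporting in the async_tx API.
 */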
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
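/*
 * Splitting example: with xfercap = 1MB, a 2.5MB prep request is emitted
 * as three chained hardware descriptors (1MB + 1MB + 0.5MB). Only the
 * final descriptor carries int_en/compl_write, so the client observes a
 * single completion (and a single cookie) for the whole transaction.
 */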
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
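/*
 * The 64-bit completion word written back by hardware doubles as a status
 * register: the low IOAT_CHANSTS_DMA_TRANSFER_STATUS bits encode channel
 * state while the remaining bits hold the bus address of the last
 * completed descriptor, hence the masking above.
 */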
/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (time_after(jiffies,
			       chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
			ioat1_chan_watchdog(&(chan->device->work.work));
			chan->last_completion_time = jiffies;
		}
		return;
	}
	chan->last_completion_time = jiffies;

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			cookie = tx->cookie;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
			else
				tx->cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it, but don't look at it next
			 * time, either
			 */
			tx->cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);
	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, chan->reg_base
		+ IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}
void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}