safe/jmp/linux-2.6: drivers/dma/ioat_dma.c
index eef83ea..b3759c4 100644
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
 #include "ioatdma.h"
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
 
-#define INITIAL_IOAT_DESC_COUNT 128
-
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+static int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+                "high-water mark for pushing ioat descriptors (default: 4)");
+
+#define RESET_DELAY  msecs_to_jiffies(100)
+#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
+static void ioat_dma_chan_reset_part2(struct work_struct *work);
+static void ioat_dma_chan_watchdog(struct work_struct *work);
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
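
For context, the tunables added above work together: ioat_pending_level is a high-water mark, so tx_submit only appends descriptors and bumps ioat_chan->pending, and the APPEND/DMACOUNT doorbell is only written once pending crosses the threshold (see ioat1_tx_submit/ioat2_tx_submit below). A minimal sketch of that batching pattern, using the v1 doorbell helper; example_queue_descs is an illustrative name, not a function in this patch:

    /* sketch: batch descriptor submission behind the ioat_pending_level high-water mark */
    static void example_queue_descs(struct ioat_dma_chan *ioat_chan, int desc_count)
    {
            spin_lock_bh(&ioat_chan->desc_lock);
            ioat_chan->pending += desc_count;                     /* queued, not yet started */
            if (ioat_chan->pending >= ioat_pending_level)
                    __ioat1_dma_memcpy_issue_pending(ioat_chan);  /* ring doorbell, pending = 0 */
            spin_unlock_bh(&ioat_chan->desc_lock);
    }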
+
 /* internal functions */
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
 
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+
+static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
+                                               struct ioatdma_device *device,
+                                               int index)
+{
+       return device->idx[index];
+}
+
+/**
+ * ioat_dma_do_interrupt - handler used for single vector interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+{
+       struct ioatdma_device *instance = data;
+       struct ioat_dma_chan *ioat_chan;
+       unsigned long attnstatus;
+       int bit;
+       u8 intrctrl;
+
+       intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
+
+       if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
+               return IRQ_NONE;
+
+       if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
+               writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+               return IRQ_NONE;
+       }
+
+       attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+       for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+               ioat_chan = ioat_lookup_chan_by_index(instance, bit);
+               tasklet_schedule(&ioat_chan->cleanup_task);
+       }
+
+       writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+       return IRQ_HANDLED;
+}
+
+/**
+ * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+{
+       struct ioat_dma_chan *ioat_chan = data;
+
+       tasklet_schedule(&ioat_chan->cleanup_task);
+
+       return IRQ_HANDLED;
+}
+
+static void ioat_dma_cleanup_tasklet(unsigned long data);
+
+/**
+ * ioat_dma_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 {
        u8 xfercap_scale;
@@ -54,10 +136,47 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
        int i;
        struct ioat_dma_chan *ioat_chan;
 
+       /*
+        * IOAT ver.3 workarounds
+        */
+       if (device->version == IOAT_VER_3_0) {
+               u32 chan_err_mask;
+               u16 dev_id;
+               u32 dmauncerrsts;
+
+               /*
+                * Write CHANERRMSK_INT with 3E07h to mask out the errors
+                * that can cause stability issues for IOAT ver.3
+                */
+               chan_err_mask = 0x3E07;
+               pci_write_config_dword(device->pdev,
+                       IOAT_PCI_CHANERRMASK_INT_OFFSET,
+                       chan_err_mask);
+
+               /*
+                * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+                * (workaround for spurious config parity error after restart)
+                */
+               pci_read_config_word(device->pdev,
+                       IOAT_PCI_DEVICE_ID_OFFSET,
+                       &dev_id);
+               if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+                       dmauncerrsts = 0x10;
+                       pci_write_config_dword(device->pdev,
+                               IOAT_PCI_DMAUNCERRSTS_OFFSET,
+                               dmauncerrsts);
+               }
+       }
+
        device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
+#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
+       if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+               device->common.chancnt--;
+       }
+#endif
        for (i = 0; i < device->common.chancnt; i++) {
                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan) {
@@ -68,6 +187,13 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
                ioat_chan->device = device;
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
+               ioat_chan->desccount = 0;
+               INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
+               if (ioat_chan->device->version != IOAT_VER_1_2) {
+                       writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
+                                       | IOAT_DMA_DCA_ANY_CPU,
+                               ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+               }
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -76,78 +202,464 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
                ioat_chan->common.device = &device->common;
                list_add_tail(&ioat_chan->common.device_node,
                              &device->common.channels);
+               device->idx[i] = ioat_chan;
+               tasklet_init(&ioat_chan->cleanup_task,
+                            ioat_dma_cleanup_tasklet,
+                            (unsigned long) ioat_chan);
+               tasklet_disable(&ioat_chan->cleanup_task);
        }
        return device->common.chancnt;
 }
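
Worth noting: the per-channel cleanup tasklet set up above follows a strict lifecycle in this patch. It is initialized and immediately disabled here at enumeration time, enabled in ioat_dma_alloc_chan_resources() once descriptors and the completion writeback area exist, and disabled again in ioat_dma_free_chan_resources() before the descriptor lists are torn down. In sketch form (all calls taken from this patch):

    tasklet_init(&ioat_chan->cleanup_task,
                 ioat_dma_cleanup_tasklet, (unsigned long) ioat_chan);
    tasklet_disable(&ioat_chan->cleanup_task);   /* ioat_dma_enumerate_channels() */
    /* ... */
    tasklet_enable(&ioat_chan->cleanup_task);    /* ioat_dma_alloc_chan_resources() */
    /* ... */
    tasklet_disable(&ioat_chan->cleanup_task);   /* ioat_dma_free_chan_resources() */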
 
-static void ioat_set_src(dma_addr_t addr,
-                        struct dma_async_tx_descriptor *tx,
-                        int index)
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ *                                 descriptors to hw
+ * @chan: DMA channel handle
+ */
+static inline void __ioat1_dma_memcpy_issue_pending(
+                                               struct ioat_dma_chan *ioat_chan)
 {
-       struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+       ioat_chan->pending = 0;
+       writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
 
-       pci_unmap_addr_set(desc, src, addr);
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
-       list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
-               iter->hw->src_addr = addr;
-               addr += ioat_chan->xfercap;
+       if (ioat_chan->pending > 0) {
+               spin_lock_bh(&ioat_chan->desc_lock);
+               __ioat1_dma_memcpy_issue_pending(ioat_chan);
+               spin_unlock_bh(&ioat_chan->desc_lock);
        }
+}
 
+static inline void __ioat2_dma_memcpy_issue_pending(
+                                               struct ioat_dma_chan *ioat_chan)
+{
+       ioat_chan->pending = 0;
+       writew(ioat_chan->dmacount,
+              ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
 }
 
-static void ioat_set_dest(dma_addr_t addr,
-                         struct dma_async_tx_descriptor *tx,
-                         int index)
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-       struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+       if (ioat_chan->pending > 0) {
+               spin_lock_bh(&ioat_chan->desc_lock);
+               __ioat2_dma_memcpy_issue_pending(ioat_chan);
+               spin_unlock_bh(&ioat_chan->desc_lock);
+       }
+}
+
+
+/**
+ * ioat_dma_chan_reset_part2 - reinit the channel after a reset
+ */
+static void ioat_dma_chan_reset_part2(struct work_struct *work)
+{
+       struct ioat_dma_chan *ioat_chan =
+               container_of(work, struct ioat_dma_chan, work.work);
+       struct ioat_desc_sw *desc;
+
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+       spin_lock_bh(&ioat_chan->desc_lock);
+
+       ioat_chan->completion_virt->low = 0;
+       ioat_chan->completion_virt->high = 0;
+       ioat_chan->pending = 0;
+
+       /*
+        * count the descriptors waiting, and be sure to do it
+        * right for both the CB1 line and the CB2 ring
+        */
+       ioat_chan->dmacount = 0;
+       if (ioat_chan->used_desc.prev) {
+               desc = to_ioat_desc(ioat_chan->used_desc.prev);
+               do {
+                       ioat_chan->dmacount++;
+                       desc = to_ioat_desc(desc->node.next);
+               } while (&desc->node != ioat_chan->used_desc.next);
+       }
+
+       /*
+        * write the new starting descriptor address
+        * this puts channel engine into ARMED state
+        */
+       desc = to_ioat_desc(ioat_chan->used_desc.prev);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+               break;
+       case IOAT_VER_2_0:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+               /* tell the engine to go with what's left to be done */
+               writew(ioat_chan->dmacount,
+                      ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+
+               break;
+       }
+       dev_err(&ioat_chan->device->pdev->dev,
+               "chan%d reset - %d descs waiting, %d total desc\n",
+               chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+
+       spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+/**
+ * ioat_dma_reset_channel - restart a channel
+ * @ioat_chan: IOAT DMA channel handle
+ */
+static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+{
+       u32 chansts, chanerr;
+
+       if (!ioat_chan->used_desc.prev)
+               return;
+
+       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       chansts = (ioat_chan->completion_virt->low
+                                       & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
+       if (chanerr) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+                       chan_num(ioat_chan), chansts, chanerr);
+               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       }
+
+       /*
+        * whack it upside the head with a reset
+        * and wait for things to settle out.
+        * force the pending count to a really big negative
+        * to make sure no one forces an issue_pending
+        * while we're waiting.
+        */
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       ioat_chan->pending = INT_MIN;
+       writeb(IOAT_CHANCMD_RESET,
+              ioat_chan->reg_base
+              + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       /* schedule the 2nd half instead of sleeping a long time */
+       schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
+}
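
The reset is deliberately split in two stages: the command is issued here with pending forced to INT_MIN so nothing can ring the doorbell while the engine quiesces, and RESET_DELAY (100 ms) later ioat_dma_chan_reset_part2() recounts the outstanding descriptors and rearms the channel. Roughly:

    /*
     * t = 0        ioat_dma_reset_channel():
     *                pending = INT_MIN;              block issue_pending
     *                writeb(IOAT_CHANCMD_RESET);     quiesce the engine
     *                schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
     *
     * t = +100 ms  ioat_dma_chan_reset_part2():
     *                recount dmacount from used_desc
     *                reprogram CHAINADDR (and DMACOUNT on v2), pending = 0
     */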
+
+/**
+ * ioat_dma_chan_watchdog - watch for stuck channels
+ */
+static void ioat_dma_chan_watchdog(struct work_struct *work)
+{
+       struct ioatdma_device *device =
+               container_of(work, struct ioatdma_device, work.work);
+       struct ioat_dma_chan *ioat_chan;
+       int i;
+
+       union {
+               u64 full;
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+       } completion_hw;
+       unsigned long compl_desc_addr_hw;
+
+       for (i = 0; i < device->common.chancnt; i++) {
+               ioat_chan = ioat_lookup_chan_by_index(device, i);
+
+               if (ioat_chan->device->version == IOAT_VER_1_2
+                       /* have we started processing anything yet? */

+                   && ioat_chan->last_completion
+                       /* have we completed any since last watchdog cycle? */
+                   && (ioat_chan->last_completion ==
+                               ioat_chan->watchdog_completion)
+                       /* has TCP stuck on one cookie since last watchdog? */
+                   && (ioat_chan->watchdog_tcp_cookie ==
+                               ioat_chan->watchdog_last_tcp_cookie)
+                   && (ioat_chan->watchdog_tcp_cookie !=
+                               ioat_chan->completed_cookie)
+                       /* is there something in the chain to be processed? */
+                       /* CB1 chain always has at least the last one processed */
+                   && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
+                   && ioat_chan->pending == 0) {
+
+                       /*
+                        * check CHANSTS register for completed
+                        * descriptor address.
+                        * if it is different than completion writeback,
+                        * it is not zero
+                        * and it has changed since the last watchdog
+                        *     we can assume that channel
+                        *     is still working correctly
+                        *     and the problem is in completion writeback.
+                        *     update completion writeback
+                        *     with actual CHANSTS value
+                        * else
+                        *     try resetting the channel
+                        */
+
+                       completion_hw.low = readl(ioat_chan->reg_base +
+                               IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
+                       completion_hw.high = readl(ioat_chan->reg_base +
+                               IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
+#if (BITS_PER_LONG == 64)
+                       compl_desc_addr_hw =
+                               completion_hw.full
+                               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+#else
+                       compl_desc_addr_hw =
+                               completion_hw.low & IOAT_LOW_COMPLETION_MASK;
+#endif
 
-       pci_unmap_addr_set(desc, dst, addr);
+                       if ((compl_desc_addr_hw != 0)
+                          && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
+                          && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
+                               ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
+                               ioat_chan->completion_virt->low = completion_hw.low;
+                               ioat_chan->completion_virt->high = completion_hw.high;
+                       } else {
+                               ioat_dma_reset_channel(ioat_chan);
+                               ioat_chan->watchdog_completion = 0;
+                               ioat_chan->last_compl_desc_addr_hw = 0;
+                       }
 
-       list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
-               iter->hw->dst_addr = addr;
-               addr += ioat_chan->xfercap;
+               /*
+                * for version 2.0 if there are descriptors yet to be processed
+                * and the last completed hasn't changed since the last watchdog
+                *      if they haven't hit the pending level
+                *          issue the pending to push them through
+                *      else
+                *          try resetting the channel
+                */
+               } else if (ioat_chan->device->version == IOAT_VER_2_0
+                   && ioat_chan->used_desc.prev
+                   && ioat_chan->last_completion
+                   && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
+
+                       if (ioat_chan->pending < ioat_pending_level)
+                               ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
+                       else {
+                               ioat_dma_reset_channel(ioat_chan);
+                               ioat_chan->watchdog_completion = 0;
+                       }
+               } else {
+                       ioat_chan->last_compl_desc_addr_hw = 0;
+                       ioat_chan->watchdog_completion
+                                       = ioat_chan->last_completion;
+               }
+
+               ioat_chan->watchdog_last_tcp_cookie =
+                       ioat_chan->watchdog_tcp_cookie;
        }
+
+       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
 }
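
The device-level delayed work that drives this watchdog is wired up outside this hunk; presumably the probe/enumeration path does something along these lines (a sketch, not code shown in this hunk):

    /* assumed probe-side wiring for the per-device watchdog */
    INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
    schedule_delayed_work(&device->work, WATCHDOG_DELAY);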
 
-static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
-       struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
-       int append = 0;
+       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+       struct ioat_desc_sw *prev, *new;
+       struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
-       struct ioat_desc_sw *group_start;
+       LIST_HEAD(new_chain);
+       u32 copy;
+       size_t len;
+       dma_addr_t src, dst;
+       unsigned long orig_flags;
+       unsigned int desc_count = 0;
+
+       /* src and dest and len are stored in the initial descriptor */
+       len = first->len;
+       src = first->src;
+       dst = first->dst;
+       orig_flags = first->async_tx.flags;
+       new = first;
 
-       group_start = list_entry(desc->async_tx.tx_list.next,
-                                struct ioat_desc_sw, node);
        spin_lock_bh(&ioat_chan->desc_lock);
+       prev = to_ioat_desc(ioat_chan->used_desc.prev);
+       prefetch(prev->hw);
+       do {
+               copy = min_t(size_t, len, ioat_chan->xfercap);
+
+               async_tx_ack(&new->async_tx);
+
+               hw = new->hw;
+               hw->size = copy;
+               hw->ctl = 0;
+               hw->src_addr = src;
+               hw->dst_addr = dst;
+               hw->next = 0;
+
+               /* chain together the physical address list for the HW */
+               wmb();
+               prev->hw->next = (u64) new->async_tx.phys;
+
+               len -= copy;
+               dst += copy;
+               src += copy;
+
+               list_add_tail(&new->node, &new_chain);
+               desc_count++;
+               prev = new;
+       } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
+
+       if (!new) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "tx submit failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return -ENOMEM;
+       }
+
+       hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       if (first->async_tx.callback) {
+               hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+               if (first != new) {
+                       /* move callback into the last desc */
+                       new->async_tx.callback = first->async_tx.callback;
+                       new->async_tx.callback_param
+                                       = first->async_tx.callback_param;
+                       first->async_tx.callback = NULL;
+                       first->async_tx.callback_param = NULL;
+               }
+       }
+
+       new->tx_cnt = desc_count;
+       new->async_tx.flags = orig_flags; /* client is in control of this ack */
+
+       /* store the original values for use in later cleanup */
+       if (new != first) {
+               new->src = first->src;
+               new->dst = first->dst;
+               new->len = first->len;
+       }
+
        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
-       ioat_chan->common.cookie = desc->async_tx.cookie = cookie;
+       ioat_chan->common.cookie = new->async_tx.cookie = cookie;
 
        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
-                                               group_start->async_tx.phys;
-       list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);
+                                                       first->async_tx.phys;
+       list_splice_tail(&new_chain, &ioat_chan->used_desc);
 
-       ioat_chan->pending += desc->tx_cnt;
-       if (ioat_chan->pending >= 4) {
-               append = 1;
-               ioat_chan->pending = 0;
-       }
+       ioat_chan->dmacount += desc_count;
+       ioat_chan->pending += desc_count;
+       if (ioat_chan->pending >= ioat_pending_level)
+               __ioat1_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);
 
-       if (append)
-               writeb(IOAT_CHANCMD_APPEND,
-                       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+       return cookie;
+}
+
+static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+       struct ioat_desc_sw *new;
+       struct ioat_dma_descriptor *hw;
+       dma_cookie_t cookie;
+       u32 copy;
+       size_t len;
+       dma_addr_t src, dst;
+       unsigned long orig_flags;
+       unsigned int desc_count = 0;
+
+       /* src and dest and len are stored in the initial descriptor */
+       len = first->len;
+       src = first->src;
+       dst = first->dst;
+       orig_flags = first->async_tx.flags;
+       new = first;
+
+       /*
+        * ioat_chan->desc_lock is still in force in version 2 path
+        * it gets unlocked at end of this function
+        */
+       do {
+               copy = min_t(size_t, len, ioat_chan->xfercap);
+
+               async_tx_ack(&new->async_tx);
+
+               hw = new->hw;
+               hw->size = copy;
+               hw->ctl = 0;
+               hw->src_addr = src;
+               hw->dst_addr = dst;
+
+               len -= copy;
+               dst += copy;
+               src += copy;
+               desc_count++;
+       } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+
+       if (!new) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "tx submit failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return -ENOMEM;
+       }
+
+       hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       if (first->async_tx.callback) {
+               hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+               if (first != new) {
+                       /* move callback into the last desc */
+                       new->async_tx.callback = first->async_tx.callback;
+                       new->async_tx.callback_param
+                                       = first->async_tx.callback_param;
+                       first->async_tx.callback = NULL;
+                       first->async_tx.callback_param = NULL;
+               }
+       }
+
+       new->tx_cnt = desc_count;
+       new->async_tx.flags = orig_flags; /* client is in control of this ack */
+
+       /* store the original values for use in later cleanup */
+       if (new != first) {
+               new->src = first->src;
+               new->dst = first->dst;
+               new->len = first->len;
+       }
+
+       /* cookie incr and addition to used_list must be atomic */
+       cookie = ioat_chan->common.cookie;
+       cookie++;
+       if (cookie < 0)
+               cookie = 1;
+       ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+       ioat_chan->dmacount += desc_count;
+       ioat_chan->pending += desc_count;
+       if (ioat_chan->pending >= ioat_pending_level)
+               __ioat2_dma_memcpy_issue_pending(ioat_chan);
+       spin_unlock_bh(&ioat_chan->desc_lock);
 
        return cookie;
 }
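
Note the asymmetric locking on the v2/v3 path: ioat2_dma_prep_memcpy() (further down) returns with desc_lock still held, and it is only released here at the end of tx_submit. From a dmaengine client's point of view the sequence is (client calls are standard dmaengine API, shown for illustration):

    /* v2 path: the lock taken in prep is dropped in submit */
    tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
                                         /* ioat2_dma_prep_memcpy(): takes desc_lock */
    cookie = tx->tx_submit(tx);          /* ioat2_tx_submit(): releases desc_lock */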
 
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
                                        struct ioat_dma_chan *ioat_chan,
                                        gfp_t flags)
@@ -170,21 +682,62 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-       desc_sw->async_tx.tx_set_src = ioat_set_src;
-       desc_sw->async_tx.tx_set_dest = ioat_set_dest;
-       desc_sw->async_tx.tx_submit = ioat_tx_submit;
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+               break;
+       }
        INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+
        desc_sw->hw = desc;
        desc_sw->async_tx.phys = phys;
 
        return desc_sw;
 }
 
-/* returns the actual number of allocated descriptors */
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+                "initial descriptors per channel (default: 256)");
+
+/**
+ * ioat2_dma_massage_chan_desc - link the descriptors into a circle
+ * @ioat_chan: the channel to be massaged
+ */
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+{
+       struct ioat_desc_sw *desc, *_desc;
+
+       /* setup used_desc */
+       ioat_chan->used_desc.next = ioat_chan->free_desc.next;
+       ioat_chan->used_desc.prev = NULL;
+
+       /* pull free_desc out of the circle so that every node is a hw
+        * descriptor, but leave it pointing to the list
+        */
+       ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
+       ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+
+       /* circle link the hw descriptors */
+       desc = to_ioat_desc(ioat_chan->free_desc.next);
+       desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+       list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+               desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+       }
+}
+
+/**
+ * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioat_desc_sw *desc = NULL;
+       struct ioat_desc_sw *desc;
        u16 chanctrl;
        u32 chanerr;
        int i;
@@ -192,7 +745,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 
        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
-               return INITIAL_IOAT_DESC_COUNT;
+               return ioat_chan->desccount;
 
        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
@@ -203,22 +756,25 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(&ioat_chan->device->pdev->dev,
-                       "ioatdma: CHANERR = %x, clearing\n", chanerr);
+                       "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }
 
        /* Allocate descriptors */
-       for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
+       for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(&ioat_chan->device->pdev->dev,
-                               "ioatdma: Only %d initial descriptors\n", i);
+                               "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
+       ioat_chan->desccount = i;
        list_splice(&tmp_list, &ioat_chan->free_desc);
+       if (ioat_chan->device->version != IOAT_VER_1_2)
+               ioat2_dma_massage_chan_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);
 
        /* allocate a completion writeback area */
@@ -234,10 +790,15 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
-       ioat_dma_start_null_desc(ioat_chan);
-       return i;
+       tasklet_enable(&ioat_chan->cleanup_task);
+       ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
+       return ioat_chan->desccount;
 }
 
+/**
+ * ioat_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -245,23 +806,58 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;
 
+       /* Before freeing channel resources first check
+        * if they have been previously allocated for this channel.
+        */
+       if (ioat_chan->desccount == 0)
+               return;
+
+       tasklet_disable(&ioat_chan->cleanup_task);
        ioat_dma_memcpy_cleanup(ioat_chan);
 
-       writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+       /* Delay 100ms after reset to allow internal DMA logic to quiesce
+        * before removing DMA descriptor resources.
+        */
+       writeb(IOAT_CHANCMD_RESET,
+              ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+       mdelay(100);
 
        spin_lock_bh(&ioat_chan->desc_lock);
-       list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-               in_use_descs++;
-               list_del(&desc->node);
-               pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                             desc->async_tx.phys);
-               kfree(desc);
-       }
-       list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
-               list_del(&desc->node);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               list_for_each_entry_safe(desc, _desc,
+                                        &ioat_chan->used_desc, node) {
+                       in_use_descs++;
+                       list_del(&desc->node);
+                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                                     desc->async_tx.phys);
+                       kfree(desc);
+               }
+               list_for_each_entry_safe(desc, _desc,
+                                        &ioat_chan->free_desc, node) {
+                       list_del(&desc->node);
+                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                                     desc->async_tx.phys);
+                       kfree(desc);
+               }
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               list_for_each_entry_safe(desc, _desc,
+                                        ioat_chan->free_desc.next, node) {
+                       list_del(&desc->node);
+                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                                     desc->async_tx.phys);
+                       kfree(desc);
+               }
+               desc = to_ioat_desc(ioat_chan->free_desc.next);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->async_tx.phys);
                kfree(desc);
+               INIT_LIST_HEAD(&ioat_chan->free_desc);
+               INIT_LIST_HEAD(&ioat_chan->used_desc);
+               break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);
 
@@ -272,103 +868,233 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(&ioat_chan->device->pdev->dev,
-                       "ioatdma: Freeing %d in use descriptors!\n",
+                       "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);
 
        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
+       ioat_chan->pending = 0;
+       ioat_chan->dmacount = 0;
+       ioat_chan->desccount = 0;
+       ioat_chan->watchdog_completion = 0;
+       ioat_chan->last_compl_desc_addr_hw = 0;
+       ioat_chan->watchdog_tcp_cookie =
+               ioat_chan->watchdog_last_tcp_cookie = 0;
 }
 
-static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
-                                               struct dma_chan *chan,
-                                               size_t len,
-                                               int int_en)
+/**
+ * ioat1_dma_get_next_descriptor - return the next available descriptor
+ * @ioat_chan: IOAT DMA channel handle
+ *
+ * Gets the next descriptor from the chain, and must be called with the
+ * channel's desc_lock held.  Allocates more descriptors if the channel
+ * has run out.
+ */
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioat_desc_sw *first, *prev, *new;
-       LIST_HEAD(new_chain);
-       u32 copy;
-       size_t orig_len;
-       int desc_count = 0;
-
-       if (!len)
-               return NULL;
+       struct ioat_desc_sw *new;
 
-       orig_len = len;
+       if (!list_empty(&ioat_chan->free_desc)) {
+               new = to_ioat_desc(ioat_chan->free_desc.next);
+               list_del(&new->node);
+       } else {
+               /* try to get another desc */
+               new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+               if (!new) {
+                       dev_err(&ioat_chan->device->pdev->dev,
+                               "alloc failed\n");
+                       return NULL;
+               }
+       }
 
-       first = NULL;
-       prev = NULL;
+       prefetch(new->hw);
+       return new;
+}
 
-       spin_lock_bh(&ioat_chan->desc_lock);
-       while (len) {
-               if (!list_empty(&ioat_chan->free_desc)) {
-                       new = to_ioat_desc(ioat_chan->free_desc.next);
-                       list_del(&new->node);
-               } else {
-                       /* try to get another desc */
-                       new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-                       /* will this ever happen? */
-                       /* TODO add upper limit on these */
-                       BUG_ON(!new);
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+       struct ioat_desc_sw *new;
+
+       /*
+        * used.prev points to where to start processing
+        * used.next points to next free descriptor
+        * if used.prev == NULL, there are none waiting to be processed
+        * if used.next == used.prev.prev, there is only one free descriptor,
+        *      and we need to use it as a noop descriptor before
+        *      linking in a new set of descriptors, since the device
+        *      has probably already read the pointer to it
+        */
+       if (ioat_chan->used_desc.prev &&
+           ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+
+               struct ioat_desc_sw *desc;
+               struct ioat_desc_sw *noop_desc;
+               int i;
+
+               /* set up the noop descriptor */
+               noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+               /* set size to non-zero value (channel returns error when size is 0) */
+               noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
+               noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
+               noop_desc->hw->src_addr = 0;
+               noop_desc->hw->dst_addr = 0;
+
+               ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
+               ioat_chan->pending++;
+               ioat_chan->dmacount++;
+
+               /* try to get a few more descriptors */
+               for (i = 16; i; i--) {
+                       desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+                       if (!desc) {
+                               dev_err(&ioat_chan->device->pdev->dev,
+                                       "alloc failed\n");
+                               break;
+                       }
+                       list_add_tail(&desc->node, ioat_chan->used_desc.next);
+
+                       desc->hw->next
+                               = to_ioat_desc(desc->node.next)->async_tx.phys;
+                       to_ioat_desc(desc->node.prev)->hw->next
+                               = desc->async_tx.phys;
+                       ioat_chan->desccount++;
                }
 
-               copy = min((u32) len, ioat_chan->xfercap);
+               ioat_chan->used_desc.next = noop_desc->node.next;
+       }
+       new = to_ioat_desc(ioat_chan->used_desc.next);
+       prefetch(new);
+       ioat_chan->used_desc.next = new->node.next;
 
-               new->hw->size = copy;
-               new->hw->ctl = 0;
-               new->async_tx.cookie = 0;
-               new->async_tx.ack = 1;
+       if (ioat_chan->used_desc.prev == NULL)
+               ioat_chan->used_desc.prev = &new->node;
 
-               /* chain together the physical address list for the HW */
-               if (!first)
-                       first = new;
-               else
-                       prev->hw->next = (u64) new->async_tx.phys;
+       prefetch(new->hw);
+       return new;
+}
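
Restating the ring invariants from the comment above in one place:

    /*
     * v2/v3 descriptor ring bookkeeping:
     *   used_desc.prev  -> oldest descriptor still owned by the hardware
     *                      (NULL means nothing is outstanding)
     *   used_desc.next  -> next free slot handed out by this function
     *   used_desc.next == used_desc.prev->prev
     *                   -> only one free slot remains; it becomes a NULL
     *                      descriptor and up to 16 more descriptors are
     *                      allocated and spliced into the ring
     */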
 
-               prev = new;
-               len  -= copy;
-               list_add_tail(&new->node, &new_chain);
-               desc_count++;
-       }
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+                                               struct ioat_dma_chan *ioat_chan)
+{
+       if (!ioat_chan)
+               return NULL;
 
-       list_splice(&new_chain, &new->async_tx.tx_list);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               return ioat1_dma_get_next_descriptor(ioat_chan);
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               return ioat2_dma_get_next_descriptor(ioat_chan);
+       }
+       return NULL;
+}
 
-       new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-       new->hw->next = 0;
-       new->tx_cnt = desc_count;
-       new->async_tx.ack = 0; /* client is in control of this ack */
-       new->async_tx.cookie = -EBUSY;
+static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
+                                               struct dma_chan *chan,
+                                               dma_addr_t dma_dest,
+                                               dma_addr_t dma_src,
+                                               size_t len,
+                                               unsigned long flags)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_desc_sw *new;
 
-       pci_unmap_len_set(new, len, orig_len);
+       spin_lock_bh(&ioat_chan->desc_lock);
+       new = ioat_dma_get_next_descriptor(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);
 
-       return new ? &new->async_tx : NULL;
+       if (new) {
+               new->len = len;
+               new->dst = dma_dest;
+               new->src = dma_src;
+               new->async_tx.flags = flags;
+               return &new->async_tx;
+       } else {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+               return NULL;
+       }
 }
 
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
+static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
+                                               struct dma_chan *chan,
+                                               dma_addr_t dma_dest,
+                                               dma_addr_t dma_src,
+                                               size_t len,
+                                               unsigned long flags)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_desc_sw *new;
 
-       if (ioat_chan->pending != 0) {
-               ioat_chan->pending = 0;
-               writeb(IOAT_CHANCMD_APPEND,
-                      ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+       spin_lock_bh(&ioat_chan->desc_lock);
+       new = ioat2_dma_get_next_descriptor(ioat_chan);
+
+       /*
+        * leave ioat_chan->desc_lock set in ioat 2 path
+        * it will get unlocked at end of tx_submit
+        */
+
+       if (new) {
+               new->len = len;
+               new->dst = dma_dest;
+               new->src = dma_src;
+               new->async_tx.flags = flags;
+               return &new->async_tx;
+       } else {
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+               return NULL;
        }
 }
 
+static void ioat_dma_cleanup_tasklet(unsigned long data)
+{
+       struct ioat_dma_chan *chan = (void *)data;
+       ioat_dma_memcpy_cleanup(chan);
+       writew(IOAT_CHANCTRL_INT_DISABLE,
+              chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+       /*
+        * yes we are unmapping both _page and _single
+        * alloc'd regions with unmap_page. Is this
+        * *really* that bad?
+        */
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
+               pci_unmap_page(ioat_chan->device->pdev,
+                               pci_unmap_addr(desc, dst),
+                               pci_unmap_len(desc, len),
+                               PCI_DMA_FROMDEVICE);
+
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
+               pci_unmap_page(ioat_chan->device->pdev,
+                               pci_unmap_addr(desc, src),
+                               pci_unmap_len(desc, len),
+                               PCI_DMA_TODEVICE);
+}
+
+/**
+ * ioat_dma_memcpy_cleanup - clean up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ */
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 {
        unsigned long phys_complete;
        struct ioat_desc_sw *desc, *_desc;
        dma_cookie_t cookie = 0;
+       unsigned long desc_phys;
+       struct ioat_desc_sw *latest_desc;
 
        prefetch(ioat_chan->completion_virt);
 
-       if (!spin_trylock(&ioat_chan->cleanup_lock))
+       if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
                return;
 
        /* The completion writeback can happen at any time,
@@ -378,71 +1104,133 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
 #if (BITS_PER_LONG == 64)
        phys_complete =
-       ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+               ioat_chan->completion_virt->full
+               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 #else
-       phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
+       phys_complete =
+               ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
-       if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
+       if ((ioat_chan->completion_virt->full
+               & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                                IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                dev_err(&ioat_chan->device->pdev->dev,
-                       "ioatdma: Channel halted, chanerr = %x\n",
+                       "Channel halted, chanerr = %x\n",
                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
 
                /* TODO do something to salvage the situation */
        }
 
        if (phys_complete == ioat_chan->last_completion) {
-               spin_unlock(&ioat_chan->cleanup_lock);
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               /*
+                * perhaps we're stuck so hard that the watchdog can't go off?
+                * try to catch it after 2 seconds
+                */
+               if (ioat_chan->device->version != IOAT_VER_3_0) {
+                       if (time_after(jiffies,
+                                      ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
+                               ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
+                               ioat_chan->last_completion_time = jiffies;
+                       }
+               }
                return;
        }
+       ioat_chan->last_completion_time = jiffies;
 
-       spin_lock_bh(&ioat_chan->desc_lock);
-       list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-
-               /*
-                * Incoming DMA requests may use multiple descriptors, due to
-                * exceeding xfercap, perhaps. If so, only the last one will
-                * have a cookie, and require unmapping.
-                */
-               if (desc->async_tx.cookie) {
-                       cookie = desc->async_tx.cookie;
+       cookie = 0;
+       if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               return;
+       }
 
-                       /*
-                        * yes we are unmapping both _page and _single alloc'd
-                        * regions with unmap_page. Is this *really* that bad?
-                        */
-                       pci_unmap_page(ioat_chan->device->pdev,
-                                       pci_unmap_addr(desc, dst),
-                                       pci_unmap_len(desc, len),
-                                       PCI_DMA_FROMDEVICE);
-                       pci_unmap_page(ioat_chan->device->pdev,
-                                       pci_unmap_addr(desc, src),
-                                       pci_unmap_len(desc, len),
-                                       PCI_DMA_TODEVICE);
-               }
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               list_for_each_entry_safe(desc, _desc,
+                                        &ioat_chan->used_desc, node) {
 
-               if (desc->async_tx.phys != phys_complete) {
                        /*
-                        * a completed entry, but not the last, so cleanup
-                        * if the client is done with the descriptor
+                        * Incoming DMA requests may use multiple descriptors,
+                        * due to exceeding xfercap, perhaps. If so, only the
+                        * last one will have a cookie, and require unmapping.
                         */
-                       if (desc->async_tx.ack) {
-                               list_del(&desc->node);
-                               list_add_tail(&desc->node,
-                                             &ioat_chan->free_desc);
-                       } else
+                       if (desc->async_tx.cookie) {
+                               cookie = desc->async_tx.cookie;
+                               ioat_dma_unmap(ioat_chan, desc);
+                               if (desc->async_tx.callback) {
+                                       desc->async_tx.callback(desc->async_tx.callback_param);
+                                       desc->async_tx.callback = NULL;
+                               }
+                       }
+
+                       if (desc->async_tx.phys != phys_complete) {
+                               /*
+                                * a completed entry, but not the last, so clean
+                                * up if the client is done with the descriptor
+                                */
+                               if (async_tx_test_ack(&desc->async_tx)) {
+                                       list_del(&desc->node);
+                                       list_add_tail(&desc->node,
+                                                     &ioat_chan->free_desc);
+                               } else
+                                       desc->async_tx.cookie = 0;
+                       } else {
+                               /*
+                                * last used desc. Do not remove, so we can
+                                * append from it, but don't look at it next
+                                * time, either
+                                */
                                desc->async_tx.cookie = 0;
-               } else {
-                       /*
-                        * last used desc. Do not remove, so we can append from
-                        * it, but don't look at it next time, either
-                        */
-                       desc->async_tx.cookie = 0;
 
-                       /* TODO check status bits? */
+                               /* TODO check status bits? */
+                               break;
+                       }
+               }
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               /* has some other thread already cleaned up? */
+               if (ioat_chan->used_desc.prev == NULL)
                        break;
+
+               /* work backwards to find latest finished desc */
+               desc = to_ioat_desc(ioat_chan->used_desc.next);
+               latest_desc = NULL;
+               do {
+                       desc = to_ioat_desc(desc->node.prev);
+                       desc_phys = (unsigned long)desc->async_tx.phys
+                                      & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+                       if (desc_phys == phys_complete) {
+                               latest_desc = desc;
+                               break;
+                       }
+               } while (&desc->node != ioat_chan->used_desc.prev);
+
+               if (latest_desc != NULL) {
+
+                       /* work forwards to clear finished descriptors */
+                       for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
+                            &desc->node != latest_desc->node.next &&
+                            &desc->node != ioat_chan->used_desc.next;
+                            desc = to_ioat_desc(desc->node.next)) {
+                               if (desc->async_tx.cookie) {
+                                       cookie = desc->async_tx.cookie;
+                                       desc->async_tx.cookie = 0;
+                                       ioat_dma_unmap(ioat_chan, desc);
+                                       if (desc->async_tx.callback) {
+                                               desc->async_tx.callback(desc->async_tx.callback_param);
+                                               desc->async_tx.callback = NULL;
+                                       }
+                               }
+                       }
+
+                       /* move used.prev up beyond those that are finished */
+                       if (&desc->node == ioat_chan->used_desc.next)
+                               ioat_chan->used_desc.prev = NULL;
+                       else
+                               ioat_chan->used_desc.prev = &desc->node;
                }
+               break;
        }
 
        spin_unlock_bh(&ioat_chan->desc_lock);
@@ -451,18 +1239,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
        if (cookie != 0)
                ioat_chan->completed_cookie = cookie;
 
-       spin_unlock(&ioat_chan->cleanup_lock);
-}
-
-static void ioat_dma_dependency_added(struct dma_chan *chan)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       spin_lock_bh(&ioat_chan->desc_lock);
-       if (ioat_chan->pending == 0) {
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               ioat_dma_memcpy_cleanup(ioat_chan);
-       } else
-               spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
 /**
@@ -484,6 +1261,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 
        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;
+       ioat_chan->watchdog_tcp_cookie = cookie;
 
        if (done)
                *done = last_complete;
@@ -507,63 +1285,54 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
        return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
-/* PCI API */
-
-static irqreturn_t ioat_do_interrupt(int irq, void *data)
-{
-       struct ioatdma_device *instance = data;
-       unsigned long attnstatus;
-       u8 intrctrl;
-
-       intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
-
-       if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
-               return IRQ_NONE;
-
-       if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
-               writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-               return IRQ_NONE;
-       }
-
-       attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
-
-       printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);
-
-       writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-       return IRQ_HANDLED;
-}
-
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 {
        struct ioat_desc_sw *desc;
 
        spin_lock_bh(&ioat_chan->desc_lock);
 
-       if (!list_empty(&ioat_chan->free_desc)) {
-               desc = to_ioat_desc(ioat_chan->free_desc.next);
-               list_del(&desc->node);
-       } else {
-               /* try to get another desc */
+       desc = ioat_dma_get_next_descriptor(ioat_chan);
+
+       if (!desc) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "Unable to start null desc - get next desc failed\n");
                spin_unlock_bh(&ioat_chan->desc_lock);
-               desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
-               spin_lock_bh(&ioat_chan->desc_lock);
-               /* will this ever happen? */
-               BUG_ON(!desc);
+               return;
        }
 
-       desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
-       desc->hw->next = 0;
-       desc->async_tx.ack = 1;
-
-       list_add_tail(&desc->node, &ioat_chan->used_desc);
+       desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
+                               | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
+                               | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       /* set size to non-zero value (channel returns error when size is 0) */
+       desc->hw->size = NULL_DESC_BUFFER_SIZE;
+       desc->hw->src_addr = 0;
+       desc->hw->dst_addr = 0;
+       async_tx_ack(&desc->async_tx);
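+       /*
+        * How the restart is kicked off depends on the hardware version:
+        * ver 1.2 needs an explicit CHANCMD_START once the chain address
+        * is set, while ver 2.0/3.0 go as soon as the chain address is
+        * written and dmacount is incremented.
+        */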
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               desc->hw->next = 0;
+               list_add_tail(&desc->node, &ioat_chan->used_desc);
+
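+               /* the 64-bit chain address is written as two 32-bit halves */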
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+               ioat_chan->dmacount++;
+               __ioat2_dma_memcpy_issue_pending(ioat_chan);
+               break;
+       }
        spin_unlock_bh(&ioat_chan->desc_lock);
-
-       writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-              ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
-       writel(((u64) desc->async_tx.phys) >> 32,
-              ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
-
-       writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
 }
 
 /*
@@ -571,16 +1340,28 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
-static int ioat_self_test(struct ioatdma_device *device)
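+/* completion callback: lets the self-test sleep until the copy finishes */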
+static void ioat_dma_test_callback(void *dma_async_param)
+{
+       struct completion *cmp = dma_async_param;
+
+       complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @device: device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *device)
 {
        int i;
        u8 *src;
        u8 *dest;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
-       dma_addr_t addr;
+       dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;
+       struct completion cmp;
 
        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
@@ -599,46 +1380,221 @@ static int ioat_self_test(struct ioatdma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+       if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(&device->pdev->dev,
                        "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }
 
-       tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
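+       /* map src for the engine to read and dest for it to write */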
+       dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+                                DMA_TO_DEVICE);
+       dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+                                 DMA_FROM_DEVICE);
+       tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+                                                  IOAT_TEST_SIZE, 0);
+       if (!tx) {
+               dev_err(&device->pdev->dev,
+                       "Self-test prep failed, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
        async_tx_ack(tx);
-       addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-                       DMA_TO_DEVICE);
-       ioat_set_src(addr, tx, 0);
-       addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-                       DMA_FROM_DEVICE);
-       ioat_set_dest(addr, tx, 0);
-       cookie = ioat_tx_submit(tx);
-       ioat_dma_memcpy_issue_pending(dma_chan);
-       msleep(1);
-
-       if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
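+       /* wait on a completion fired from the descriptor callback */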
+       init_completion(&cmp);
+       tx->callback = ioat_dma_test_callback;
+       tx->callback_param = &cmp;
+       cookie = tx->tx_submit(tx);
+       if (cookie < 0) {
+               dev_err(&device->pdev->dev,
+                       "Self-test setup failed, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+       device->common.device_issue_pending(dma_chan);
+
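+       /* a timeout here is caught by the device_is_tx_complete check below */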
+       wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+       if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+                                       != DMA_SUCCESS) {
                dev_err(&device->pdev->dev,
-                       "ioatdma: Self-test copy timed out, disabling\n");
+                       "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(&device->pdev->dev,
-                       "ioatdma: Self-test copy failed compare, disabling\n");
+                       "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
 
 free_resources:
-       ioat_dma_free_chan_resources(dma_chan);
+       device->common.device_free_chan_resources(dma_chan);
 out:
        kfree(src);
        kfree(dest);
        return err;
 }
 
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+                   sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+                "set ioat interrupt style: msix (default), "
+                "msix-single-vector, msi, intx)");
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @device: ioat device
+ */
+static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+{
+       struct ioat_dma_chan *ioat_chan;
+       int err, i, j, msixcnt;
+       u8 intrctrl = 0;
+
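+       /*
+        * Start from the requested style and fall back along the chain
+        * msix -> msix-single-vector -> msi -> intx until one succeeds.
+        */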
+       if (!strcmp(ioat_interrupt_style, "msix"))
+               goto msix;
+       if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
+               goto msix_single_vector;
+       if (!strcmp(ioat_interrupt_style, "msi"))
+               goto msi;
+       if (!strcmp(ioat_interrupt_style, "intx"))
+               goto intx;
+       dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
+               ioat_interrupt_style);
+       goto err_no_irq;
+
+msix:
+       /* The number of MSI-X vectors should equal the number of channels */
+       msixcnt = device->common.chancnt;
+       for (i = 0; i < msixcnt; i++)
+               device->msix_entries[i].entry = i;
+
+       err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
+       if (err < 0)
+               goto msi;
+       if (err > 0)
+               goto msix_single_vector;
+
+       for (i = 0; i < msixcnt; i++) {
+               ioat_chan = ioat_lookup_chan_by_index(device, i);
+               err = request_irq(device->msix_entries[i].vector,
+                                 ioat_dma_do_interrupt_msix,
+                                 0, "ioat-msix", ioat_chan);
+               if (err) {
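+                       /* unwind the vectors already requested */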
+                       for (j = 0; j < i; j++) {
+                               ioat_chan =
+                                       ioat_lookup_chan_by_index(device, j);
+                               free_irq(device->msix_entries[j].vector,
+                                        ioat_chan);
+                       }
+                       goto msix_single_vector;
+               }
+       }
+       intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+       device->irq_mode = msix_multi_vector;
+       goto done;
+
+msix_single_vector:
+       device->msix_entries[0].entry = 0;
+       err = pci_enable_msix(device->pdev, device->msix_entries, 1);
+       if (err)
+               goto msi;
+
+       err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
+                         0, "ioat-msix", device);
+       if (err) {
+               pci_disable_msix(device->pdev);
+               goto msi;
+       }
+       device->irq_mode = msix_single_vector;
+       goto done;
+
+msi:
+       err = pci_enable_msi(device->pdev);
+       if (err)
+               goto intx;
+
+       err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
+                         0, "ioat-msi", device);
+       if (err) {
+               pci_disable_msi(device->pdev);
+               goto intx;
+       }
+       /*
+        * CB 1.2 devices need a bit set in configuration space to enable MSI
+        */
+       if (device->version == IOAT_VER_1_2) {
+               u32 dmactrl;
+               pci_read_config_dword(device->pdev,
+                                     IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+               dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+               pci_write_config_dword(device->pdev,
+                                      IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+       }
+       device->irq_mode = msi;
+       goto done;
+
+intx:
+       err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
+                         IRQF_SHARED, "ioat-intx", device);
+       if (err)
+               goto err_no_irq;
+       device->irq_mode = intx;
+
+done:
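+       /* a handler is in place, so let the device generate interrupts */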
+       intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+       writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+       return 0;
+
+err_no_irq:
+       /* Disable all interrupt generation */
+       writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+       dev_err(&device->pdev->dev, "no usable interrupts\n");
+       device->irq_mode = none;
+       return -ENODEV;
+}
+
+/**
+ * ioat_dma_remove_interrupts - remove whatever interrupts were set
+ * @device: ioat device
+ */
+static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
+{
+       struct ioat_dma_chan *ioat_chan;
+       int i;
+
+       /* Disable all interrupt generation */
+       writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+
+       switch (device->irq_mode) {
+       case msix_multi_vector:
+               for (i = 0; i < device->common.chancnt; i++) {
+                       ioat_chan = ioat_lookup_chan_by_index(device, i);
+                       free_irq(device->msix_entries[i].vector, ioat_chan);
+               }
+               pci_disable_msix(device->pdev);
+               break;
+       case msix_single_vector:
+               free_irq(device->msix_entries[0].vector, device);
+               pci_disable_msix(device->pdev);
+               break;
+       case msi:
+               free_irq(device->pdev->irq, device);
+               pci_disable_msi(device->pdev);
+               break;
+       case intx:
+               free_irq(device->pdev->irq, device);
+               break;
+       case none:
+               dev_warn(&device->pdev->dev,
+                        "call to %s without interrupts setup\n", __func__);
+       }
+       device->irq_mode = none;
+}
+
 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                                      void __iomem *iobase)
 {
@@ -674,50 +1630,64 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
        INIT_LIST_HEAD(&device->common.channels);
        ioat_dma_enumerate_channels(device);
 
-       dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
        device->common.device_alloc_chan_resources =
                                                ioat_dma_alloc_chan_resources;
        device->common.device_free_chan_resources =
                                                ioat_dma_free_chan_resources;
-       device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
-       device->common.device_is_tx_complete = ioat_dma_is_complete;
-       device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
-       device->common.device_dependency_added = ioat_dma_dependency_added;
        device->common.dev = &pdev->dev;
-       printk(KERN_INFO "ioatdma: Intel(R) I/OAT DMA Engine found,"
-              " %d channels, device version 0x%02x\n",
-              device->common.chancnt, device->version);
 
-       pci_set_drvdata(pdev, device);
-       err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
-               device);
-       if (err)
-               goto err_irq;
+       dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
+       device->common.device_is_tx_complete = ioat_dma_is_complete;
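+       /* ver 1.2 and ver 2.0+ use different descriptor chaining schemes */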
+       switch (device->version) {
+       case IOAT_VER_1_2:
+               device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+               device->common.device_issue_pending =
+                                               ioat1_dma_memcpy_issue_pending;
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+               device->common.device_issue_pending =
+                                               ioat2_dma_memcpy_issue_pending;
+               break;
+       }
+
+       dev_info(&device->pdev->dev,
+               "Intel(R) I/OAT DMA Engine found,"
+               " %d channels, device version 0x%02x, driver version %s\n",
+               device->common.chancnt, device->version, IOAT_DMA_VERSION);
 
-       writeb(IOAT_INTRCTRL_MASTER_INT_EN,
-              device->reg_base + IOAT_INTRCTRL_OFFSET);
-       pci_set_master(pdev);
+       err = ioat_dma_setup_interrupts(device);
+       if (err)
+               goto err_setup_interrupts;
 
-       err = ioat_self_test(device);
+       err = ioat_dma_self_test(device);
        if (err)
                goto err_self_test;
 
+       ioat_set_tcp_copy_break(device);
+
        dma_async_device_register(&device->common);
 
+       if (device->version != IOAT_VER_3_0) {
+               INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+               schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+       }
+
        return device;
 
 err_self_test:
-       free_irq(device->pdev->irq, device);
-err_irq:
+       ioat_dma_remove_interrupts(device);
+err_setup_interrupts:
        pci_pool_destroy(device->completion_pool);
 err_completion_pool:
        pci_pool_destroy(device->dma_pool);
 err_dma_pool:
        kfree(device);
 err_kzalloc:
-       iounmap(iobase);
-       printk(KERN_ERR
-              "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
+       dev_err(&pdev->dev,
+               "Intel(R) I/OAT DMA Engine initialization failed\n");
        return NULL;
 }
 
@@ -726,13 +1696,21 @@ void ioat_dma_remove(struct ioatdma_device *device)
        struct dma_chan *chan, *_chan;
        struct ioat_dma_chan *ioat_chan;
 
-       dma_async_device_unregister(&device->common);
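+       /* quiesce interrupts before unregistering from the dmaengine core */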
+       ioat_dma_remove_interrupts(device);
 
-       free_irq(device->pdev->irq, device);
+       dma_async_device_unregister(&device->common);
 
        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);
 
+       iounmap(device->reg_base);
+       pci_release_regions(device->pdev);
+       pci_disable_device(device->pdev);
+
+       if (device->version != IOAT_VER_3_0)
+               cancel_delayed_work(&device->work);
+
        list_for_each_entry_safe(chan, _chan,
                                 &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);