tree-wide: fix assorted typos all over the place
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f2cf8a..56ba872 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -36,6 +36,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/dma-mapping.h>
+#include <net/arp.h>
 #include "common.h"
 #include "regs.h"
 #include "sge_defs.h"
 #define USE_GTS 0
 
 #define SGE_RX_SM_BUF_SIZE 1536
+
 #define SGE_RX_COPY_THRES  256
+#define SGE_RX_PULL_LEN    128
+
+#define SGE_PG_RSVD SMP_CACHE_BYTES
+/*
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
+ * directly.
+ */
+#define FL0_PG_CHUNK_SIZE  2048
+#define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
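
The constants above size the two free lists: FL0 is carved into 2 KB chunks, which the comment requires to divide PAGE_SIZE evenly, while FL1 uses 8 KB or 16 KB chunks backed by an order-1 or order-0 allocation depending on the base page size. A standalone userspace sketch of that arithmetic (plain C; the helper fl_sizes() is invented for illustration, not driver code):

#include <assert.h>
#include <stdio.h>

/* Mirror the FL0/FL1 sizing rules above for a given base page size. */
static void fl_sizes(unsigned long page_size)
{
        unsigned long fl0_chunk = 2048;                   /* FL0_PG_CHUNK_SIZE */
        unsigned long fl1_chunk = page_size > 8192 ? 16384 : 8192;
        unsigned int fl1_order = page_size > 8192 ? 0 : 1;
        unsigned long fl1_alloc = page_size << fl1_order; /* FL1_PG_ALLOC_SIZE */

        assert(page_size % fl0_chunk == 0);  /* FL0 chunks must divide PAGE_SIZE */

        printf("page %lu: %lu FL0 chunks/page, %lu FL1 chunks per order-%u alloc\n",
               page_size, page_size / fl0_chunk, fl1_alloc / fl1_chunk, fl1_order);
}

int main(void)
{
        fl_sizes(4096);   /* typical x86: 2 FL0 chunks, 1 FL1 chunk (order 1) */
        fl_sizes(65536);  /* 64K pages: 32 FL0 chunks, 4 FL1 chunks (order 0) */
        return 0;
}
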
 
-# define SGE_RX_DROP_THRES 16
+#define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
 
 /*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+/*
  * Period of the Tx buffer reclaim timer.  This timer does not need to run
  * frequently as Tx buffers are usually reclaimed by new Tx packets.
  */
 #define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
 
 /* WR size in bytes */
 #define WR_LEN (WR_FLITS * 8)
@@ -70,7 +93,7 @@ enum {
 };
 
 struct tx_desc {
-       u64 flit[TX_DESC_FLITS];
+       __be64 flit[TX_DESC_FLITS];
 };
 
 struct rx_desc {
@@ -82,11 +105,18 @@ struct rx_desc {
 
 struct tx_sw_desc {            /* SW state per Tx descriptor */
        struct sk_buff *skb;
+       u8 eop;       /* set if last descriptor for packet */
+       u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
+       u8 fragidx;   /* first page fragment associated with descriptor */
+       s8 sflit;     /* start flit of first SGL entry in descriptor */
 };
 
-struct rx_sw_desc {            /* SW state per Rx descriptor */
-       struct sk_buff *skb;
-        DECLARE_PCI_UNMAP_ADDR(dma_addr);
+struct rx_sw_desc {                /* SW state per Rx descriptor */
+       union {
+               struct sk_buff *skb;
+               struct fl_pg_chunk pg_chunk;
+       };
+       DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {              /* response queue descriptor */
@@ -97,11 +127,13 @@ struct rsp_desc {          /* response queue descriptor */
        u8 intr_gen;
 };
 
-struct unmap_info {            /* packet unmapping info, overlays skb->cb */
-       int sflit;              /* start flit of first SGL entry in Tx descriptor */
-       u16 fragidx;            /* first page fragment in current Tx descriptor */
-       u16 addr_idx;           /* buffer index of first SGL entry in descriptor */
-       u32 len;                /* mapped length of skb main body */
+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+       struct pci_dev *pdev;
+       dma_addr_t addr[MAX_SKB_FRAGS + 1];
 };
 
 /*
@@ -156,6 +188,7 @@ static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
 static inline void refill_rspq(struct adapter *adapter,
                               const struct sge_rspq *q, unsigned int credits)
 {
+       rmb();
        t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
                     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
 }
@@ -188,32 +221,36 @@ static inline int need_skb_unmap(void)
  *
  *     Unmap the main body of an sk_buff and its page fragments, if any.
  *     Because of the fairly complicated structure of our SGLs and the desire
- *     to conserve space for metadata, we keep the information necessary to
- *     unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
- *     in the Tx descriptors (the physical addresses of the various data
- *     buffers).  The send functions initialize the state in skb->cb so we
- *     can unmap the buffers held in the first Tx descriptor here, and we
- *     have enough information at this point to update the state for the next
- *     Tx descriptor.
+ *     to conserve space for metadata, the information necessary to unmap an
+ *     sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ *     descriptors (the physical addresses of the various data buffers), and
+ *     the SW descriptor state (assorted indices).  The send functions
+ *     initialize the indices for the first packet descriptor so we can unmap
+ *     the buffers held in the first Tx descriptor here, and we have enough
+ *     information at this point to set the state for the next Tx descriptor.
+ *
+ *     Note that it is possible to clean up the first descriptor of a packet
+ *     before the send routines have written the next descriptors, but this
+ *     race does not cause any problem.  We just end up writing the unmapping
+ *     info for the descriptor first.
  */
 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
                             unsigned int cidx, struct pci_dev *pdev)
 {
        const struct sg_ent *sgp;
-       struct unmap_info *ui = (struct unmap_info *)skb->cb;
-       int nfrags, frag_idx, curflit, j = ui->addr_idx;
+       struct tx_sw_desc *d = &q->sdesc[cidx];
+       int nfrags, frag_idx, curflit, j = d->addr_idx;
 
-       sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
+       sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+       frag_idx = d->fragidx;
 
-       if (ui->len) {
-               pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
-                                PCI_DMA_TODEVICE);
-               ui->len = 0;    /* so we know for next descriptor for this skb */
+       if (frag_idx == 0 && skb_headlen(skb)) {
+               pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
+                                skb_headlen(skb), PCI_DMA_TODEVICE);
                j = 1;
        }
 
-       frag_idx = ui->fragidx;
-       curflit = ui->sflit + 1 + j;
+       curflit = d->sflit + 1 + j;
        nfrags = skb_shinfo(skb)->nr_frags;
 
        while (frag_idx < nfrags && curflit < WR_FLITS) {
@@ -229,10 +266,11 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
                frag_idx++;
        }
 
-       if (frag_idx < nfrags) {        /* SGL continues into next Tx descriptor */
-               ui->fragidx = frag_idx;
-               ui->addr_idx = j;
-               ui->sflit = curflit - WR_FLITS - j;     /* sflit can be -1 */
+       if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
+               d = cidx + 1 == q->size ? q->sdesc : d + 1;
+               d->fragidx = frag_idx;
+               d->addr_idx = j;
+               d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
        }
 }
 
@@ -252,12 +290,15 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
        struct pci_dev *pdev = adapter->pdev;
        unsigned int cidx = q->cidx;
 
+       const int need_unmap = need_skb_unmap() &&
+                              q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
        d = &q->sdesc[cidx];
        while (n--) {
                if (d->skb) {   /* an SGL is present */
-                       if (need_skb_unmap())
+                       if (need_unmap)
                                unmap_skb(d->skb, q, cidx, pdev);
-                       if (d->skb->priority == cidx)
+                       if (d->eop)
                                kfree_skb(d->skb);
                }
                ++d;
@@ -273,21 +314,25 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
  *     reclaim_completed_tx - reclaims completed Tx descriptors
  *     @adapter: the adapter
  *     @q: the Tx queue to reclaim completed descriptors from
+ *     @chunk: maximum number of descriptors to reclaim
  *
  *     Reclaims Tx descriptors that the SGE has indicated it has processed,
  *     and frees the associated buffers if possible.  Called with the Tx
  *     queue's lock held.
  */
-static inline void reclaim_completed_tx(struct adapter *adapter,
-                                       struct sge_txq *q)
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+                                               struct sge_txq *q,
+                                               unsigned int chunk)
 {
        unsigned int reclaim = q->processed - q->cleaned;
 
+       reclaim = min(chunk, reclaim);
        if (reclaim) {
                free_tx_desc(adapter, q, reclaim);
                q->cleaned += reclaim;
                q->in_use -= reclaim;
        }
+       return q->processed - q->cleaned;
 }
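
With the @chunk argument, reclaim_completed_tx() above frees at most a bounded number of completed descriptors per call and reports how many are still pending, so the xmit path (TX_RECLAIM_CHUNK) and the reclaim timer (TX_RECLAIM_TIMER_CHUNK) each do a limited amount of work per invocation. A minimal userspace model of that contract (struct txq_model and reclaim_model() are invented for illustration):

#include <stdio.h>

struct txq_model {
        unsigned int processed;  /* descriptors the hardware has completed */
        unsigned int cleaned;    /* descriptors already freed by software */
        unsigned int in_use;
};

/* Free at most 'chunk' completed descriptors; return how many remain pending. */
static unsigned int reclaim_model(struct txq_model *q, unsigned int chunk)
{
        unsigned int reclaim = q->processed - q->cleaned;

        if (reclaim > chunk)
                reclaim = chunk;
        if (reclaim) {
                /* free_tx_desc() would unmap and free the skbs here */
                q->cleaned += reclaim;
                q->in_use -= reclaim;
        }
        return q->processed - q->cleaned;
}

int main(void)
{
        struct txq_model q = { .processed = 100, .cleaned = 60, .in_use = 70 };
        unsigned int left = reclaim_model(&q, 16);  /* TX_RECLAIM_CHUNK */

        printf("reclaimed a chunk, %u still pending, %u in use\n", left, q.in_use);
        return 0;
}
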
 
 /**
@@ -303,6 +348,26 @@ static inline int should_restart_tx(const struct sge_txq *q)
        return q->in_use - r < (q->size >> 1);
 }
 
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+                         struct rx_sw_desc *d)
+{
+       if (q->use_pages && d->pg_chunk.page) {
+               (*d->pg_chunk.p_cnt)--;
+               if (!*d->pg_chunk.p_cnt)
+                       pci_unmap_page(pdev,
+                                      d->pg_chunk.mapping,
+                                      q->alloc_size, PCI_DMA_FROMDEVICE);
+
+               put_page(d->pg_chunk.page);
+               d->pg_chunk.page = NULL;
+       } else {
+               pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+                                q->buf_size, PCI_DMA_FROMDEVICE);
+               kfree_skb(d->skb);
+               d->skb = NULL;
+       }
+}
+
 /**
  *     free_rx_bufs - free the Rx buffers on an SGE free list
  *     @pdev: the PCI device associated with the adapter
@@ -318,18 +383,21 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
        while (q->credits--) {
                struct rx_sw_desc *d = &q->sdesc[cidx];
 
-               pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
-                                q->buf_size, PCI_DMA_FROMDEVICE);
-               kfree_skb(d->skb);
-               d->skb = NULL;
+
+               clear_rx_desc(pdev, q, d);
                if (++cidx == q->size)
                        cidx = 0;
        }
+
+       if (q->pg_chunk.page) {
+               __free_pages(q->pg_chunk.page, q->order);
+               q->pg_chunk.page = NULL;
+       }
 }
 
 /**
  *     add_one_rx_buf - add a packet buffer to a free-buffer list
- *     @skb: the buffer to add
+ *     @va:  buffer start VA
  *     @len: the buffer length
  *     @d: the HW Rx descriptor to write
  *     @sd: the SW Rx descriptor to write
@@ -339,14 +407,16 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  *     Add a buffer of the given length to the supplied HW and SW Rx
  *     descriptors.
  */
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
-                                 struct rx_desc *d, struct rx_sw_desc *sd,
-                                 unsigned int gen, struct pci_dev *pdev)
+static inline int add_one_rx_buf(void *va, unsigned int len,
+                                struct rx_desc *d, struct rx_sw_desc *sd,
+                                unsigned int gen, struct pci_dev *pdev)
 {
        dma_addr_t mapping;
 
-       sd->skb = skb;
-       mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+       mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+       if (unlikely(pci_dma_mapping_error(pdev, mapping)))
+               return -ENOMEM;
+
        pci_unmap_addr_set(sd, dma_addr, mapping);
 
        d->addr_lo = cpu_to_be32(mapping);
@@ -354,6 +424,64 @@ static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
        wmb();
        d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
        d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+       return 0;
+}
+
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+                                  unsigned int gen)
+{
+       d->addr_lo = cpu_to_be32(mapping);
+       d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+       wmb();
+       d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+       d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+       return 0;
+}
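
Both helpers above write the hardware Rx descriptor the same way: the 64-bit bus address is split into addr_lo/addr_hi, and the length/generation words go in only after a write barrier so the SGE never sees a half-written descriptor carrying the new generation bit. A userspace sketch of the split and write ordering (struct rx_desc_model and the len_gen packing are made up for the sketch; the real V_FLD_GEN1/V_FLD_GEN2 encoding differs):

#include <stdint.h>
#include <stdio.h>

struct rx_desc_model {
        uint32_t addr_lo;
        uint32_t addr_hi;
        uint32_t len_gen;   /* length plus generation bit, written last */
};

/* Fill one descriptor; 'gen' flips every time the ring wraps. */
static void write_rx_desc(struct rx_desc_model *d, uint64_t mapping,
                          uint32_t len, unsigned int gen)
{
        d->addr_lo = (uint32_t)mapping;
        d->addr_hi = (uint32_t)(mapping >> 32);
        /* wmb() in the driver: address words must be visible before len_gen */
        d->len_gen = (len << 1) | (gen & 1);
}

int main(void)
{
        struct rx_desc_model d;

        write_rx_desc(&d, 0x000000012345f000ULL, 2048, 1);
        printf("addr_lo 0x%08x addr_hi 0x%08x len_gen 0x%08x\n",
               d.addr_lo, d.addr_hi, d.len_gen);
        return 0;
}
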
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+                         struct rx_sw_desc *sd, gfp_t gfp,
+                         unsigned int order)
+{
+       if (!q->pg_chunk.page) {
+               dma_addr_t mapping;
+
+               q->pg_chunk.page = alloc_pages(gfp, order);
+               if (unlikely(!q->pg_chunk.page))
+                       return -ENOMEM;
+               q->pg_chunk.va = page_address(q->pg_chunk.page);
+               q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+                                   SGE_PG_RSVD;
+               q->pg_chunk.offset = 0;
+               mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+                                      0, q->alloc_size, PCI_DMA_FROMDEVICE);
+               q->pg_chunk.mapping = mapping;
+       }
+       sd->pg_chunk = q->pg_chunk;
+
+       prefetch(sd->pg_chunk.p_cnt);
+
+       q->pg_chunk.offset += q->buf_size;
+       if (q->pg_chunk.offset == (PAGE_SIZE << order))
+               q->pg_chunk.page = NULL;
+       else {
+               q->pg_chunk.va += q->buf_size;
+               get_page(q->pg_chunk.page);
+       }
+
+       if (sd->pg_chunk.offset == 0)
+               *sd->pg_chunk.p_cnt = 1;
+       else
+               *sd->pg_chunk.p_cnt += 1;
+
+       return 0;
+}
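
alloc_pg_chunk() above carves a single (possibly multi-page) DMA-mapped allocation into fixed-size Rx buffers. A chunk-use counter stored in the last SGE_PG_RSVD bytes of the allocation (p_cnt) lets the driver drop the PCI mapping only when the final chunk is freed, while get_page()/put_page() keep the page itself alive. A standalone userspace model of the carving and counting (malloc stands in for alloc_pages, the DMA mapping is omitted, and the sizes are arbitrary):

#include <stdio.h>
#include <stdlib.h>

#define ALLOC_SIZE 8192u  /* stand-in for PAGE_SIZE << order */
#define BUF_SIZE   2048u  /* stand-in for q->buf_size */
#define PG_RSVD      64u  /* stand-in for SGE_PG_RSVD */

struct pg_chunk_model {
        unsigned char *page;   /* base of the shared allocation */
        unsigned char *va;     /* start of this chunk */
        unsigned int *p_cnt;   /* shared counter at the tail of the allocation */
        unsigned int offset;
};

/* Hand out the next chunk of the current allocation, starting a fresh one if needed. */
static int take_chunk(struct pg_chunk_model *cur, struct pg_chunk_model *out)
{
        if (!cur->page) {
                cur->page = malloc(ALLOC_SIZE);
                if (!cur->page)
                        return -1;
                cur->va = cur->page;
                cur->p_cnt = (unsigned int *)(cur->page + ALLOC_SIZE - PG_RSVD);
                cur->offset = 0;
        }
        *out = *cur;

        cur->offset += BUF_SIZE;
        if (cur->offset == ALLOC_SIZE)
                cur->page = NULL;          /* allocation fully carved up */
        else
                cur->va += BUF_SIZE;

        *out->p_cnt = out->offset ? *out->p_cnt + 1 : 1;
        return 0;
}

int main(void)
{
        struct pg_chunk_model cur = { 0 }, sd = { 0 };

        for (int i = 0; i < 4; i++)
                if (take_chunk(&cur, &sd) == 0)
                        printf("chunk at offset %u, chunks in use: %u\n",
                               sd.offset, *sd.p_cnt);
        free(sd.page);  /* the driver instead put_page()s each chunk as it is consumed */
        return 0;
}
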
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+       if (q->pend_cred >= q->credits / 4) {
+               q->pend_cred = 0;
+               t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+       }
 }
 
 /**
@@ -367,18 +495,46 @@ static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
  *     allocated with the supplied gfp flags.  The caller must assure that
  *     @n does not exceed the queue's capacity.
  */
-static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
        struct rx_desc *d = &q->desc[q->pidx];
+       unsigned int count = 0;
 
        while (n--) {
-               struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+               dma_addr_t mapping;
+               int err;
 
-               if (!skb)
-                       break;
+               if (q->use_pages) {
+                       if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+                                                   q->order))) {
+nomem:                         q->alloc_failed++;
+                               break;
+                       }
+                       mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
+                       pci_unmap_addr_set(sd, dma_addr, mapping);
+
+                       add_one_rx_chunk(mapping, d, q->gen);
+                       pci_dma_sync_single_for_device(adap->pdev, mapping,
+                                               q->buf_size - SGE_PG_RSVD,
+                                               PCI_DMA_FROMDEVICE);
+               } else {
+                       void *buf_start;
+
+                       struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+                       if (!skb)
+                               goto nomem;
+
+                       sd->skb = skb;
+                       buf_start = skb->data;
+                       err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+                                            q->gen, adap->pdev);
+                       if (unlikely(err)) {
+                               clear_rx_desc(adap->pdev, q, sd);
+                               break;
+                       }
+               }
 
-               add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
                d++;
                sd++;
                if (++q->pidx == q->size) {
@@ -387,15 +543,20 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
                        sd = q->sdesc;
                        d = q->desc;
                }
-               q->credits++;
+               count++;
        }
 
-       t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+       q->credits += count;
+       q->pend_cred += count;
+       ring_fl_db(adap, q);
+
+       return count;
 }
 
 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
 {
-       refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
+       refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
+                 GFP_ATOMIC | __GFP_COMP);
 }
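
refill_fl() now accumulates the buffers it posts in q->pend_cred, and ring_fl_db() writes the A_SG_KDOORBELL register only once the pending count reaches a quarter of the posted credits, batching the MMIO writes. A small userspace model of that policy (struct fl_model is invented; the register write is replaced by a counter and the list is assumed to start with 512 buffers already posted):

#include <stdio.h>

struct fl_model {
        unsigned int credits;    /* buffers currently posted to the hardware */
        unsigned int pend_cred;  /* buffers posted since the last doorbell */
        unsigned int doorbells;  /* stand-in for t3_write_reg(A_SG_KDOORBELL, ...) */
};

static void ring_db_model(struct fl_model *q)
{
        if (q->pend_cred >= q->credits / 4) {
                q->pend_cred = 0;
                q->doorbells++;
        }
}

static void refill_model(struct fl_model *q, unsigned int n)
{
        q->credits += n;    /* n buffers were allocated and written to the ring */
        q->pend_cred += n;
        ring_db_model(q);
}

int main(void)
{
        struct fl_model q = { .credits = 512 };

        for (int i = 0; i < 12; i++)
                refill_model(&q, 16);  /* MAX_RX_REFILL-sized top-ups */
        printf("%u credits posted, doorbell rung %u time(s)\n",
               q.credits, q.doorbells);
        return 0;
}
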
 
 /**
@@ -419,13 +580,15 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
        wmb();
        to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
        to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
-       q->credits++;
 
        if (++q->pidx == q->size) {
                q->pidx = 0;
                q->gen ^= 1;
        }
-       t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+       q->credits++;
+       q->pend_cred++;
+       ring_fl_db(adap, q);
 }
 
 /**
@@ -446,7 +609,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
  *     of the SW ring.
  */
 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
-                       size_t sw_size, dma_addr_t *phys, void *metadata)
+                       size_t sw_size, dma_addr_t * phys, void *metadata)
 {
        size_t len = nelem * elem_size;
        void *s = NULL;
@@ -454,21 +617,48 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
 
        if (!p)
                return NULL;
-       if (sw_size) {
+       if (sw_size && metadata) {
                s = kcalloc(nelem, sw_size, GFP_KERNEL);
 
                if (!s) {
                        dma_free_coherent(&pdev->dev, len, p, *phys);
                        return NULL;
                }
-       }
-       if (metadata)
                *(void **)metadata = s;
+       }
        memset(p, 0, len);
        return p;
 }
 
 /**
+ *     t3_reset_qset - reset a sge qset
+ *     @q: the queue set
+ *
+ *     Reset the qset structure.  The NAPI structure is preserved in the
+ *     event of the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+       if (q->adap &&
+           !(q->adap->flags & NAPI_INIT)) {
+               memset(q, 0, sizeof(*q));
+               return;
+       }
+
+       q->adap = NULL;
+       memset(&q->rspq, 0, sizeof(q->rspq));
+       memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+       memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+       q->txq_stopped = 0;
+       q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
+       q->rx_reclaim_timer.function = NULL;
+       q->nomem = 0;
+       napi_free_frags(&q->napi);
+}
+
+
+/**
  *     free_qset - free the resources of an SGE queue set
  *     @adapter: the adapter owning the queue set
  *     @q: the queue set
@@ -477,19 +667,16 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  *     as HW contexts, packet buffers, and descriptor rings.  Traffic to the
  *     queue set must be quiesced prior to calling this.
  */
-void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 {
        int i;
        struct pci_dev *pdev = adapter->pdev;
 
-       if (q->tx_reclaim_timer.function)
-               del_timer_sync(&q->tx_reclaim_timer);
-
        for (i = 0; i < SGE_RXQ_PER_SET; ++i)
                if (q->fl[i].desc) {
-                       spin_lock(&adapter->sge.reg_lock);
+                       spin_lock_irq(&adapter->sge.reg_lock);
                        t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
-                       spin_unlock(&adapter->sge.reg_lock);
+                       spin_unlock_irq(&adapter->sge.reg_lock);
                        free_rx_bufs(pdev, &q->fl[i]);
                        kfree(q->fl[i].sdesc);
                        dma_free_coherent(&pdev->dev,
@@ -500,9 +687,9 @@ void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 
        for (i = 0; i < SGE_TXQ_PER_SET; ++i)
                if (q->txq[i].desc) {
-                       spin_lock(&adapter->sge.reg_lock);
+                       spin_lock_irq(&adapter->sge.reg_lock);
                        t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
-                       spin_unlock(&adapter->sge.reg_lock);
+                       spin_unlock_irq(&adapter->sge.reg_lock);
                        if (q->txq[i].sdesc) {
                                free_tx_desc(adapter, &q->txq[i],
                                             q->txq[i].in_use);
@@ -516,18 +703,15 @@ void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
                }
 
        if (q->rspq.desc) {
-               spin_lock(&adapter->sge.reg_lock);
+               spin_lock_irq(&adapter->sge.reg_lock);
                t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
-               spin_unlock(&adapter->sge.reg_lock);
+               spin_unlock_irq(&adapter->sge.reg_lock);
                dma_free_coherent(&pdev->dev,
                                  q->rspq.size * sizeof(struct rsp_desc),
                                  q->rspq.desc, q->rspq.phys_addr);
        }
 
-       if (q->netdev)
-               q->netdev->atalk_ptr = NULL;
-
-       memset(q, 0, sizeof(*q));
+       t3_reset_qset(q);
 }
 
 /**
@@ -597,31 +781,32 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
        prefetch(sd->skb->data);
+       fl->credits--;
 
        if (len <= SGE_RX_COPY_THRES) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (likely(skb != NULL)) {
                        __skb_put(skb, len);
                        pci_dma_sync_single_for_cpu(adap->pdev,
-                                                   pci_unmap_addr(sd,
-                                                                  dma_addr),
-                                                   len, PCI_DMA_FROMDEVICE);
+                                           pci_unmap_addr(sd, dma_addr), len,
+                                           PCI_DMA_FROMDEVICE);
                        memcpy(skb->data, sd->skb->data, len);
                        pci_dma_sync_single_for_device(adap->pdev,
-                                                      pci_unmap_addr(sd,
-                                                                     dma_addr),
-                                                      len, PCI_DMA_FROMDEVICE);
+                                           pci_unmap_addr(sd, dma_addr), len,
+                                           PCI_DMA_FROMDEVICE);
                } else if (!drop_thres)
                        goto use_orig_buf;
-             recycle:
+recycle:
                recycle_rx_buf(adap, fl, fl->cidx);
                return skb;
        }
 
-       if (unlikely(fl->credits < drop_thres))
+       if (unlikely(fl->credits < drop_thres) &&
+           refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
+                     GFP_ATOMIC | __GFP_COMP) == 0)
                goto recycle;
 
-      use_orig_buf:
+use_orig_buf:
        pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
                         fl->buf_size, PCI_DMA_FROMDEVICE);
        skb = sd->skb;
@@ -631,6 +816,101 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
 }
 
 /**
+ *     get_packet_pg - return the next ingress packet buffer from a free list
+ *     @adap: the adapter that received the packet
+ *     @fl: the SGE free list holding the packet
+ *     @len: the packet length including any SGE padding
+ *     @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *     Get the next packet from a free list populated with page chunks.
+ *     If the packet is small we make a copy and recycle the original buffer,
+ *     otherwise we attach the original buffer as a page fragment to a fresh
+ *     sk_buff.  If a positive drop threshold is supplied packets are dropped
+ *     and their buffers recycled if (a) the number of remaining buffers is
+ *     under the threshold and the packet is too big to copy, or (b) there's
+ *     no system memory.
+ *
+ *     Note: this function is similar to @get_packet but deals with Rx buffers
+ *     that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+                                    struct sge_rspq *q, unsigned int len,
+                                    unsigned int drop_thres)
+{
+       struct sk_buff *newskb, *skb;
+       struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+       dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+
+       newskb = skb = q->pg_skb;
+       if (!skb && (len <= SGE_RX_COPY_THRES)) {
+               newskb = alloc_skb(len, GFP_ATOMIC);
+               if (likely(newskb != NULL)) {
+                       __skb_put(newskb, len);
+                       pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+                                           PCI_DMA_FROMDEVICE);
+                       memcpy(newskb->data, sd->pg_chunk.va, len);
+                       pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+                                                      len,
+                                                      PCI_DMA_FROMDEVICE);
+               } else if (!drop_thres)
+                       return NULL;
+recycle:
+               fl->credits--;
+               recycle_rx_buf(adap, fl, fl->cidx);
+               q->rx_recycle_buf++;
+               return newskb;
+       }
+
+       if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
+               goto recycle;
+
+       prefetch(sd->pg_chunk.p_cnt);
+
+       if (!skb)
+               newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
+       if (unlikely(!newskb)) {
+               if (!drop_thres)
+                       return NULL;
+               goto recycle;
+       }
+
+       pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+                                   PCI_DMA_FROMDEVICE);
+       (*sd->pg_chunk.p_cnt)--;
+       if (!*sd->pg_chunk.p_cnt)
+               pci_unmap_page(adap->pdev,
+                              sd->pg_chunk.mapping,
+                              fl->alloc_size,
+                              PCI_DMA_FROMDEVICE);
+       if (!skb) {
+               __skb_put(newskb, SGE_RX_PULL_LEN);
+               memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+               skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+                                  sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+                                  len - SGE_RX_PULL_LEN);
+               newskb->len = len;
+               newskb->data_len = len - SGE_RX_PULL_LEN;
+               newskb->truesize += newskb->data_len;
+       } else {
+               skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+                                  sd->pg_chunk.page,
+                                  sd->pg_chunk.offset, len);
+               newskb->len += len;
+               newskb->data_len += len;
+               newskb->truesize += len;
+       }
+
+       fl->credits--;
+       /*
+        * We do not refill FLs here; we let the caller do it to overlap a
+        * prefetch.
+        */
+       return newskb;
+}
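
get_packet_pg() above boils down to a three-way policy: packets up to SGE_RX_COPY_THRES are copied into a small fresh skb and the chunk recycled; larger packets get their first SGE_RX_PULL_LEN bytes pulled into the linear area with the rest attached as a page fragment; and when the free list is at or below the drop threshold the buffer is recycled and the packet dropped. A decision-only sketch of that policy (it ignores the frame-continuation path via q->pg_skb and allocation failures; rx_policy() is an invented name):

#include <stdio.h>

#define RX_COPY_THRES 256  /* SGE_RX_COPY_THRES */
#define RX_PULL_LEN   128  /* SGE_RX_PULL_LEN */

enum rx_action { RX_COPY, RX_PULL_AND_ATTACH, RX_RECYCLE_DROP };

/* Decide what to do with a completed Rx page chunk. */
static enum rx_action rx_policy(unsigned int len, unsigned int fl_credits,
                                unsigned int drop_thres)
{
        if (len <= RX_COPY_THRES)
                return RX_COPY;            /* memcpy into a fresh skb, recycle chunk */
        if (fl_credits <= drop_thres)
                return RX_RECYCLE_DROP;    /* free list too low, drop and recycle */
        return RX_PULL_AND_ATTACH;         /* pull RX_PULL_LEN bytes, attach the rest */
}

int main(void)
{
        static const char *names[] = { "copy", "pull+attach", "recycle/drop" };
        unsigned int lens[] = { 60, 256, 1514, 9000 };

        for (unsigned int i = 0; i < 4; i++)
                printf("len %4u, 300 credits -> %s\n",
                       lens[i], names[rx_policy(lens[i], 300, 16)]);
        printf("len 1514,   8 credits -> %s\n", names[rx_policy(1514, 8, 16)]);
        return 0;
}
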
+
+/**
  *     get_imm_packet - return the next ingress packet buffer from a response
  *     @resp: the response descriptor containing the packet data
  *
@@ -642,7 +922,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
 
        if (skb) {
                __skb_put(skb, IMMED_PKT_SIZE);
-               memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
+               skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
        }
        return skb;
 }
@@ -769,23 +1049,21 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
                             const struct sge_txq *q,
                             const struct sg_ent *sgl,
                             unsigned int flits, unsigned int sgl_flits,
-                            unsigned int gen, unsigned int wr_hi,
-                            unsigned int wr_lo)
+                            unsigned int gen, __be32 wr_hi,
+                            __be32 wr_lo)
 {
        struct work_request_hdr *wrp = (struct work_request_hdr *)d;
        struct tx_sw_desc *sd = &q->sdesc[pidx];
 
        sd->skb = skb;
        if (need_skb_unmap()) {
-               struct unmap_info *ui = (struct unmap_info *)skb->cb;
-
-               ui->fragidx = 0;
-               ui->addr_idx = 0;
-               ui->sflit = flits;
+               sd->fragidx = 0;
+               sd->addr_idx = 0;
+               sd->sflit = flits;
        }
 
        if (likely(ndesc == 1)) {
-               skb->priority = pidx;
+               sd->eop = 1;
                wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
                                   V_WR_SGLSFLT(flits)) | wr_hi;
                wmb();
@@ -813,6 +1091,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
 
                        fp += avail;
                        d++;
+                       sd->eop = 0;
                        sd++;
                        if (++pidx == q->size) {
                                pidx = 0;
@@ -831,7 +1110,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
                        wr_gen2(d, gen);
                        flits = 1;
                }
-               skb->priority = pidx;
+               sd->eop = 1;
                wrp->wr_hi |= htonl(F_WR_EOP);
                wmb();
                wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
@@ -864,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
        struct tx_desc *d = &q->desc[pidx];
        struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
 
-       cpl->len = htonl(skb->len | 0x80000000);
+       cpl->len = htonl(skb->len);
        cntrl = V_TXPKT_INTF(pi->port_id);
 
        if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@@ -878,11 +1157,11 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                d->flit[2] = 0;
                cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
                hdr->cntrl = htonl(cntrl);
-               eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+               eth_type = skb_network_offset(skb) == ETH_HLEN ?
                    CPL_ETH_II : CPL_ETH_II_VLAN;
                tso_info |= V_LSO_ETH_TYPE(eth_type) |
-                   V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
-                   V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+                   V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+                   V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
                hdr->lso_info = htonl(tso_info);
                flits = 3;
        } else {
@@ -894,7 +1173,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                if (skb->len <= WR_LEN - sizeof(*cpl)) {
                        q->sdesc[pidx].skb = NULL;
                        if (!skb->data_len)
-                               memcpy(&d->flit[2], skb->data, skb->len);
+                               skb_copy_from_linear_data(skb, &d->flit[2],
+                                                         skb->len);
                        else
                                skb_copy_bits(skb, 0, &d->flit[2], skb->len);
 
@@ -915,14 +1195,20 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
        sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
-       if (need_skb_unmap())
-               ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
 
        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
                         htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
                         htonl(V_WR_TID(q->token)));
 }
 
+static inline void t3_stop_tx_queue(struct netdev_queue *txq,
+                                   struct sge_qset *qs, struct sge_txq *q)
+{
+       netif_tx_stop_queue(txq);
+       set_bit(TXQ_ETH, &qs->txq_stopped);
+       q->stops++;
+}
+
 /**
  *     eth_xmit - add a packet to the Ethernet Tx queue
  *     @skb: the packet
@@ -930,13 +1216,15 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
  *
  *     Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
  */
-int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       int qidx;
        unsigned int ndesc, pidx, credits, gen, compl;
        const struct port_info *pi = netdev_priv(dev);
-       struct adapter *adap = dev->priv;
-       struct sge_qset *qs = dev2qset(dev);
-       struct sge_txq *q = &qs->txq[TXQ_ETH];
+       struct adapter *adap = pi->adapter;
+       struct netdev_queue *txq;
+       struct sge_qset *qs;
+       struct sge_txq *q;
 
        /*
         * The chip min packet length is 9 octets but play safe and reject
@@ -947,37 +1235,33 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       spin_lock(&q->lock);
-       reclaim_completed_tx(adap, q);
+       qidx = skb_get_queue_mapping(skb);
+       qs = &pi->qs[qidx];
+       q = &qs->txq[TXQ_ETH];
+       txq = netdev_get_tx_queue(dev, qidx);
+
+       reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
        credits = q->size - q->in_use;
        ndesc = calc_tx_descs(skb);
 
        if (unlikely(credits < ndesc)) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
-                       set_bit(TXQ_ETH, &qs->txq_stopped);
-                       q->stops++;
-                       dev_err(&adap->pdev->dev,
-                               "%s: Tx ring %u full while queue awake!\n",
-                               dev->name, q->cntxt_id & 7);
-               }
-               spin_unlock(&q->lock);
+               t3_stop_tx_queue(txq, qs, q);
+               dev_err(&adap->pdev->dev,
+                       "%s: Tx ring %u full while queue awake!\n",
+                       dev->name, q->cntxt_id & 7);
                return NETDEV_TX_BUSY;
        }
 
        q->in_use += ndesc;
        if (unlikely(credits - ndesc < q->stop_thres)) {
-               q->stops++;
-               netif_stop_queue(dev);
-               set_bit(TXQ_ETH, &qs->txq_stopped);
-#if !USE_GTS
+               t3_stop_tx_queue(txq, qs, q);
+
                if (should_restart_tx(q) &&
                    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
                        q->restarts++;
-                       netif_wake_queue(dev);
+                       netif_tx_wake_queue(txq);
                }
-#endif
        }
 
        gen = q->gen;
@@ -999,12 +1283,9 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (vlan_tx_tag_present(skb) && pi->vlan_grp)
                qs->port_stats[SGE_PSTAT_VLANINS]++;
 
-       dev->trans_start = jiffies;
-       spin_unlock(&q->lock);
-
        /*
         * We do not use Tx completion interrupts to free DMAd Tx packets.
-        * This is good for performamce but means that we rely on new Tx
+        * This is good for performance but means that we rely on new Tx
         * packets arriving to run the destructors of completed packets,
         * which open up space in their sockets' send queues.  Sometimes
         * we do not get such new packets causing Tx to stall.  A single
@@ -1043,8 +1324,8 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  *
  *     Writes a packet as immediate data into a Tx descriptor.  The packet
  *     contains a work request at its beginning.  We must write the packet
- *     carefully so the SGE doesn't read accidentally before it's written in
- *     its entirety.
+ *     carefully so the SGE doesn't read it accidentally before it's written
+ *     in its entirety.
  */
 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
                             unsigned int len, unsigned int gen)
@@ -1052,7 +1333,11 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
        struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
        struct work_request_hdr *to = (struct work_request_hdr *)d;
 
-       memcpy(&to[1], &from[1], len - sizeof(*from));
+       if (likely(!skb->data_len))
+               memcpy(&to[1], &from[1], len - sizeof(*from));
+       else
+               skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
+
        to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
                                        V_WR_BCNTLFLT(len & 7));
        wmb();
@@ -1122,7 +1407,7 @@ static inline void reclaim_completed_tx_imm(struct sge_txq *q)
 
 static inline int immediate(const struct sk_buff *skb)
 {
-       return skb->len <= WR_LEN && !skb->data_len;
+       return skb->len <= WR_LEN;
 }
 
 /**
@@ -1187,12 +1472,12 @@ static void restart_ctrlq(unsigned long data)
        struct sk_buff *skb;
        struct sge_qset *qs = (struct sge_qset *)data;
        struct sge_txq *q = &qs->txq[TXQ_CTRL];
-       struct adapter *adap = qs->netdev->priv;
 
        spin_lock(&q->lock);
       again:reclaim_completed_tx_imm(q);
 
-       while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
+       while (q->in_use < q->size &&
+              (skb = __skb_dequeue(&q->sendq)) != NULL) {
 
                write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
 
@@ -1214,7 +1499,8 @@ static void restart_ctrlq(unsigned long data)
        }
 
        spin_unlock(&q->lock);
-       t3_write_reg(adap, A_SG_KDOORBELL,
+       wmb();
+       t3_write_reg(qs->adap, A_SG_KDOORBELL,
                     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
@@ -1223,7 +1509,57 @@ static void restart_ctrlq(unsigned long data)
  */
 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 {
-       return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+       int ret;
+       local_bh_disable();
+       ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+       local_bh_enable();
+
+       return ret;
+}
+
+/**
+ *     deferred_unmap_destructor - unmap a packet when it is freed
+ *     @skb: the packet
+ *
+ *     This is the packet destructor used for Tx packets that need to remain
+ *     mapped until they are freed rather than until their Tx descriptors are
+ *     freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+       int i;
+       const dma_addr_t *p;
+       const struct skb_shared_info *si;
+       const struct deferred_unmap_info *dui;
+
+       dui = (struct deferred_unmap_info *)skb->head;
+       p = dui->addr;
+
+       if (skb->tail - skb->transport_header)
+               pci_unmap_single(dui->pdev, *p++,
+                                skb->tail - skb->transport_header,
+                                PCI_DMA_TODEVICE);
+
+       si = skb_shinfo(skb);
+       for (i = 0; i < si->nr_frags; i++)
+               pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+                              PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+                                    const struct sg_ent *sgl, int sgl_flits)
+{
+       dma_addr_t *p;
+       struct deferred_unmap_info *dui;
+
+       dui = (struct deferred_unmap_info *)skb->head;
+       dui->pdev = pdev;
+       for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+               *p++ = be64_to_cpu(sgl->addr[0]);
+               *p++ = be64_to_cpu(sgl->addr[1]);
+       }
+       if (sgl_flits)
+               *p = be64_to_cpu(sgl->addr[0]);
 }
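
setup_deferred_unmapping() relies on the SGL layout: each full scatter-gather entry takes three 64-bit flits (one flit holding two lengths, then two address flits), so the loop harvests two DMA addresses per three flits plus one more from a trailing half entry when the buffer count is odd. A standalone sketch of that walk (struct sg_ent_model and collect_addrs() are invented; the addresses are fake and no PCI unmapping is involved):

#include <stdint.h>
#include <stdio.h>

/* One SGL entry: two lengths packed into one flit, then two addresses. */
struct sg_ent_model {
        uint32_t len[2];
        uint64_t addr[2];
};

/* Copy the DMA addresses out of an SGL into a flat array, as the driver
 * does when it sets up deferred unmapping; returns the address count. */
static int collect_addrs(const struct sg_ent_model *sgl, int sgl_flits,
                         uint64_t *out)
{
        uint64_t *p = out;

        for (; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
                *p++ = sgl->addr[0];
                *p++ = sgl->addr[1];
        }
        if (sgl_flits)
                *p++ = sgl->addr[0];
        return (int)(p - out);
}

int main(void)
{
        /* 3 buffers -> one full entry (3 flits) + one half entry (2 flits) */
        struct sg_ent_model sgl[2] = {
                { { 1500, 4096 }, { 0x1000, 0x2000 } },
                { { 2048, 0 },    { 0x3000, 0 } },
        };
        uint64_t addrs[3];
        int i, n = collect_addrs(sgl, 5, addrs);

        for (i = 0; i < n; i++)
                printf("unmap address %d: 0x%llx\n", i,
                       (unsigned long long)addrs[i]);
        return 0;
}
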
 
 /**
@@ -1256,14 +1592,18 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
        /* Only TX_DATA builds SGLs */
 
        from = (struct work_request_hdr *)skb->data;
-       memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
+       memcpy(&d->flit[1], &from[1],
+              skb_transport_offset(skb) - sizeof(*from));
 
-       flits = (skb->h.raw - skb->data) / 8;
+       flits = skb_transport_offset(skb) / 8;
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
+       sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+                            skb->tail - skb->transport_header,
                             adap->pdev);
-       if (need_skb_unmap())
-               ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+       if (need_skb_unmap()) {
+               setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+               skb->destructor = deferred_unmap_destructor;
+       }
 
        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
                         gen, from->wr_hi, from->wr_lo);
@@ -1278,13 +1618,14 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
  */
 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
 {
-       unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
+       unsigned int flits, cnt;
 
-       if (skb->len <= WR_LEN && cnt == 0)
+       if (skb->len <= WR_LEN)
                return 1;       /* packet fits as immediate data */
 
-       flits = (skb->h.raw - skb->data) / 8;   /* headers */
-       if (skb->tail != skb->h.raw)
+       flits = skb_transport_offset(skb) / 8;  /* headers */
+       cnt = skb_shinfo(skb)->nr_frags;
+       if (skb->tail != skb->transport_header)
                cnt++;
        return flits_to_desc(flits + sgl_len(cnt));
 }
@@ -1304,7 +1645,7 @@ static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
        unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
 
        spin_lock(&q->lock);
-      again:reclaim_completed_tx(adap, q);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
        ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
        if (unlikely(ret)) {
@@ -1342,10 +1683,11 @@ static void restart_offloadq(unsigned long data)
        struct sk_buff *skb;
        struct sge_qset *qs = (struct sge_qset *)data;
        struct sge_txq *q = &qs->txq[TXQ_OFLD];
-       struct adapter *adap = qs->netdev->priv;
+       const struct port_info *pi = netdev_priv(qs->netdev);
+       struct adapter *adap = pi->adapter;
 
        spin_lock(&q->lock);
-      again:reclaim_completed_tx(adap, q);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
        while ((skb = skb_peek(&q->sendq)) != NULL) {
                unsigned int gen, pidx;
@@ -1382,6 +1724,7 @@ static void restart_offloadq(unsigned long data)
        set_bit(TXQ_RUNNING, &q->flags);
        set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
+       wmb();
        t3_write_reg(adap, A_SG_KDOORBELL,
                     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
@@ -1441,17 +1784,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
  */
 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
 {
-       skb->next = skb->prev = NULL;
-       if (q->rx_tail)
-               q->rx_tail->next = skb;
-       else {
+       int was_empty = skb_queue_empty(&q->rx_queue);
+
+       __skb_queue_tail(&q->rx_queue, skb);
+
+       if (was_empty) {
                struct sge_qset *qs = rspq_to_qset(q);
 
-               if (__netif_rx_schedule_prep(qs->netdev))
-                       __netif_rx_schedule(qs->netdev);
-               q->rx_head = skb;
+               napi_schedule(&qs->napi);
        }
-       q->rx_tail = skb;
 }
 
 /**
@@ -1484,37 +1825,37 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
  *     receive handler.  Batches need to be of modest size as we do prefetches
  *     on the packets in each.
  */
-static int ofld_poll(struct net_device *dev, int *budget)
+static int ofld_poll(struct napi_struct *napi, int budget)
 {
-       struct adapter *adapter = dev->priv;
-       struct sge_qset *qs = dev2qset(dev);
+       struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
        struct sge_rspq *q = &qs->rspq;
-       int work_done, limit = min(*budget, dev->quota), avail = limit;
+       struct adapter *adapter = qs->adap;
+       int work_done = 0;
 
-       while (avail) {
-               struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+       while (work_done < budget) {
+               struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+               struct sk_buff_head queue;
                int ngathered;
 
                spin_lock_irq(&q->lock);
-               head = q->rx_head;
-               if (!head) {
-                       work_done = limit - avail;
-                       *budget -= work_done;
-                       dev->quota -= work_done;
-                       __netif_rx_complete(dev);
+               __skb_queue_head_init(&queue);
+               skb_queue_splice_init(&q->rx_queue, &queue);
+               if (skb_queue_empty(&queue)) {
+                       napi_complete(napi);
                        spin_unlock_irq(&q->lock);
-                       return 0;
+                       return work_done;
                }
-
-               tail = q->rx_tail;
-               q->rx_head = q->rx_tail = NULL;
                spin_unlock_irq(&q->lock);
 
-               for (ngathered = 0; avail && head; avail--) {
-                       prefetch(head->data);
-                       skbs[ngathered] = head;
-                       head = head->next;
-                       skbs[ngathered]->next = NULL;
+               ngathered = 0;
+               skb_queue_walk_safe(&queue, skb, tmp) {
+                       if (work_done >= budget)
+                               break;
+                       work_done++;
+
+                       __skb_unlink(skb, &queue);
+                       prefetch(skb->data);
+                       skbs[ngathered] = skb;
                        if (++ngathered == RX_BUNDLE_SIZE) {
                                q->offload_bundles++;
                                adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1522,20 +1863,16 @@ static int ofld_poll(struct net_device *dev, int *budget)
                                ngathered = 0;
                        }
                }
-               if (head) {     /* splice remaining packets back onto Rx queue */
+               if (!skb_queue_empty(&queue)) {
+                       /* splice remaining packets back onto Rx queue */
                        spin_lock_irq(&q->lock);
-                       tail->next = q->rx_head;
-                       if (!q->rx_head)
-                               q->rx_tail = tail;
-                       q->rx_head = head;
+                       skb_queue_splice(&queue, &q->rx_queue);
                        spin_unlock_irq(&q->lock);
                }
                deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
        }
-       work_done = limit - avail;
-       *budget -= work_done;
-       dev->quota -= work_done;
-       return 1;
+
+       return work_done;
 }
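
The rewritten ofld_poll() splices the pending offload packets onto a private list under the lock, hands them to the offload receive hook in bundles of RX_BUNDLE_SIZE, and splices whatever did not fit into the NAPI budget back onto the queue for the next poll. A userspace model of that drain-with-budget loop (array-backed FIFO, no locking, bundle delivery reduced to a printf; all names are invented):

#include <stdio.h>

#define RX_BUNDLE 8  /* stand-in for RX_BUNDLE_SIZE */

/* A trivial FIFO of packet ids standing in for the rx_queue of sk_buffs. */
struct fifo {
        int pkt[64];
        int head, tail;
};

static int fifo_len(const struct fifo *q) { return q->tail - q->head; }

/* Drain up to 'budget' packets in bundles; return how many were processed. */
static int poll_model(struct fifo *q, int budget)
{
        int bundle[RX_BUNDLE];
        int work_done = 0, ngathered = 0;

        while (work_done < budget && fifo_len(q) > 0) {
                bundle[ngathered++] = q->pkt[q->head++];  /* __skb_unlink */
                work_done++;
                if (ngathered == RX_BUNDLE || fifo_len(q) == 0 ||
                    work_done == budget) {
                        printf("deliver bundle of %d (first id %d)\n",
                               ngathered, bundle[0]);
                        ngathered = 0;
                }
        }
        /* anything still queued stays behind for the next poll (splice back) */
        return work_done;
}

int main(void)
{
        struct fifo q = { .head = 0, .tail = 20 };
        int i, done;

        for (i = 0; i < 20; i++)
                q.pkt[i] = i;
        done = poll_model(&q, 13);
        printf("processed %d, %d left for the next poll\n", done, fifo_len(&q));
        return 0;
}
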
 
 /**
@@ -1553,8 +1890,9 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
                             struct sk_buff *skb, struct sk_buff *rx_gather[],
                             unsigned int gather_idx)
 {
-       rq->offload_pkts++;
-       skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
 
        if (rq->polling) {
                rx_gather[gather_idx++] = skb;
@@ -1583,7 +1921,7 @@ static void restart_tx(struct sge_qset *qs)
            test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
                qs->txq[TXQ_ETH].restarts++;
                if (netif_running(qs->netdev))
-                       netif_wake_queue(qs->netdev);
+                       netif_tx_wake_queue(qs->tx_q);
        }
 
        if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
@@ -1601,6 +1939,54 @@ static void restart_tx(struct sge_qset *qs)
 }
 
 /**
+ *     cxgb3_arp_process - process an ARP request probing a private IP address
+ *     @adapter: the adapter
+ *     @skb: the skbuff containing the ARP request
+ *
+ *     Check if the ARP request is probing the private IP address
+ *     dedicated to iSCSI and generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct port_info *pi;
+       struct arphdr *arp;
+       unsigned char *arp_ptr;
+       unsigned char *sha;
+       __be32 sip, tip;
+
+       if (!dev)
+               return;
+
+       skb_reset_network_header(skb);
+       arp = arp_hdr(skb);
+
+       if (arp->ar_op != htons(ARPOP_REQUEST))
+               return;
+
+       arp_ptr = (unsigned char *)(arp + 1);
+       sha = arp_ptr;
+       arp_ptr += dev->addr_len;
+       memcpy(&sip, arp_ptr, sizeof(sip));
+       arp_ptr += sizeof(sip);
+       arp_ptr += dev->addr_len;
+       memcpy(&tip, arp_ptr, sizeof(tip));
+
+       pi = netdev_priv(dev);
+       if (tip != pi->iscsi_ipv4addr)
+               return;
+
+       arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+                dev->dev_addr, sha);
+
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+       return skb->protocol == htons(ETH_P_ARP);
+}
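
cxgb3_arp_process() parses the ARP payload by hand: after the fixed arphdr come sender MAC, sender IP, target MAC and target IP, each sized by dev->addr_len or sizeof(__be32), and only requests whose target IP equals the port's private iSCSI address get a reply. A standalone sketch of the same pointer walk over an Ethernet/IPv4 ARP body (raw bytes, hypothetical addresses, no arp_send()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Walk the ARP body that follows the 8-byte fixed header:
 * sender MAC, sender IP, target MAC, target IP. */
static void parse_arp_body(const uint8_t *arp_ptr, unsigned int addr_len,
                           uint8_t *sha, uint32_t *sip, uint32_t *tip)
{
        memcpy(sha, arp_ptr, addr_len);      /* sender hardware address */
        arp_ptr += addr_len;
        memcpy(sip, arp_ptr, sizeof(*sip));  /* sender IP */
        arp_ptr += sizeof(*sip);
        arp_ptr += addr_len;                 /* skip the target hardware address */
        memcpy(tip, arp_ptr, sizeof(*tip));  /* target IP */
}

int main(void)
{
        /* who-has 10.0.0.2 tell 10.0.0.1, sender MAC 02:00:00:00:00:01 */
        const uint8_t body[] = {
                0x02, 0x00, 0x00, 0x00, 0x00, 0x01,  /* sha */
                10, 0, 0, 1,                         /* sip */
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  /* tha (ignored) */
                10, 0, 0, 2,                         /* tip */
        };
        const uint8_t iscsi_ip[4] = { 10, 0, 0, 2 };  /* hypothetical private address */
        uint8_t sha[ETH_ALEN];
        uint32_t sip, tip, want;

        memcpy(&want, iscsi_ip, sizeof(want));
        parse_arp_body(body, ETH_ALEN, sha, &sip, &tip);
        if (tip == want)
                printf("request targets our iSCSI address, reply to %02x:%02x:...\n",
                       sha[0], sha[1]);
        return 0;
}
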
+
+/**
  *     rx_eth - process an ingress ethernet packet
  *     @adap: the adapter
  *     @rq: the response queue that received the packet
@@ -1612,39 +1998,149 @@ static void restart_tx(struct sge_qset *qs)
  *     if it was immediate data in a response.
  */
 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
-                  struct sk_buff *skb, int pad)
+                  struct sk_buff *skb, int pad, int lro)
 {
        struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+       struct sge_qset *qs = rspq_to_qset(rq);
        struct port_info *pi;
 
-       rq->eth_pkts++;
        skb_pull(skb, sizeof(*p) + pad);
-       skb->dev = adap->port[p->iff];
-       skb->dev->last_rx = jiffies;
-       skb->protocol = eth_type_trans(skb, skb->dev);
+       skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
        pi = netdev_priv(skb->dev);
-       if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
-           !p->fragment) {
-               rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+       if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
+           p->csum == htons(0xffff) && !p->fragment) {
+               qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
                skb->ip_summed = CHECKSUM_NONE;
+       skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
 
        if (unlikely(p->vlan_valid)) {
                struct vlan_group *grp = pi->vlan_grp;
 
-               rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
+               qs->port_stats[SGE_PSTAT_VLANEX]++;
                if (likely(grp))
-                       __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
-                                         rq->polling);
+                       if (lro)
+                               vlan_gro_receive(&qs->napi, grp,
+                                                ntohs(p->vlan), skb);
+                       else {
+                               if (unlikely(pi->iscsi_ipv4addr &&
+                                   is_arp(skb))) {
+                                       unsigned short vtag = ntohs(p->vlan) &
+                                                               VLAN_VID_MASK;
+                                       skb->dev = vlan_group_get_device(grp,
+                                                                        vtag);
+                                       cxgb3_arp_process(adap, skb);
+                               }
+                               __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
+                                                 rq->polling);
+                       }
                else
                        dev_kfree_skb_any(skb);
-       } else if (rq->polling)
-               netif_receive_skb(skb);
-       else
+       } else if (rq->polling) {
+               if (lro)
+                       napi_gro_receive(&qs->napi, skb);
+               else {
+                       if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
+                               cxgb3_arp_process(adap, skb);
+                       netif_receive_skb(skb);
+               }
+       } else
                netif_rx(skb);
 }
 
+static inline int is_eth_tcp(u32 rss)
+{
+       return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ *     lro_add_page - add a page chunk to an LRO session
+ *     @adap: the adapter
+ *     @qs: the associated queue set
+ *     @fl: the free list containing the page chunk to add
+ *     @len: packet length
+ *     @complete: Indicates the last fragment of a frame
+ *
+ *     Add a received packet contained in a page chunk to an existing LRO
+ *     session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+                        struct sge_fl *fl, int len, int complete)
+{
+       struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+       struct sk_buff *skb = NULL;
+       struct cpl_rx_pkt *cpl;
+       struct skb_frag_struct *rx_frag;
+       int nr_frags;
+       int offset = 0;
+
+       if (!qs->nomem) {
+               skb = napi_get_frags(&qs->napi);
+               qs->nomem = !skb;
+       }
+
+       fl->credits--;
+
+       pci_dma_sync_single_for_cpu(adap->pdev,
+                                   pci_unmap_addr(sd, dma_addr),
+                                   fl->buf_size - SGE_PG_RSVD,
+                                   PCI_DMA_FROMDEVICE);
+
+       (*sd->pg_chunk.p_cnt)--;
+       if (!*sd->pg_chunk.p_cnt)
+               pci_unmap_page(adap->pdev,
+                              sd->pg_chunk.mapping,
+                              fl->alloc_size,
+                              PCI_DMA_FROMDEVICE);
+
+       if (!skb) {
+               put_page(sd->pg_chunk.page);
+               if (complete)
+                       qs->nomem = 0;
+               return;
+       }
+
+       rx_frag = skb_shinfo(skb)->frags;
+       nr_frags = skb_shinfo(skb)->nr_frags;
+
+       if (!nr_frags) {
+               offset = 2 + sizeof(struct cpl_rx_pkt);
+               qs->lro_va = sd->pg_chunk.va + 2;
+       }
+       len -= offset;
+
+       prefetch(qs->lro_va);
+
+       rx_frag += nr_frags;
+       rx_frag->page = sd->pg_chunk.page;
+       rx_frag->page_offset = sd->pg_chunk.offset + offset;
+       rx_frag->size = len;
+
+       skb->len += len;
+       skb->data_len += len;
+       skb->truesize += len;
+       skb_shinfo(skb)->nr_frags++;
+
+       if (!complete)
+               return;
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+       cpl = qs->lro_va;
+
+       if (unlikely(cpl->vlan_valid)) {
+               struct net_device *dev = qs->netdev;
+               struct port_info *pi = netdev_priv(dev);
+               struct vlan_group *grp = pi->vlan_grp;
+
+               if (likely(grp != NULL)) {
+                       vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
+                       return;
+               }
+       }
+       napi_gro_frags(&qs->napi);
+}
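
lro_add_page() feeds received page chunks straight into a GRO frame: the first fragment of a frame hides the 2-byte alignment pad plus the CPL header (and remembers where the CPL sits via qs->lro_va), every fragment bumps len/data_len/truesize, and only the chunk flagged as end-of-packet is pushed into the GRO engine. A small bookkeeping model of how those length fields accumulate (plain structs, no napi/GRO; the CPL header size is assumed to be 8 bytes for the sketch):

#include <stdio.h>

#define CPL_HDR_LEN 8  /* assumed size of the CPL Rx header, for the sketch only */

struct frame_model {
        unsigned int nr_frags;
        unsigned int len;       /* total payload bytes exposed to the stack */
        unsigned int data_len;  /* bytes held in page fragments */
        unsigned int truesize;
};

/* Append one received chunk of 'len' bytes; the first chunk hides pad + CPL. */
static void add_chunk(struct frame_model *f, unsigned int len)
{
        unsigned int offset = f->nr_frags ? 0 : 2 + CPL_HDR_LEN;

        len -= offset;
        f->nr_frags++;
        f->len += len;
        f->data_len += len;
        f->truesize += len;
}

int main(void)
{
        struct frame_model f = { 0 };

        add_chunk(&f, 1510);  /* first chunk: pad + CPL + payload */
        add_chunk(&f, 1500);
        add_chunk(&f, 700);   /* end-of-packet chunk; driver would napi_gro_frags() */
        printf("%u frags, len %u, data_len %u\n", f.nr_frags, f.len, f.data_len);
        return 0;
}
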
+
 /**
  *     handle_rsp_cntrl_info - handles control information in a response
  *     @qs: the queue set corresponding to the response
@@ -1730,6 +2226,12 @@ static inline int is_new_response(const struct rsp_desc *r,
        return (r->intr_gen & F_RSPD_GEN2) == q->gen;
 }
 
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+       q->pg_skb = NULL;
+       q->rx_recycle_buf = 0;
+}
+
 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
                        V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
@@ -1767,10 +2269,11 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
        q->next_holdoff = q->holdoff_tmr;
 
        while (likely(budget_left && is_new_response(r, q))) {
-               int eth, ethpad = 0;
+               int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
                struct sk_buff *skb = NULL;
                u32 len, flags = ntohl(r->flags);
-               u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+               __be32 rss_hi = *(const __be32 *)r,
+                      rss_lo = r->rss_hdr.rss_hash_val;
 
                eth = r->rss_hdr.opcode == CPL_RX_PKT;
 
@@ -1786,7 +2289,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                } else if (flags & F_RSPD_IMM_DATA_VALID) {
                        skb = get_imm_packet(r);
                        if (unlikely(!skb)) {
-                             no_mem:
+no_mem:
                                q->next_holdoff = NOMEM_INTR_DELAY;
                                q->nomem++;
                                /* consume one credit since we tried */
@@ -1794,18 +2297,43 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                                break;
                        }
                        q->imm_data++;
+                       ethpad = 0;
                } else if ((len = ntohl(r->len_cq)) != 0) {
                        struct sge_fl *fl;
 
+                       lro &= eth && is_eth_tcp(rss_hi);
+
                        fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
-                       fl->credits--;
-                       skb = get_packet(adap, fl, G_RSPD_LEN(len),
-                                        eth ? SGE_RX_DROP_THRES : 0);
-                       if (!skb)
+                       if (fl->use_pages) {
+                               void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+
+                               prefetch(addr);
+#if L1_CACHE_BYTES < 128
+                               prefetch(addr + L1_CACHE_BYTES);
+#endif
+                               __refill_fl(adap, fl);
+                               if (lro > 0) {
+                                       lro_add_page(adap, qs, fl,
+                                                    G_RSPD_LEN(len),
+                                                    flags & F_RSPD_EOP);
+                                       goto next_fl;
+                               }
+
+                               skb = get_packet_pg(adap, fl, q,
+                                                   G_RSPD_LEN(len),
+                                                   eth ?
+                                                   SGE_RX_DROP_THRES : 0);
+                               q->pg_skb = skb;
+                       } else
+                               skb = get_packet(adap, fl, G_RSPD_LEN(len),
+                                                eth ? SGE_RX_DROP_THRES : 0);
+                       if (unlikely(!skb)) {
+                               if (!eth)
+                                       goto no_mem;
                                q->rx_drops++;
-                       else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
+                       } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
                                __skb_pull(skb, 2);
-                       ethpad = 2;
+next_fl:
                        if (++fl->cidx == fl->size)
                                fl->cidx = 0;
                } else
@@ -1829,22 +2357,31 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                        q->credits = 0;
                }
 
-               if (likely(skb != NULL)) {
+               packet_complete = flags &
+                                 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+                                  F_RSPD_ASYNC_NOTIF);
+
+               if (skb != NULL && packet_complete) {
                        if (eth)
-                               rx_eth(adap, q, skb, ethpad);
+                               rx_eth(adap, q, skb, ethpad, lro);
                        else {
+                               q->offload_pkts++;
                                /* Preserve the RSS info in csum & priority */
                                skb->csum = rss_hi;
                                skb->priority = rss_lo;
                                ngathered = rx_offload(&adap->tdev, q, skb,
-                                                      offload_skbs, ngathered);
+                                                      offload_skbs,
+                                                      ngathered);
                        }
-               }
 
+                       if (flags & F_RSPD_EOP)
+                               clear_rspq_bufstate(q);
+               }
                --budget_left;
        }
 
        deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+
        if (sleeping)
                check_ring_db(adap, qs, sleeping);
 
@@ -1858,56 +2395,54 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 
 static inline int is_pure_response(const struct rsp_desc *r)
 {
-       u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+       __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
 
        return (n | r->len_cq) == 0;
 }
 
 /**
  *     napi_rx_handler - the NAPI handler for Rx processing
- *     @dev: the net device
+ *     @napi: the napi instance
  *     @budget: how many packets we can process in this round
  *
  *     Handler for new data events when using NAPI.
  */
-static int napi_rx_handler(struct net_device *dev, int *budget)
+static int napi_rx_handler(struct napi_struct *napi, int budget)
 {
-       struct adapter *adap = dev->priv;
-       struct sge_qset *qs = dev2qset(dev);
-       int effective_budget = min(*budget, dev->quota);
-
-       int work_done = process_responses(adap, qs, effective_budget);
-       *budget -= work_done;
-       dev->quota -= work_done;
-
-       if (work_done >= effective_budget)
-               return 1;
+       struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+       struct adapter *adap = qs->adap;
+       int work_done = process_responses(adap, qs, budget);
 
-       netif_rx_complete(dev);
+       if (likely(work_done < budget)) {
+               napi_complete(napi);
 
-       /*
-        * Because we don't atomically flush the following write it is
-        * possible that in very rare cases it can reach the device in a way
-        * that races with a new response being written plus an error interrupt
-        * causing the NAPI interrupt handler below to return unhandled status
-        * to the OS.  To protect against this would require flushing the write
-        * and doing both the write and the flush with interrupts off.  Way too
-        * expensive and unjustifiable given the rarity of the race.
-        *
-        * The race cannot happen at all with MSI-X.
-        */
-       t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
-                    V_NEWTIMER(qs->rspq.next_holdoff) |
-                    V_NEWINDEX(qs->rspq.cidx));
-       return 0;
+               /*
+                * Because we don't atomically flush the following
+                * write it is possible that in very rare cases it can
+                * reach the device in a way that races with a new
+                * response being written plus an error interrupt
+                * causing the NAPI interrupt handler below to return
+                * unhandled status to the OS.  To protect against
+                * this would require flushing the write and doing
+                * both the write and the flush with interrupts off.
+                * Way too expensive and unjustifiable given the
+                * rarity of the race.
+                *
+                * The race cannot happen at all with MSI-X.
+                */
+               t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+                            V_NEWTIMER(qs->rspq.next_holdoff) |
+                            V_NEWINDEX(qs->rspq.cidx));
+       }
+       return work_done;
 }
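The poll callback above is attached to each queue set's napi_struct from the driver's setup code, which is outside this file; a minimal sketch of that registration, assuming the usual new-API pattern and an arbitrary weight of 64:

/* Sketch only: the real registration lives in cxgb3_main.c, not in sge.c,
 * and the weight value is an assumption rather than taken from this patch. */
static void example_register_napi(struct net_device *dev, struct sge_qset *qs)
{
        netif_napi_add(dev, &qs->napi, napi_rx_handler, 64);
}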
 
 /*
  * Returns true if the device is already scheduled for polling.
  */
-static inline int napi_is_scheduled(struct net_device *dev)
+static inline int napi_is_scheduled(struct napi_struct *napi)
 {
-       return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+       return test_bit(NAPI_STATE_SCHED, &napi->state);
 }
 
 /**
@@ -1990,8 +2525,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
                             V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
                return 0;
        }
-       if (likely(__netif_rx_schedule_prep(qs->netdev)))
-               __netif_rx_schedule(qs->netdev);
+       napi_schedule(&qs->napi);
        return 1;
 }
 
@@ -2002,7 +2536,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
 {
        struct sge_qset *qs = cookie;
-       struct adapter *adap = qs->netdev->priv;
+       struct adapter *adap = qs->adap;
        struct sge_rspq *q = &qs->rspq;
 
        spin_lock(&q->lock);
@@ -2018,16 +2552,14 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
  * (i.e., response queue serviced by NAPI polling).
  */
-irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
 {
        struct sge_qset *qs = cookie;
-       struct adapter *adap = qs->netdev->priv;
        struct sge_rspq *q = &qs->rspq;
 
        spin_lock(&q->lock);
-       BUG_ON(napi_is_scheduled(qs->netdev));
 
-       if (handle_responses(adap, q) < 0)
+       if (handle_responses(qs->adap, q) < 0)
                q->unhandled_irqs++;
        spin_unlock(&q->lock);
        return IRQ_HANDLED;
@@ -2070,11 +2602,13 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
        return IRQ_HANDLED;
 }
 
-static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
+static int rspq_check_napi(struct sge_qset *qs)
 {
-       if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
-               if (likely(__netif_rx_schedule_prep(dev)))
-                       __netif_rx_schedule(dev);
+       struct sge_rspq *q = &qs->rspq;
+
+       if (!napi_is_scheduled(&qs->napi) &&
+           is_new_response(&q->desc[q->cidx], q)) {
+               napi_schedule(&qs->napi);
                return 1;
        }
        return 0;
@@ -2087,7 +2621,7 @@ static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
  * one SGE response queue per port in this mode and protect all response
  * queues with queue 0's lock.
  */
-irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
 {
        int new_packets;
        struct adapter *adap = cookie;
@@ -2095,10 +2629,9 @@ irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
 
        spin_lock(&q->lock);
 
-       new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
+       new_packets = rspq_check_napi(&adap->sge.qs[0]);
        if (adap->params.nports == 2)
-               new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
-                                              &adap->sge.qs[1].rspq);
+               new_packets += rspq_check_napi(&adap->sge.qs[1]);
        if (!new_packets && t3_slow_intr_handler(adap) == 0)
                q->unhandled_irqs++;
 
@@ -2201,9 +2734,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
 {
        u32 map;
-       struct net_device *dev;
        struct adapter *adap = cookie;
-       struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+       struct sge_qset *qs0 = &adap->sge.qs[0];
+       struct sge_rspq *q0 = &qs0->rspq;
 
        t3_write_reg(adap, A_PL_CLI, 0);
        map = t3_read_reg(adap, A_SG_DATA_INTR);
@@ -2216,18 +2749,11 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
        if (unlikely(map & F_ERRINTR))
                t3_slow_intr_handler(adap);
 
-       if (likely(map & 1)) {
-               dev = adap->sge.qs[0].netdev;
-
-               if (likely(__netif_rx_schedule_prep(dev)))
-                       __netif_rx_schedule(dev);
-       }
-       if (map & 2) {
-               dev = adap->sge.qs[1].netdev;
+       if (likely(map & 1))
+               napi_schedule(&qs0->napi);
 
-               if (likely(__netif_rx_schedule_prep(dev)))
-                       __netif_rx_schedule(dev);
-       }
+       if (map & 2)
+               napi_schedule(&adap->sge.qs[1].napi);
 
        spin_unlock(&q0->lock);
        return IRQ_HANDLED;
@@ -2242,7 +2768,7 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  *     (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
  *     response queues.
  */
-intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
 {
        if (adap->flags & USING_MSIX)
                return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
@@ -2253,6 +2779,15 @@ intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
        return t3_intr;
 }
 
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+                   F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+                   V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+                   F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+                   F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+                     F_RSPQDISABLED)
+
 /**
  *     t3_sge_err_intr_handler - SGE async event interrupt handler
  *     @adapter: the adapter
@@ -2261,7 +2796,15 @@ intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
  */
 void t3_sge_err_intr_handler(struct adapter *adapter)
 {
-       unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+       unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+                                ~F_FLEMPTY;
+
+       if (status & SGE_PARERR)
+               CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+                        status & SGE_PARERR);
+       if (status & SGE_FRAMINGERR)
+               CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+                        status & SGE_FRAMINGERR);
 
        if (status & F_RSPQCREDITOVERFOW)
                CH_ALERT(adapter, "SGE response queue credit overflow\n");
@@ -2274,19 +2817,23 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
                         "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
        }
 
+       if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
+               CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
+                        status & F_HIPIODRBDROPERR ? "high" : "low");
+
        t3_write_reg(adapter, A_SG_INT_CAUSE, status);
-       if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
+       if (status & SGE_FATALERR)
                t3_fatal_err(adapter);
 }
 
 /**
- *     sge_timer_cb - perform periodic maintenance of an SGE qset
+ *     sge_timer_tx - perform periodic maintenance of an SGE qset
  *     @data: the SGE queue set to maintain
  *
  *     Runs periodically from a timer to perform maintenance of an SGE queue
- *     set.  It performs two tasks:
+ *     set.  It performs one task:
  *
- *     a) Cleans up any completed Tx descriptors that may still be pending.
+ *     Cleans up any completed Tx descriptors that may still be pending.
  *     Normal descriptor cleanup happens when new packets are added to a Tx
  *     queue so this timer is relatively infrequent and does any cleanup only
  *     if the Tx queue has not seen any new packets in a while.  We make a
@@ -2296,38 +2843,88 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
  *     up).  Since control queues use immediate data exclusively we don't
  *     bother cleaning them up here.
  *
- *     b) Replenishes Rx queues that have run out due to memory shortage.
+ */
+static void sge_timer_tx(unsigned long data)
+{
+       struct sge_qset *qs = (struct sge_qset *)data;
+       struct port_info *pi = netdev_priv(qs->netdev);
+       struct adapter *adap = pi->adapter;
+       unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+       unsigned long next_period;
+
+       if (__netif_tx_trylock(qs->tx_q)) {
+               tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+                                                   TX_RECLAIM_TIMER_CHUNK);
+               __netif_tx_unlock(qs->tx_q);
+       }
+
+       if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
+               tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+                                                    TX_RECLAIM_TIMER_CHUNK);
+               spin_unlock(&qs->txq[TXQ_OFLD].lock);
+       }
+
+       next_period = TX_RECLAIM_PERIOD >>
+                      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+                      TX_RECLAIM_TIMER_CHUNK);
+       mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
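The rearm interval above shrinks as the reclaim count grows, so the timer fires more often while work is still outstanding. A quick worked example with illustrative numbers only: if the larger of the two counts returned by reclaim_completed_tx() equals 2 * TX_RECLAIM_TIMER_CHUNK, the shift is 2 and next_period becomes TX_RECLAIM_PERIOD >> 2, i.e. the timer refires four times sooner than when both queues are quiet.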
+
+/*
+ *     sge_timer_rx - perform periodic maintenance of an SGE qset
+ *     @data: the SGE queue set to maintain
+ *
+ *     a) Replenishes Rx queues that have run out due to memory shortage.
  *     Normally new Rx buffers are added when existing ones are consumed but
  *     when out of memory a queue can become empty.  We try to add only a few
  *     buffers here, the queue will be replenished fully as these new buffers
  *     are used up if memory shortage has subsided.
+ *
+ *     b) Returns coalesced response queue credits in case a response queue is
+ *     starved.
+ *
  */
-static void sge_timer_cb(unsigned long data)
+static void sge_timer_rx(unsigned long data)
 {
        spinlock_t *lock;
        struct sge_qset *qs = (struct sge_qset *)data;
-       struct adapter *adap = qs->netdev->priv;
-
-       if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
-               reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-               spin_unlock(&qs->txq[TXQ_ETH].lock);
-       }
-       if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
-               reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
-               spin_unlock(&qs->txq[TXQ_OFLD].lock);
-       }
-       lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
-           &adap->sge.qs[0].rspq.lock;
-       if (spin_trylock_irq(lock)) {
-               if (!napi_is_scheduled(qs->netdev)) {
-                       if (qs->fl[0].credits < qs->fl[0].size)
-                               __refill_fl(adap, &qs->fl[0]);
-                       if (qs->fl[1].credits < qs->fl[1].size)
-                               __refill_fl(adap, &qs->fl[1]);
+       struct port_info *pi = netdev_priv(qs->netdev);
+       struct adapter *adap = pi->adapter;
+       u32 status;
+
+       lock = adap->params.rev > 0 ?
+              &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+       if (!spin_trylock_irq(lock))
+               goto out;
+
+       if (napi_is_scheduled(&qs->napi))
+               goto unlock;
+
+       if (adap->params.rev < 4) {
+               status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+               if (status & (1 << qs->rspq.cntxt_id)) {
+                       qs->rspq.starved++;
+                       if (qs->rspq.credits) {
+                               qs->rspq.credits--;
+                               refill_rspq(adap, &qs->rspq, 1);
+                               qs->rspq.restarted++;
+                               t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+                                            1 << qs->rspq.cntxt_id);
+                       }
                }
-               spin_unlock_irq(lock);
        }
-       mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+       if (qs->fl[0].credits < qs->fl[0].size)
+               __refill_fl(adap, &qs->fl[0]);
+       if (qs->fl[1].credits < qs->fl[1].size)
+               __refill_fl(adap, &qs->fl[1]);
+
+unlock:
+       spin_unlock_irq(lock);
+out:
+       mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
 }
 
 /**
@@ -2340,12 +2937,9 @@ static void sge_timer_cb(unsigned long data)
  */
 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
 {
-       if (!qs->netdev)
-               return;
-
        qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
        qs->rspq.polling = p->polling;
-       qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
+       qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
 }
 
 /**
@@ -2357,6 +2951,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  *     @p: configuration parameters for this queue set
  *     @ntxq: number of Tx queues for the queue set
  *     @netdev: net device associated with this queue set
+ *     @netdevq: net device TX queue associated with this queue set
  *
  *     Allocate resources and initialize an SGE queue set.  A queue set
  *     comprises a response queue, two Rx free-buffer queues, and up to 3
@@ -2365,15 +2960,15 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  */
 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                      int irq_vec_idx, const struct qset_params *p,
-                     int ntxq, struct net_device *netdev)
+                     int ntxq, struct net_device *dev,
+                     struct netdev_queue *netdevq)
 {
-       int i, ret = -ENOMEM;
+       int i, avail, ret = -ENOMEM;
        struct sge_qset *q = &adapter->sge.qs[id];
 
        init_qset_cntxt(q, id);
-       init_timer(&q->tx_reclaim_timer);
-       q->tx_reclaim_timer.data = (unsigned long)q;
-       q->tx_reclaim_timer.function = sge_timer_cb;
+       setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+       setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
 
        q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
                                   sizeof(struct rx_desc),
@@ -2427,36 +3022,45 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
        q->rspq.gen = 1;
        q->rspq.size = p->rspq_size;
        spin_lock_init(&q->rspq.lock);
+       skb_queue_head_init(&q->rspq.rx_queue);
 
        q->txq[TXQ_ETH].stop_thres = nports *
            flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
-       if (ntxq == 1) {
-               q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
-                   sizeof(struct cpl_rx_pkt);
-               q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
-                   sizeof(struct cpl_rx_pkt);
-       } else {
-               q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
-                   sizeof(struct cpl_rx_data);
-               q->fl[1].buf_size = (16 * 1024) -
-                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-       }
+#if FL0_PG_CHUNK_SIZE > 0
+       q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
+#else
+       q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
+#endif
+#if FL1_PG_CHUNK_SIZE > 0
+       q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
+       q->fl[1].buf_size = is_offload(adapter) ?
+               (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+               MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
 
-       spin_lock(&adapter->sge.reg_lock);
+       q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+       q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+       q->fl[0].order = FL0_PG_ORDER;
+       q->fl[1].order = FL1_PG_ORDER;
+       q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+       q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
+
+       spin_lock_irq(&adapter->sge.reg_lock);
 
        /* FL threshold comparison uses < */
        ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
                                   q->rspq.phys_addr, q->rspq.size,
-                                  q->fl[0].buf_size, 1, 0);
+                                  q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
        if (ret)
                goto err_unlock;
 
        for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
                ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
                                          q->fl[i].phys_addr, q->fl[i].size,
-                                         q->fl[i].buf_size, p->cong_thres, 1,
-                                         0);
+                                         q->fl[i].buf_size - SGE_PG_RSVD,
+                                         p->cong_thres, 1, 0);
                if (ret)
                        goto err_unlock;
        }
@@ -2487,36 +3091,84 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                        goto err_unlock;
        }
 
-       spin_unlock(&adapter->sge.reg_lock);
-       q->netdev = netdev;
-       t3_update_qset_coalesce(q, p);
+       spin_unlock_irq(&adapter->sge.reg_lock);
 
-       /*
-        * We use atalk_ptr as a backpointer to a qset.  In case a device is
-        * associated with multiple queue sets only the first one sets
-        * atalk_ptr.
-        */
-       if (netdev->atalk_ptr == NULL)
-               netdev->atalk_ptr = q;
+       q->adap = adapter;
+       q->netdev = dev;
+       q->tx_q = netdevq;
+       t3_update_qset_coalesce(q, p);
 
-       refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
-       refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+       avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+                         GFP_KERNEL | __GFP_COMP);
+       if (!avail) {
+               CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+               goto err;
+       }
+       if (avail < q->fl[0].size)
+               CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+                       avail);
+
+       avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+                         GFP_KERNEL | __GFP_COMP);
+       if (avail < q->fl[1].size)
+               CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+                       avail);
        refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
 
        t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
                     V_NEWTIMER(q->rspq.holdoff_tmr));
 
-       mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
        return 0;
 
-      err_unlock:
-       spin_unlock(&adapter->sge.reg_lock);
-      err:
+err_unlock:
+       spin_unlock_irq(&adapter->sge.reg_lock);
+err:
        t3_free_qset(adapter, q);
        return ret;
 }
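For orientation, a hedged caller-side sketch of the new eight-argument signature; qset_idx, irq_idx, ntxq, qp and the error label are placeholders rather than values from this patch, and the real call sites live in cxgb3_main.c:

        /* Sketch only -- placeholder arguments, not values from this patch. */
        err = t3_sge_alloc_qset(adap, qset_idx, 1 /* nports */, irq_idx,
                                qp /* this set's qset_params */, ntxq, dev,
                                netdev_get_tx_queue(dev, 0));
        if (err)
                goto err_free_qsets;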
 
 /**
+ *     t3_start_sge_timers - start SGE timer callbacks
+ *     @adap: the adapter
+ *
+ *     Starts each SGE queue set's timer callbacks
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+       int i;
+
+       for (i = 0; i < SGE_QSETS; ++i) {
+               struct sge_qset *q = &adap->sge.qs[i];
+
+               if (q->tx_reclaim_timer.function)
+                       mod_timer(&q->tx_reclaim_timer,
+                                 jiffies + TX_RECLAIM_PERIOD);
+
+               if (q->rx_reclaim_timer.function)
+                       mod_timer(&q->rx_reclaim_timer,
+                                 jiffies + RX_RECLAIM_PERIOD);
+       }
+}
+
+/**
+ *     t3_stop_sge_timers - stop SGE timer callbacks
+ *     @adap: the adapter
+ *
+ *     Stops each SGE queue set's timer callbacks
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+       int i;
+
+       for (i = 0; i < SGE_QSETS; ++i) {
+               struct sge_qset *q = &adap->sge.qs[i];
+
+               if (q->tx_reclaim_timer.function)
+                       del_timer_sync(&q->tx_reclaim_timer);
+               if (q->rx_reclaim_timer.function)
+                       del_timer_sync(&q->rx_reclaim_timer);
+       }
+}
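A minimal sketch of how the helpers above are meant to pair with resource teardown; the wrapper name is an assumption, only t3_stop_sge_timers() and t3_free_sge_resources() come from this file:

/* Sketch only: quiesce the reclaim timers before freeing the queue sets
 * they reference.  The real teardown path is in cxgb3_main.c. */
static void example_sge_teardown(struct adapter *adap)
{
        t3_stop_sge_timers(adap);
        t3_free_sge_resources(adap);
}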
+
+/**
  *     t3_free_sge_resources - free SGE resources
  *     @adap: the adapter
  *
@@ -2585,7 +3237,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
        unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
 
        ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
-           F_CQCRDTCTRL |
+           F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
            V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
            V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
 #if SGE_NUM_GENBITS == 1
@@ -2594,7 +3246,6 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
        if (adap->params.rev > 0) {
                if (!(adap->flags & (USING_MSIX | USING_MSI)))
                        ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
-               ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
        }
        t3_write_reg(adap, A_SG_CONTROL, ctrl);
        t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
@@ -2602,7 +3253,8 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
        t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
        t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
                     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
-       t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
+       t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+                    adap->params.rev < T3_REV_C ? 1000 : 500);
        t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
        t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
        t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
@@ -2619,7 +3271,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
  *     defaults for the assorted SGE parameters, which admins can change until
  *     they are used to initialize the SGE.
  */
-void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
 {
        int i;
 
@@ -2632,8 +3284,8 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
                q->polling = adap->params.rev > 0;
                q->coalesce_usecs = 5;
                q->rspq_size = 1024;
-               q->fl_size = 4096;
-               q->jumbo_size = 512;
+               q->fl_size = 1024;
+               q->jumbo_size = 512;
                q->txq_size[TXQ_ETH] = 1024;
                q->txq_size[TXQ_OFLD] = 1024;
                q->txq_size[TXQ_CTRL] = 256;