tg3: Fix std rx prod ring handling
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index beda9bf..4e8b87d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.103"
-#define DRV_MODULE_RELDATE     "November 2, 2009"
+#define DRV_MODULE_VERSION     "3.105"
+#define DRV_MODULE_RELDATE     "December 2, 2009"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
 #define TG3_RX_STD_MAP_SZ              TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ              TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
+#define TG3_RX_STD_BUFF_RING_SIZE \
+       (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+
+#define TG3_RX_JMB_BUFF_RING_SIZE \
+       (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
 
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
-       if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
+       if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
@@ -4342,13 +4351,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;
 
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+       if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
                index--;
 
        txq = netdev_get_tx_queue(tp->dev, index);
 
        while (sw_idx != hw_idx) {
-               struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+               struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;
 
@@ -4357,7 +4366,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
                        return;
                }
 
-               skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+               pci_unmap_single(tp->pdev,
+                                pci_unmap_addr(ri, mapping),
+                                skb_headlen(skb),
+                                PCI_DMA_TODEVICE);
 
                ri->skb = NULL;
 
@@ -4367,6 +4379,11 @@ static void tg3_tx(struct tg3_napi *tnapi)
                        ri = &tnapi->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;
+
+                       pci_unmap_page(tp->pdev,
+                                      pci_unmap_addr(ri, mapping),
+                                      skb_shinfo(skb)->frags[i].size,
+                                      PCI_DMA_TODEVICE);
                        sw_idx = NEXT_TX(sw_idx);
                }
 
@@ -4397,6 +4414,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
        }
 }
 
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+       if (!ri->skb)
+               return;
+
+       pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+                        map_sz, PCI_DMA_FROMDEVICE);
+       dev_kfree_skb_any(ri->skb);
+       ri->skb = NULL;
+}
+
 /* Returns size of skb allocated or < 0 on error.
  *
  * We only need to fill in the address because the other members
@@ -4541,7 +4569,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+       struct tg3_rx_prodring_set *tpr = tnapi->prodring;
 
        hw_idx = *(tnapi->rx_rcb_prod_idx);
        /*
@@ -4564,13 +4592,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
-                       ri = &tpr->rx_std_buffers[desc_idx];
+                       ri = &tp->prodring[0].rx_std_buffers[desc_idx];
                        dma_addr = pci_unmap_addr(ri, mapping);
                        skb = ri->skb;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-                       ri = &tpr->rx_jmb_buffers[desc_idx];
+                       ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
                        dma_addr = pci_unmap_addr(ri, mapping);
                        skb = ri->skb;
                        post_ptr = &jmb_prod_idx;
@@ -4593,13 +4621,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;
 
-               if (len > RX_COPY_THRESHOLD
-                       && tp->rx_offset == NET_IP_ALIGN
-                       /* rx_offset will likely not equal NET_IP_ALIGN
-                        * if this is a 5701 card running in PCI-X mode
-                        * [see tg3_get_invariants()]
-                        */
-               ) {
+               if (len > RX_COPY_THRESHOLD &&
+                   tp->rx_offset == NET_IP_ALIGN) {
+                   /* rx_offset will likely not equal NET_IP_ALIGN
+                    * if this is a 5701 card running in PCI-X mode
+                    * [see tg3_get_invariants()]
+                    */
                        int skb_size;
 
                        skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4666,10 +4693,9 @@ next_pkt:
                (*post_ptr)++;
 
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
-                       u32 idx = *post_ptr % TG3_RX_RING_SIZE;
-
-                       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
-                                    TG3_64BIT_REG_LOW, idx);
+                       tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+                                    tpr->rx_std_prod_idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
@@ -4689,17 +4715,30 @@ next_pkt_nopost:
        tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
        /* Refill RX ring(s). */
-       if (work_mask & RXD_OPAQUE_RING_STD) {
+       if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+               if (work_mask & RXD_OPAQUE_RING_STD) {
+                       tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+                                    tpr->rx_std_prod_idx);
+               }
+               if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+                       tpr->rx_jmb_prod_idx = jmb_prod_idx %
+                                              TG3_RX_JUMBO_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+                                    tpr->rx_jmb_prod_idx);
+               }
+               mmiowb();
+       } else if (work_mask) {
+               /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+                * updated before the producer indices can be updated.
+                */
+               smp_wmb();
+
                tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
-               tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-                            tpr->rx_std_prod_idx);
-       }
-       if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
-               tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-                            tpr->rx_jmb_prod_idx);
+
+               napi_schedule(&tp->napi[1].napi);
        }
-       mmiowb();
 
        return received;
 }
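
/* The RSS refill path above hands recycled rx buffers from the per-vector
 * rings over to vector 1: tg3_rx() fills the ring_info entries, issues
 * smp_wmb(), and only then advances its producer index, while
 * tg3_rx_prodring_xfer() (added further below) snapshots the producer index
 * and issues smp_rmb() before copying entries.  What follows is only an
 * illustrative, standalone sketch of that barrier pairing -- hypothetical
 * names, assuming the driver's ring_info and TG3_RX_RING_SIZE, and assuming
 * the producer never laps the consumer; it is not part of the patch.
 */
struct demo_ring {
        struct ring_info        bufs[TG3_RX_RING_SIZE];
        u32                     prod_idx;       /* advanced by producer only */
        u32                     cons_idx;       /* advanced by consumer only */
};

static void demo_publish(struct demo_ring *r, const struct ring_info *ri)
{
        r->bufs[r->prod_idx] = *ri;             /* 1. fill the entry         */
        smp_wmb();                              /* 2. entry before index     */
        r->prod_idx = (r->prod_idx + 1) % TG3_RX_RING_SIZE;
}

static bool demo_consume(struct demo_ring *r, struct ring_info *ri)
{
        u32 prod = r->prod_idx;                 /* snapshot producer index   */

        smp_rmb();                              /* pairs with smp_wmb() above */
        if (r->cons_idx == prod)
                return false;                   /* ring empty                */

        *ri = r->bufs[r->cons_idx];
        r->cons_idx = (r->cons_idx + 1) % TG3_RX_RING_SIZE;
        return true;
}
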
@@ -4730,6 +4769,93 @@ static void tg3_poll_link(struct tg3 *tp)
        }
 }
 
+static void tg3_rx_prodring_xfer(struct tg3 *tp,
+                                struct tg3_rx_prodring_set *dpr,
+                                struct tg3_rx_prodring_set *spr)
+{
+       u32 si, di, cpycnt, src_prod_idx;
+       int i;
+
+       while (1) {
+               src_prod_idx = spr->rx_std_prod_idx;
+
+               /* Make sure updates to the rx_std_buffers[] entries and the
+                * standard producer index are seen in the correct order.
+                */
+               smp_rmb();
+
+               if (spr->rx_std_cons_idx == src_prod_idx)
+                       break;
+
+               if (spr->rx_std_cons_idx < src_prod_idx)
+                       cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+               else
+                       cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+
+               cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+
+               si = spr->rx_std_cons_idx;
+               di = dpr->rx_std_prod_idx;
+
+               memcpy(&dpr->rx_std_buffers[di],
+                      &spr->rx_std_buffers[si],
+                      cpycnt * sizeof(struct ring_info));
+
+               for (i = 0; i < cpycnt; i++, di++, si++) {
+                       struct tg3_rx_buffer_desc *sbd, *dbd;
+                       sbd = &spr->rx_std[si];
+                       dbd = &dpr->rx_std[di];
+                       dbd->addr_hi = sbd->addr_hi;
+                       dbd->addr_lo = sbd->addr_lo;
+               }
+
+               spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
+                                      TG3_RX_RING_SIZE;
+               dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
+                                      TG3_RX_RING_SIZE;
+       }
+
+       while (1) {
+               src_prod_idx = spr->rx_jmb_prod_idx;
+
+               /* Make sure updates to the rx_jmb_buffers[] entries and
+                * the jumbo producer index are seen in the correct order.
+                */
+               smp_rmb();
+
+               if (spr->rx_jmb_cons_idx == src_prod_idx)
+                       break;
+
+               if (spr->rx_jmb_cons_idx < src_prod_idx)
+                       cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+               else
+                       cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+
+               cpycnt = min(cpycnt,
+                            TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+
+               si = spr->rx_jmb_cons_idx;
+               di = dpr->rx_jmb_prod_idx;
+
+               memcpy(&dpr->rx_jmb_buffers[di],
+                      &spr->rx_jmb_buffers[si],
+                      cpycnt * sizeof(struct ring_info));
+
+               for (i = 0; i < cpycnt; i++, di++, si++) {
+                       struct tg3_rx_buffer_desc *sbd, *dbd;
+                       sbd = &spr->rx_jmb[si].std;
+                       dbd = &dpr->rx_jmb[di].std;
+                       dbd->addr_hi = sbd->addr_hi;
+                       dbd->addr_lo = sbd->addr_lo;
+               }
+
+               spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
+                                      TG3_RX_JUMBO_RING_SIZE;
+               dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
+                                      TG3_RX_JUMBO_RING_SIZE;
+       }
+}
+
 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 {
        struct tg3 *tp = tnapi->tp;
@@ -4748,6 +4874,30 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);
 
+       if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+               int i;
+               u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
+               u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+
+               for (i = 2; i < tp->irq_cnt; i++)
+                       tg3_rx_prodring_xfer(tp, tnapi->prodring,
+                                            tp->napi[i].prodring);
+
+               wmb();
+
+               if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
+                       u32 mbox = TG3_RX_STD_PROD_IDX_REG;
+                       tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
+               }
+
+               if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
+                       u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
+                       tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
+               }
+
+               mmiowb();
+       }
+
        return work_done;
 }
 
@@ -5192,17 +5342,21 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
-               ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
-               new_addr = skb_shinfo(new_skb)->dma_head;
+               new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+                                         PCI_DMA_TODEVICE);
+               /* Make sure the mapping succeeded */
+               if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+                       ret = -1;
+                       dev_kfree_skb(new_skb);
+                       new_skb = NULL;
 
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
-               if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
-                           tg3_4g_overflow_test(new_addr, new_skb->len))) {
-                       if (!ret)
-                               skb_dma_unmap(&tp->pdev->dev, new_skb,
-                                             DMA_TO_DEVICE);
+               } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+                           tg3_4g_overflow_test(new_addr, new_skb->len)) {
+                       pci_unmap_single(tp->pdev, new_addr, new_skb->len,
+                                        PCI_DMA_TODEVICE);
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
@@ -5216,15 +5370,28 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
+               int len;
+
                if (i == 0)
-                       tnapi->tx_buffers[entry].skb = new_skb;
+                       len = skb_headlen(skb);
                else
+                       len = skb_shinfo(skb)->frags[i-1].size;
+
+               pci_unmap_single(tp->pdev,
+                                pci_unmap_addr(&tnapi->tx_buffers[entry],
+                                               mapping),
+                                len, PCI_DMA_TODEVICE);
+               if (i == 0) {
+                       tnapi->tx_buffers[entry].skb = new_skb;
+                       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+                                          new_addr);
+               } else {
                        tnapi->tx_buffers[entry].skb = NULL;
+               }
                entry = NEXT_TX(entry);
                i++;
        }
 
-       skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
        dev_kfree_skb(skb);
 
        return ret;
@@ -5261,14 +5428,15 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 {
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
-       struct skb_shared_info *sp;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
+       unsigned int i, last;
+
 
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+       if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
                tnapi++;
 
        /* We are running in BH disabled context with netif_tx_lock
@@ -5335,20 +5503,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
                               (vlan_tx_tag_get(skb) << 16));
 #endif
 
-       if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+       len = skb_headlen(skb);
+
+       /* Queue skb data, a.k.a. the main skb fragment. */
+       mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }
 
-       sp = skb_shinfo(skb);
-
-       mapping = sp->dma_head;
-
        tnapi->tx_buffers[entry].skb = skb;
+       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
-       len = skb_headlen(skb);
-
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+       if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
            !mss && skb->len > ETH_DATA_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;
 
@@ -5359,15 +5526,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 
        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
-               unsigned int i, last;
-
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                        len = frag->size;
-                       mapping = sp->dma_maps[i];
+                       mapping = pci_map_page(tp->pdev,
+                                              frag->page,
+                                              frag->page_offset,
+                                              len, PCI_DMA_TODEVICE);
+                       if (pci_dma_mapping_error(tp->pdev, mapping))
+                               goto dma_error;
+
                        tnapi->tx_buffers[entry].skb = NULL;
+                       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+                                          mapping);
 
                        tg3_set_txd(tnapi, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));
@@ -5390,6 +5563,27 @@ out_unlock:
        mmiowb();
 
        return NETDEV_TX_OK;
+
+dma_error:
+       last = i;
+       entry = tnapi->tx_prod;
+       tnapi->tx_buffers[entry].skb = NULL;
+       pci_unmap_single(tp->pdev,
+                        pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+                        skb_headlen(skb),
+                        PCI_DMA_TODEVICE);
+       for (i = 0; i <= last; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               entry = NEXT_TX(entry);
+
+               pci_unmap_page(tp->pdev,
+                              pci_unmap_addr(&tnapi->tx_buffers[entry],
+                                             mapping),
+                              frag->size, PCI_DMA_TODEVICE);
+       }
+
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
@@ -5437,15 +5631,16 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 {
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
-       struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
+       unsigned int i, last;
+
 
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+       if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
                tnapi++;
 
        /* We are running in BH disabled context with netif_tx_lock
@@ -5532,25 +5727,23 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                               (vlan_tx_tag_get(skb) << 16));
 #endif
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+       if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
            !mss && skb->len > ETH_DATA_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;
 
-       if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+       len = skb_headlen(skb);
+
+       mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }
 
-       sp = skb_shinfo(skb);
-
-       mapping = sp->dma_head;
-
        tnapi->tx_buffers[entry].skb = skb;
+       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
        would_hit_hwbug = 0;
 
-       len = skb_headlen(skb);
-
        if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
                would_hit_hwbug = 1;
 
@@ -5572,16 +5765,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
-               unsigned int i, last;
-
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                        len = frag->size;
-                       mapping = sp->dma_maps[i];
+                       mapping = pci_map_page(tp->pdev,
+                                              frag->page,
+                                              frag->page_offset,
+                                              len, PCI_DMA_TODEVICE);
 
                        tnapi->tx_buffers[entry].skb = NULL;
+                       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+                                          mapping);
+                       if (pci_dma_mapping_error(tp->pdev, mapping))
+                               goto dma_error;
 
                        if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
                            len <= 8)
@@ -5637,6 +5835,27 @@ out_unlock:
        mmiowb();
 
        return NETDEV_TX_OK;
+
+dma_error:
+       last = i;
+       entry = tnapi->tx_prod;
+       tnapi->tx_buffers[entry].skb = NULL;
+       pci_unmap_single(tp->pdev,
+                        pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+                        skb_headlen(skb),
+                        PCI_DMA_TODEVICE);
+       for (i = 0; i <= last; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               entry = NEXT_TX(entry);
+
+               pci_unmap_page(tp->pdev,
+                              pci_unmap_addr(&tnapi->tx_buffers[entry],
+                                             mapping),
+                              frag->size, PCI_DMA_TODEVICE);
+       }
+
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
@@ -5701,36 +5920,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
        int i;
-       struct ring_info *rxp;
 
-       for (i = 0; i < TG3_RX_RING_SIZE; i++) {
-               rxp = &tpr->rx_std_buffers[i];
-
-               if (rxp->skb == NULL)
-                       continue;
+       if (tpr != &tp->prodring[0]) {
+               for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+                    i = (i + 1) % TG3_RX_RING_SIZE)
+                       tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                                       tp->rx_pkt_map_sz);
+
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+                       for (i = tpr->rx_jmb_cons_idx;
+                            i != tpr->rx_jmb_prod_idx;
+                            i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+                               tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                                               TG3_RX_JMB_MAP_SZ);
+                       }
+               }
 
-               pci_unmap_single(tp->pdev,
-                                pci_unmap_addr(rxp, mapping),
-                                tp->rx_pkt_map_sz,
-                                PCI_DMA_FROMDEVICE);
-               dev_kfree_skb_any(rxp->skb);
-               rxp->skb = NULL;
+               return;
        }
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-               for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-                       rxp = &tpr->rx_jmb_buffers[i];
+       for (i = 0; i < TG3_RX_RING_SIZE; i++)
+               tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                               tp->rx_pkt_map_sz);
 
-                       if (rxp->skb == NULL)
-                               continue;
-
-                       pci_unmap_single(tp->pdev,
-                                        pci_unmap_addr(rxp, mapping),
-                                        TG3_RX_JMB_MAP_SZ,
-                                        PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(rxp->skb);
-                       rxp->skb = NULL;
-               }
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+               for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+                       tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                                       TG3_RX_JMB_MAP_SZ);
        }
 }
 
@@ -5746,6 +5962,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 {
        u32 i, rx_pkt_dma_sz;
 
+       tpr->rx_std_cons_idx = 0;
+       tpr->rx_std_prod_idx = 0;
+       tpr->rx_jmb_cons_idx = 0;
+       tpr->rx_jmb_prod_idx = 0;
+
+       if (tpr != &tp->prodring[0]) {
+               memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
+                       memset(&tpr->rx_jmb_buffers[0], 0,
+                              TG3_RX_JMB_BUFF_RING_SIZE);
+               goto done;
+       }
+
        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
 
@@ -5847,8 +6076,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
                                struct tg3_rx_prodring_set *tpr)
 {
-       tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
-                                     TG3_RX_RING_SIZE, GFP_KERNEL);
+       tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
        if (!tpr->rx_std_buffers)
                return -ENOMEM;
 
@@ -5858,8 +6086,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
                goto err_out;
 
        if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-               tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
-                                             TG3_RX_JUMBO_RING_SIZE,
+               tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
                                              GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
                        goto err_out;
@@ -5896,8 +6123,9 @@ static void tg3_free_rings(struct tg3 *tp)
                        continue;
 
                for (i = 0; i < TG3_TX_RING_SIZE; ) {
-                       struct tx_ring_info *txp;
+                       struct ring_info *txp;
                        struct sk_buff *skb;
+                       unsigned int k;
 
                        txp = &tnapi->tx_buffers[i];
                        skb = txp->skb;
@@ -5907,17 +6135,29 @@ static void tg3_free_rings(struct tg3 *tp)
                                continue;
                        }
 
-                       skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
-
+                       pci_unmap_single(tp->pdev,
+                                        pci_unmap_addr(txp, mapping),
+                                        skb_headlen(skb),
+                                        PCI_DMA_TODEVICE);
                        txp->skb = NULL;
 
-                       i += skb_shinfo(skb)->nr_frags + 1;
+                       i++;
+
+                       for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
+                               txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
+                               pci_unmap_page(tp->pdev,
+                                              pci_unmap_addr(txp, mapping),
+                                              skb_shinfo(skb)->frags[k].size,
+                                              PCI_DMA_TODEVICE);
+                               i++;
+                       }
 
                        dev_kfree_skb_any(skb);
                }
-       }
 
-       tg3_rx_prodring_free(tp, &tp->prodring[0]);
+               if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
+                       tg3_rx_prodring_free(tp, &tp->prodring[j]);
+       }
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -5951,9 +6191,13 @@ static int tg3_init_rings(struct tg3 *tp)
                tnapi->rx_rcb_ptr = 0;
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+               if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
+                       tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+                       return -ENOMEM;
        }
 
-       return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
+       return 0;
 }
 
 /*
@@ -5997,7 +6241,8 @@ static void tg3_free_consistent(struct tg3 *tp)
                tp->hw_stats = NULL;
        }
 
-       tg3_rx_prodring_fini(tp, &tp->prodring[0]);
+       for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+               tg3_rx_prodring_fini(tp, &tp->prodring[i]);
 }
 
 /*
@@ -6008,8 +6253,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
        int i;
 
-       if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
-               return -ENOMEM;
+       for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+               if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
+                       goto err_out;
+       }
 
        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
@@ -6032,6 +6279,24 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
                sblk = tnapi->hw_status;
 
+               /* If multivector TSS is enabled, vector 0 does not handle
+                * tx interrupts.  Don't allocate any resources for it.
+                */
+               if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
+                   (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
+                       tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
+                                                   TG3_TX_RING_SIZE,
+                                                   GFP_KERNEL);
+                       if (!tnapi->tx_buffers)
+                               goto err_out;
+
+                       tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
+                                                             TG3_TX_RING_BYTES,
+                                                      &tnapi->tx_desc_mapping);
+                       if (!tnapi->tx_ring)
+                               goto err_out;
+               }
+
                /*
                 * When RSS is enabled, the status block format changes
                 * slightly.  The "rx_jumbo_consumer", "reserved",
@@ -6053,6 +6318,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                        break;
                }
 
+               if (tp->irq_cnt == 1)
+                       tnapi->prodring = &tp->prodring[0];
+               else if (i)
+                       tnapi->prodring = &tp->prodring[i - 1];
+
                /*
                 * If multivector RSS is enabled, vector 0 does not handle
                 * rx or tx interrupts.  Don't allocate any resources for it.
@@ -6067,17 +6337,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                        goto err_out;
 
                memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
-
-               tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
-                                           TG3_TX_RING_SIZE, GFP_KERNEL);
-               if (!tnapi->tx_buffers)
-                       goto err_out;
-
-               tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
-                                                     TG3_TX_RING_BYTES,
-                                                     &tnapi->tx_desc_mapping);
-               if (!tnapi->tx_ring)
-                       goto err_out;
        }
 
        return 0;
@@ -6713,7 +6972,8 @@ static int tg3_chip_reset(struct tg3 *tp)
        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
                val = tr32(0x7c00);
 
                tw32(0x7c00, val | (1 << 25));
@@ -7065,19 +7325,21 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 {
        int i;
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+       if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
-
-               tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
-               tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
-               tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+       }
 
+       if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+               tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+               tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+               tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+       } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
@@ -7100,25 +7362,31 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 
                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
-               reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
-               tw32(reg, ec->tx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
-               reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
-               tw32(reg, ec->tx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);
-               reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
-               tw32(reg, ec->tx_max_coalesced_frames_irq);
+
+               if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
+                       reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_coalesce_usecs);
+                       reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_max_coalesced_frames);
+                       reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_max_coalesced_frames_irq);
+               }
        }
 
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
-               tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
-               tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
-               tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+
+               if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
+                       tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+                       tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+                       tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+               }
        }
 }
 
@@ -7132,6 +7400,8 @@ static void tg3_rings_reset(struct tg3 *tp)
        /* Disable all transmit rings but the first. */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+               limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 
@@ -7146,7 +7416,8 @@ static void tg3_rings_reset(struct tg3 *tp)
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -7219,17 +7490,19 @@ static void tg3_rings_reset(struct tg3 *tp)
                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 
-               tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
-                              (TG3_TX_RING_SIZE <<
-                               BDINFO_FLAGS_MAXLEN_SHIFT),
-                              NIC_SRAM_TX_BUFFER_DESC);
+               if (tnapi->tx_ring) {
+                       tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+                                      (TG3_TX_RING_SIZE <<
+                                       BDINFO_FLAGS_MAXLEN_SHIFT),
+                                      NIC_SRAM_TX_BUFFER_DESC);
+                       txrcb += TG3_BDINFO_SIZE;
+               }
 
                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               (TG3_RX_RCB_RING_SIZE(tp) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT), 0);
 
                stblk += 8;
-               txrcb += TG3_BDINFO_SIZE;
                rxrcb += TG3_BDINFO_SIZE;
        }
 }
@@ -7341,7 +7614,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (err)
                return err;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                val = tr32(TG3PCI_DMA_RW_CTRL) &
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -7469,7 +7743,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
             ((u64) tpr->rx_std_mapping >> 32));
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tpr->rx_std_mapping & 0xffffffff));
-       if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
                tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
                     NIC_SRAM_RX_BUFFER_DESC);
 
@@ -7502,7 +7776,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                             BDINFO_FLAGS_DISABLED);
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                        val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
                              (RX_STD_MAX_SIZE << 2);
                else
@@ -7513,15 +7788,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 
        tpr->rx_std_prod_idx = tp->rx_pending;
-       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-                    tpr->rx_std_prod_idx);
+       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 
        tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
                          tp->rx_jumbo_pending : 0;
-       tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-                    tpr->rx_jmb_prod_idx);
+       tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                tw32(STD_REPLENISH_LWM, 32);
                tw32(JMB_REPLENISH_LWM, 16);
        }
@@ -7774,7 +8048,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
        val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+       if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
                val |= SNDBDI_MODE_MULTI_TXQ_EN;
        tw32(SNDBDI_MODE, val);
        tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
@@ -8198,7 +8472,8 @@ static int tg3_test_interrupt(struct tg3 *tp)
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
            (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
@@ -8241,7 +8516,8 @@ static int tg3_test_interrupt(struct tg3 *tp)
 
        if (intr_ok) {
                /* Reenable MSI one shot mode. */
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
                    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
@@ -8382,7 +8658,11 @@ static bool tg3_enable_msix(struct tg3 *tp)
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;
 
-       tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+               tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+               tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+       } else
+               tp->dev->real_num_tx_queues = 1;
 
        return true;
 }
@@ -8533,6 +8813,7 @@ static int tg3_open(struct net_device *dev)
                }
 
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
                    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
                    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);
@@ -10476,7 +10757,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);
 
-       if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+       map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
@@ -10490,8 +10772,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
        num_pkts = 0;
 
-       tg3_set_txd(tnapi, tnapi->tx_prod,
-                   skb_shinfo(skb)->dma_head, tx_len, 0, 1);
+       tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
 
        tnapi->tx_prod++;
        num_pkts++;
@@ -10515,7 +10796,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                        break;
        }
 
-       skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+       pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);
 
        if (tx_idx != tnapi->tx_prod)
@@ -11427,7 +11708,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
                        tg3_get_5761_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
                        tg3_get_5717_nvram_info(tp);
@@ -12152,7 +12434,7 @@ skip_phy_reset:
 
 static void __devinit tg3_read_partno(struct tg3 *tp)
 {
-       unsigned char vpd_data[256];   /* in little-endian format */
+       unsigned char vpd_data[TG3_NVM_VPD_LEN];   /* in little-endian format */
        unsigned int i;
        u32 magic;
 
@@ -12161,48 +12443,37 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
                goto out_not_found;
 
        if (magic == TG3_EEPROM_MAGIC) {
-               for (i = 0; i < 256; i += 4) {
+               for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
                        u32 tmp;
 
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
-                       if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
+                       if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
                                goto out_not_found;
 
                        memcpy(&vpd_data[i], &tmp, sizeof(tmp));
                }
        } else {
-               int vpd_cap;
-
-               vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
-               for (i = 0; i < 256; i += 4) {
-                       u32 tmp, j = 0;
-                       __le32 v;
-                       u16 tmp16;
-
-                       pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
-                                             i);
-                       while (j++ < 100) {
-                               pci_read_config_word(tp->pdev, vpd_cap +
-                                                    PCI_VPD_ADDR, &tmp16);
-                               if (tmp16 & 0x8000)
-                                       break;
-                               msleep(1);
-                       }
-                       if (!(tmp16 & 0x8000))
+               ssize_t cnt;
+               unsigned int pos = 0, i = 0;
+
+               for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
+                       cnt = pci_read_vpd(tp->pdev, pos,
+                                          TG3_NVM_VPD_LEN - pos,
+                                          &vpd_data[pos]);
+                       if (cnt == -ETIMEDOUT || cnt == -EINTR)
+                               cnt = 0;
+                       else if (cnt < 0)
                                goto out_not_found;
-
-                       pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
-                                             &tmp);
-                       v = cpu_to_le32(tmp);
-                       memcpy(&vpd_data[i], &v, sizeof(v));
                }
+               if (pos != TG3_NVM_VPD_LEN)
+                       goto out_not_found;
        }
 
        /* Now parse and find the part number. */
-       for (i = 0; i < 254; ) {
+       for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
                unsigned char val = vpd_data[i];
                unsigned int block_end;
 
@@ -12221,7 +12492,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
                              (vpd_data[i + 2] << 8)));
                i += 3;
 
-               if (block_end > 256)
+               if (block_end > TG3_NVM_VPD_LEN)
                        goto out_not_found;
 
                while (i < (block_end - 2)) {
@@ -12230,7 +12501,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
                                int partno_len = vpd_data[i + 2];
 
                                i += 3;
-                               if (partno_len > 24 || (partno_len + i) > 256)
+                               if (partno_len > TG3_BPN_SIZE ||
+                                   (partno_len + i) > TG3_NVM_VPD_LEN)
                                        goto out_not_found;
 
                                memcpy(tp->board_part_number,
@@ -12261,6 +12533,8 @@ out_not_found:
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                strcpy(tp->board_part_number, "BCM57788");
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+               strcpy(tp->board_part_number, "BCM57765");
        else
                strcpy(tp->board_part_number, "none");
 }
@@ -12544,13 +12818,21 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
                u32 prod_id_asic_rev;
 
-               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
+               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
                        pci_read_config_dword(tp->pdev,
                                              TG3PCI_GEN2_PRODID_ASICREV,
                                              &prod_id_asic_rev);
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+                       pci_read_config_dword(tp->pdev,
+                                             TG3PCI_GEN15_PRODID_ASICREV,
+                                             &prod_id_asic_rev);
                else
                        pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
                                              &prod_id_asic_rev);
@@ -12704,7 +12986,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -12731,7 +13014,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        }
 
        /* Determine TSO capabilities */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
        else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -12767,7 +13051,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                        tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                        tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
                        tp->irq_max = TG3_IRQ_MAX_VECS;
                }
@@ -12781,9 +13066,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
        }
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+               tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
+
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+                (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
                tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
 
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -12976,7 +13265,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
 
        /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -13055,7 +13345,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -13379,7 +13670,8 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 #endif
 #endif
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }
@@ -13591,7 +13883,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                goto out;
 
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
@@ -13784,7 +14077,8 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14189,7 +14483,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        if (err) {
                printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
                       "aborting.\n");
-               goto err_out_fw;
+               goto err_out_iounmap;
        }
 
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
@@ -14198,7 +14492,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                        printk(KERN_ERR PFX "Cannot map APE registers, "
                               "aborting.\n");
                        err = -ENOMEM;
-                       goto err_out_fw;
+                       goto err_out_iounmap;
                }
 
                tg3_ape_lock_init(tp);
@@ -14329,10 +14623,6 @@ err_out_apeunmap:
                tp->aperegs = NULL;
        }
 
-err_out_fw:
-       if (tp->fw)
-               release_firmware(tp->fw);
-
 err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
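
/* For reference, the VPD hunk above replaces the hand-rolled
 * PCI_VPD_ADDR/PCI_VPD_DATA polling with the pci_read_vpd() helper.  A
 * minimal standalone sketch of that read loop (hypothetical function and
 * buffer names; retry handling mirrors the patch, treating -ETIMEDOUT and
 * -EINTR as transient and giving up after three attempts):
 */
#include <linux/errno.h>
#include <linux/pci.h>

static int demo_read_vpd(struct pci_dev *pdev, u8 *buf, unsigned int len)
{
        unsigned int pos = 0, i;
        ssize_t cnt;

        for (i = 0; pos < len && i < 3; i++, pos += cnt) {
                cnt = pci_read_vpd(pdev, pos, len - pos, &buf[pos]);
                if (cnt == -ETIMEDOUT || cnt == -EINTR)
                        cnt = 0;                /* transient; try again */
                else if (cnt < 0)
                        return cnt;             /* hard error           */
        }

        return pos == len ? 0 : -EIO;           /* short read after 3 tries */
}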