bnx2: driver update from v1.7.6 to v1.8.0 (multiqueue tx, MSI-X per-vector rings, RSS, VLAN and firmware-capability changes)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4360528..883e0a7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -35,8 +35,8 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#ifdef NETIF_F_HW_VLAN_TX
 #include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define BCM_VLAN 1
 #endif
 #include <net/ip.h>
@@ -47,6 +47,7 @@
 #include <linux/prefetch.h>
 #include <linux/cache.h>
 #include <linux/zlib.h>
+#include <linux/log2.h>
 
 #include "bnx2.h"
 #include "bnx2_fw.h"
@@ -56,8 +57,8 @@
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.7.6"
-#define DRV_MODULE_RELDATE     "May 16, 2008"
+#define DRV_MODULE_VERSION     "1.8.0"
+#define DRV_MODULE_RELDATE     "Aug 14, 2008"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -68,7 +69,7 @@ static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
@@ -87,6 +88,7 @@ typedef enum {
        BCM5708S,
        BCM5709,
        BCM5709S,
+       BCM5716,
 } board_t;
 
 /* indexed by board_t, above */
@@ -102,9 +104,10 @@ static struct {
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+       { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        };
 
-static struct pci_device_id bnx2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
@@ -123,6 +126,8 @@ static struct pci_device_id bnx2_pci_tbl[] = {
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+       { PCI_VENDOR_ID_BROADCOM, 0x163b,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { 0, }
 };
 
@@ -289,7 +294,6 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
-                       u32 val;
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
@@ -488,7 +492,7 @@ bnx2_netif_start(struct bnx2 *bp)
 {
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
-                       netif_wake_queue(bp->dev);
+                       netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                }
@@ -624,6 +628,7 @@ static void
 bnx2_free_mem(struct bnx2 *bp)
 {
        int i;
+       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 
        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);
@@ -636,10 +641,11 @@ bnx2_free_mem(struct bnx2 *bp)
                        bp->ctx_blk[i] = NULL;
                }
        }
-       if (bp->status_blk) {
+       if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
-                                   bp->status_blk, bp->status_blk_mapping);
-               bp->status_blk = NULL;
+                                   bnapi->status_blk.msi,
+                                   bp->status_blk_mapping);
+               bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
 }
@@ -648,6 +654,8 @@ static int
 bnx2_alloc_mem(struct bnx2 *bp)
 {
        int i, status_blk_size, err;
+       struct bnx2_napi *bnapi;
+       void *status_blk;
 
        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
@@ -657,27 +665,37 @@ bnx2_alloc_mem(struct bnx2 *bp)
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);
 
-       bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
-                                             &bp->status_blk_mapping);
-       if (bp->status_blk == NULL)
+       status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
+                                         &bp->status_blk_mapping);
+       if (status_blk == NULL)
                goto alloc_mem_err;
 
-       memset(bp->status_blk, 0, bp->status_stats_size);
+       memset(status_blk, 0, bp->status_stats_size);
 
-       bp->bnx2_napi[0].status_blk = bp->status_blk;
+       bnapi = &bp->bnx2_napi[0];
+       bnapi->status_blk.msi = status_blk;
+       bnapi->hw_tx_cons_ptr =
+               &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+       bnapi->hw_rx_cons_ptr =
+               &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
-                       struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+                       struct status_block_msix *sblk;
 
-                       bnapi->status_blk_msix = (void *)
-                               ((unsigned long) bp->status_blk +
-                                BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+                       bnapi = &bp->bnx2_napi[i];
+
+                       sblk = (void *) (status_blk +
+                                        BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+                       bnapi->status_blk.msix = sblk;
+                       bnapi->hw_tx_cons_ptr =
+                               &sblk->status_tx_quick_consumer_index;
+                       bnapi->hw_rx_cons_ptr =
+                               &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }
 
-       bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
-                                 status_blk_size);
+       bp->stats_blk = status_blk + status_blk_size;
 
        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 
@@ -1109,7 +1127,7 @@ bnx2_init_all_rx_contexts(struct bnx2 *bp)
        }
 }
 
-static int
+static void
 bnx2_set_mac_link(struct bnx2 *bp)
 {
        u32 val;
@@ -1175,8 +1193,6 @@ bnx2_set_mac_link(struct bnx2 *bp)
 
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_all_rx_contexts(bp);
-
-       return 0;
 }
 
 static void
@@ -1473,7 +1489,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
        return adv;
 }
 
-static int bnx2_fw_sync(struct bnx2 *, u32, int);
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
 
 static int
 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
@@ -1526,7 +1542,7 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
 
        spin_unlock_bh(&bp->phy_lock);
-       bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
+       bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);
 
        return 0;
@@ -2244,7 +2260,7 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
 }
 
 static int
-bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
 {
        int i;
        u32 val;
@@ -2254,6 +2270,9 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
 
        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
 
+       if (!ack)
+               return 0;
+
        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);
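
The extra "ack" argument added to bnx2_fw_sync() above lets callers post a firmware mailbox message without waiting for the acknowledgement (the KEEP_VLAN update later in this diff passes ack = 0). Below is a minimal userspace sketch of that pattern; the mailboxes, constants and the fake_firmware_ack() helper are invented for illustration and are not driver or firmware API.

/* Post a message; poll for the echoed sequence bits only when asked to. */
#include <stdio.h>

#define FW_ACK_SEQ_MASK 0xffff0000      /* assumed layout: sequence in the high bits */
#define FW_ACK_TRIES    5

static unsigned int drv_mb;             /* driver -> firmware mailbox (simulated) */
static unsigned int fw_mb;              /* firmware -> driver mailbox (simulated) */

static void fake_firmware_ack(void)
{
        fw_mb = drv_mb & FW_ACK_SEQ_MASK;       /* stand-in for the real firmware */
}

static int fw_sync(unsigned int msg_data, int ack, int silent)
{
        int i;

        drv_mb = msg_data;                      /* post the message */
        if (!ack)
                return 0;                       /* fire-and-forget path added by the patch */

        fake_firmware_ack();                    /* simulate the firmware responding */
        for (i = 0; i < FW_ACK_TRIES; i++) {
                if ((fw_mb & FW_ACK_SEQ_MASK) == (msg_data & FW_ACK_SEQ_MASK))
                        return 0;               /* acknowledged */
        }
        if (!silent)
                fprintf(stderr, "firmware did not ack 0x%x\n", msg_data);
        return -1;
}

int main(void)
{
        printf("wait-for-ack: %d\n", fw_sync(0x00010005, 1, 0));
        printf("no-ack:       %d\n", fw_sync(0x00020006, 0, 0));
        return 0;
}
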
@@ -2430,19 +2449,18 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
 }
 
 static void
-bnx2_set_mac_addr(struct bnx2 *bp)
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
 {
        u32 val;
-       u8 *mac_addr = bp->dev->dev_addr;
 
        val = (mac_addr[0] << 8) | mac_addr[1];
 
-       REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
+       REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
 
        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                (mac_addr[4] << 8) | mac_addr[5];
 
-       REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
+       REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
 }
 
 static inline int
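
bnx2_set_mac_addr() now takes the address and a filter slot ("pos") so the same helper can program the station address and the unicast match filters added later in this diff. The userspace sketch below shows how the six address bytes are packed into the two per-slot match registers; the register offsets are illustrative assumptions, only the packing and the 8-byte slot stride come from the patch.

#include <stdio.h>

#define EMAC_MAC_MATCH0 0x1410          /* illustrative offsets, not taken from bnx2.h */
#define EMAC_MAC_MATCH1 0x1414

static void set_mac_addr(const unsigned char *mac, unsigned int pos)
{
        unsigned int hi = (mac[0] << 8) | mac[1];
        unsigned int lo = (mac[2] << 24) | (mac[3] << 16) |
                          (mac[4] << 8) | mac[5];

        printf("REG_WR(0x%04x, 0x%08x)\n", EMAC_MAC_MATCH0 + pos * 8, hi);
        printf("REG_WR(0x%04x, 0x%08x)\n", EMAC_MAC_MATCH1 + pos * 8, lo);
}

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

        set_mac_addr(mac, 0);           /* primary station address */
        set_mac_addr(mac, 4);           /* e.g. a unicast filter slot */
        return 0;
}
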
@@ -2515,7 +2533,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 static int
 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
 {
-       struct status_block *sblk = bnapi->status_blk;
+       struct status_block *sblk = bnapi->status_blk.msi;
        u32 new_link_state, old_link_state;
        int is_set = 1;
 
@@ -2551,11 +2569,9 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
 {
        u16 cons;
 
-       if (bnapi->int_num == 0)
-               cons = bnapi->status_blk->status_tx_quick_consumer_index0;
-       else
-               cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
-
+       /* Tell compiler that status block fields can change. */
+       barrier();
+       cons = *bnapi->hw_tx_cons_ptr;
        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
                cons++;
        return cons;
@@ -2566,7 +2582,11 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
-       int tx_pkt = 0;
+       int tx_pkt = 0, index;
+       struct netdev_queue *txq;
+
+       index = (bnapi - bp->bnx2_napi);
+       txq = netdev_get_tx_queue(bp->dev, index);
 
        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;
@@ -2626,21 +2646,23 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
+
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
-        * before checking for netif_queue_stopped().  Without the
+        * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();
 
-       if (unlikely(netif_queue_stopped(bp->dev)) &&
+       if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-               netif_tx_lock(bp->dev);
-               if ((netif_queue_stopped(bp->dev)) &&
+               __netif_tx_lock(txq, smp_processor_id());
+               if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
-                       netif_wake_queue(bp->dev);
-               netif_tx_unlock(bp->dev);
+                       netif_tx_wake_queue(txq);
+               __netif_tx_unlock(txq);
        }
+
        return tx_pkt;
 }
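
The comment block above explains why the smp_mb() is needed between publishing tx_cons and re-reading the (now per-queue) stopped state. The C11 sketch below mirrors that stop/wake handshake with plain shared variables and seq_cst fences in place of the netdev_queue API; it illustrates the ordering of the two re-checks, not the driver's actual locking, and all names are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE   256
#define WAKE_THRESH  32

struct txq {
        atomic_uint prod;
        atomic_uint cons;
        atomic_bool stopped;
};

static unsigned int tx_avail(struct txq *q)
{
        return RING_SIZE - (atomic_load(&q->prod) - atomic_load(&q->cons));
}

/* start_xmit side: stop the queue, then re-check space after a full fence. */
static void xmit_path(struct txq *q)
{
        atomic_fetch_add(&q->prod, 1);
        if (tx_avail(q) <= WAKE_THRESH) {
                atomic_store(&q->stopped, true);
                atomic_thread_fence(memory_order_seq_cst);  /* pairs with the fence below */
                if (tx_avail(q) > WAKE_THRESH)
                        atomic_store(&q->stopped, false);   /* completion raced in; undo */
        }
}

/* completion side: publish the new consumer index before testing "stopped". */
static void completion_path(struct txq *q, unsigned int new_cons)
{
        atomic_store(&q->cons, new_cons);
        atomic_thread_fence(memory_order_seq_cst);          /* the smp_mb() in the patch */
        if (atomic_load(&q->stopped) && tx_avail(q) > WAKE_THRESH)
                atomic_store(&q->stopped, false);           /* netif_tx_wake_queue() */
}

int main(void)
{
        struct txq q = { 0 };
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++)
                xmit_path(&q);
        printf("stopped after filling the ring: %d\n", atomic_load(&q.stopped));
        completion_path(&q, RING_SIZE / 2);
        printf("stopped after completions:      %d\n", atomic_load(&q.stopped));
        return 0;
}
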
 
@@ -2822,11 +2844,9 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 {
        u16 cons;
 
-       if (bnapi->int_num == 0)
-               cons = bnapi->status_blk->status_rx_quick_consumer_index0;
-       else
-               cons = bnapi->status_blk_msix->status_rx_quick_consumer_index;
-
+       /* Tell compiler that status block fields can change. */
+       barrier();
+       cons = *bnapi->hw_rx_cons_ptr;
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
@@ -2854,6 +2874,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
+               u16 vtag = 0;
+               int hw_vlan __maybe_unused = 0;
 
                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);
@@ -2897,7 +2919,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;
 
-                       new_skb = netdev_alloc_skb(bp->dev, len + 2);
+                       new_skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
@@ -2906,9 +2928,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
-                                                        BNX2_RX_OFFSET - 2,
-                                     new_skb->data, len + 2);
-                       skb_reserve(new_skb, 2);
+                                                        BNX2_RX_OFFSET - 6,
+                                     new_skb->data, len + 6);
+                       skb_reserve(new_skb, 6);
                        skb_put(new_skb, len);
 
                        bnx2_reuse_rx_skb(bp, rxr, skb,
@@ -2919,6 +2941,25 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;
 
+               if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+                   !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
+                       vtag = rx_hdr->l2_fhdr_vlan_tag;
+#ifdef BCM_VLAN
+                       if (bp->vlgrp)
+                               hw_vlan = 1;
+                       else
+#endif
+                       {
+                               struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+                                       __skb_push(skb, 4);
+
+                               memmove(ve, skb->data + 4, ETH_ALEN * 2);
+                               ve->h_vlan_proto = htons(ETH_P_8021Q);
+                               ve->h_vlan_TCI = htons(vtag);
+                               len += 4;
+                       }
+               }
+
                skb->protocol = eth_type_trans(skb, bp->dev);
 
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
@@ -2940,10 +2981,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                }
 
 #ifdef BCM_VLAN
-               if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
-                       vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-                               rx_hdr->l2_fhdr_vlan_tag);
-               }
+               if (hw_vlan)
+                       vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
                else
 #endif
                        netif_receive_skb(skb);
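
When the chip has stripped the VLAN tag but no vlan group is registered, the new code above rebuilds the 802.1Q header in front of the payload with __skb_push() and memmove(). The standalone sketch below reproduces that byte manipulation with plain buffers instead of sk_buffs; buffer sizes and the sample addresses are arbitrary.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_ALEN    6
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100

static unsigned char frame[128];        /* untagged frame starts at offset VLAN_HLEN */

static unsigned char *reinsert_vlan_tag(unsigned char *data, size_t *len,
                                        unsigned short vtag)
{
        unsigned char *p = data - VLAN_HLEN;    /* __skb_push(skb, 4) */
        unsigned short proto = htons(ETH_P_8021Q);
        unsigned short tci = htons(vtag);

        memmove(p, data, ETH_ALEN * 2);         /* move dst + src MAC up by 4 bytes */
        memcpy(p + ETH_ALEN * 2, &proto, 2);    /* h_vlan_proto */
        memcpy(p + ETH_ALEN * 2 + 2, &tci, 2);  /* h_vlan_TCI */
        *len += VLAN_HLEN;
        return p;
}

int main(void)
{
        unsigned char *data = frame + VLAN_HLEN;
        size_t len = 60, i;

        memset(data, 0xaa, ETH_ALEN);             /* fake destination MAC */
        memset(data + ETH_ALEN, 0xbb, ETH_ALEN);  /* fake source MAC */
        data = reinsert_vlan_tag(data, &len, 100);

        for (i = 0; i < ETH_ALEN * 2 + VLAN_HLEN; i++)
                printf("%02x ", data[i]);
        printf(" len=%zu\n", len);
        return 0;
}
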
@@ -2986,11 +3025,11 @@ next_rx:
 static irqreturn_t
 bnx2_msi(int irq, void *dev_instance)
 {
-       struct net_device *dev = dev_instance;
-       struct bnx2 *bp = netdev_priv(dev);
-       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+       struct bnx2_napi *bnapi = dev_instance;
+       struct bnx2 *bp = bnapi->bp;
+       struct net_device *dev = bp->dev;
 
-       prefetch(bnapi->status_blk);
+       prefetch(bnapi->status_blk.msi);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -3007,11 +3046,11 @@ bnx2_msi(int irq, void *dev_instance)
 static irqreturn_t
 bnx2_msi_1shot(int irq, void *dev_instance)
 {
-       struct net_device *dev = dev_instance;
-       struct bnx2 *bp = netdev_priv(dev);
-       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+       struct bnx2_napi *bnapi = dev_instance;
+       struct bnx2 *bp = bnapi->bp;
+       struct net_device *dev = bp->dev;
 
-       prefetch(bnapi->status_blk);
+       prefetch(bnapi->status_blk.msi);
 
        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -3025,10 +3064,10 @@ bnx2_msi_1shot(int irq, void *dev_instance)
 static irqreturn_t
 bnx2_interrupt(int irq, void *dev_instance)
 {
-       struct net_device *dev = dev_instance;
-       struct bnx2 *bp = netdev_priv(dev);
-       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-       struct status_block *sblk = bnapi->status_blk;
+       struct bnx2_napi *bnapi = dev_instance;
+       struct bnx2 *bp = bnapi->bp;
+       struct net_device *dev = bp->dev;
+       struct status_block *sblk = bnapi->status_blk.msi;
 
        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
@@ -3062,21 +3101,16 @@ bnx2_interrupt(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t
-bnx2_tx_msix(int irq, void *dev_instance)
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
 {
-       struct net_device *dev = dev_instance;
-       struct bnx2 *bp = netdev_priv(dev);
-       struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
-
-       prefetch(bnapi->status_blk_msix);
-
-       /* Return here if interrupt is disabled. */
-       if (unlikely(atomic_read(&bp->intr_sem) != 0))
-               return IRQ_HANDLED;
+       struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+       struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 
-       netif_rx_schedule(dev, &bnapi->napi);
-       return IRQ_HANDLED;
+       if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+           (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+               return 1;
+       return 0;
 }
 
 #define STATUS_ATTN_EVENTS     (STATUS_ATTN_BITS_LINK_STATE | \
@@ -3085,12 +3119,9 @@ bnx2_tx_msix(int irq, void *dev_instance)
 static inline int
 bnx2_has_work(struct bnx2_napi *bnapi)
 {
-       struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
-       struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
-       struct status_block *sblk = bnapi->status_blk;
+       struct status_block *sblk = bnapi->status_blk.msi;
 
-       if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
-           (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+       if (bnx2_has_fast_work(bnapi))
                return 1;
 
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -3100,36 +3131,9 @@ bnx2_has_work(struct bnx2_napi *bnapi)
        return 0;
 }
 
-static int bnx2_tx_poll(struct napi_struct *napi, int budget)
-{
-       struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
-       struct bnx2 *bp = bnapi->bp;
-       struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
-       int work_done = 0;
-       struct status_block_msix *sblk = bnapi->status_blk_msix;
-
-       do {
-               work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
-               if (unlikely(work_done >= budget))
-                       return work_done;
-
-               bnapi->last_status_idx = sblk->status_idx;
-               rmb();
-       } while (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons);
-
-       netif_rx_complete(bp->dev, napi);
-       REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
-              BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
-              bnapi->last_status_idx);
-       return work_done;
-}
-
-static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
-                         int work_done, int budget)
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
-       struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
-       struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
-       struct status_block *sblk = bnapi->status_blk;
+       struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
 
@@ -3145,6 +3149,13 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }
+}
+
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+                         int work_done, int budget)
+{
+       struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+       struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 
        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);
@@ -3155,14 +3166,43 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
        return work_done;
 }
 
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+       struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+       struct bnx2 *bp = bnapi->bp;
+       int work_done = 0;
+       struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+       while (1) {
+               work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+               if (unlikely(work_done >= budget))
+                       break;
+
+               bnapi->last_status_idx = sblk->status_idx;
+               /* status idx must be read before checking for more work. */
+               rmb();
+               if (likely(!bnx2_has_fast_work(bnapi))) {
+
+                       netif_rx_complete(bp->dev, napi);
+                       REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+                              BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+                              bnapi->last_status_idx);
+                       break;
+               }
+       }
+       return work_done;
+}
+
 static int bnx2_poll(struct napi_struct *napi, int budget)
 {
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
-       struct status_block *sblk = bnapi->status_blk;
+       struct status_block *sblk = bnapi->status_blk.msi;
 
        while (1) {
+               bnx2_poll_link(bp, bnapi);
+
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
                if (unlikely(work_done >= budget))
@@ -3205,6 +3245,7 @@ bnx2_set_rx_mode(struct net_device *dev)
 {
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
+       struct dev_addr_list *uc_ptr;
        int i;
 
        spin_lock_bh(&bp->phy_lock);
@@ -3213,10 +3254,10 @@ bnx2_set_rx_mode(struct net_device *dev)
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
 #ifdef BCM_VLAN
-       if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
+       if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
 #else
-       if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+       if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
 #endif
        if (dev->flags & IFF_PROMISC) {
@@ -3260,6 +3301,25 @@ bnx2_set_rx_mode(struct net_device *dev)
                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }
 
+       uc_ptr = NULL;
+       if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
+               rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+               sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+                            BNX2_RPM_SORT_USER0_PROM_VLAN;
+       } else if (!(dev->flags & IFF_PROMISC)) {
+               uc_ptr = dev->uc_list;
+
+               /* Add all entries into the match filter list */
+               for (i = 0; i < dev->uc_count; i++) {
+                       bnx2_set_mac_addr(bp, uc_ptr->da_addr,
+                                         i + BNX2_START_UNICAST_ADDRESS_INDEX);
+                       sort_mode |= (1 <<
+                                     (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+                       uc_ptr = uc_ptr->next;
+               }
+
+       }
+
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
@@ -3544,7 +3604,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
                        bp->autoneg = autoneg;
                        bp->advertising = advertising;
 
-                       bnx2_set_mac_addr(bp);
+                       bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
 
                        val = REG_RD(bp, BNX2_EMAC_MODE);
 
@@ -3595,7 +3655,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
                }
 
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
-                       bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
+                       bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
+                                    1, 0);
 
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
@@ -4236,35 +4297,43 @@ nvram_write_end:
 }
 
 static void
-bnx2_init_remote_phy(struct bnx2 *bp)
+bnx2_init_fw_cap(struct bnx2 *bp)
 {
-       u32 val;
+       u32 val, sig = 0;
 
        bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
-       if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
-               return;
+       bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
+
+       if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+               bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
 
        val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
        if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
                return;
 
-       if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
+       if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
+               bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+               sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
+       }
+
+       if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+           (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
+               u32 link;
+
                bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
 
-               val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
-               if (val & BNX2_LINK_STATUS_SERDES_LINK)
+               link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+               if (link & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;
 
-               if (netif_running(bp->dev)) {
-                       u32 sig;
-
-                       sig = BNX2_DRV_ACK_CAP_SIGNATURE |
-                             BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
-                       bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
-               }
+               sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
+                      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
        }
+
+       if (netif_running(bp->dev) && sig)
+               bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
 }
 
 static void
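
bnx2_init_fw_cap() replaces bnx2_init_remote_phy() above and now negotiates both the keep-VLAN and the remote-PHY capabilities with a single acknowledgement write. A toy version of that signature-check-and-ack flow is sketched below; the constants are invented stand-ins for the shared-memory values defined in bnx2.h.

#include <stdio.h>

#define FW_CAP_SIGNATURE_MASK 0xffff0000
#define FW_CAP_SIGNATURE      0xaa550000
#define FW_CAP_CAN_KEEP_VLAN  0x00000001
#define FW_CAP_REMOTE_PHY     0x00000002
#define DRV_ACK_CAP_SIGNATURE 0x52500000

static unsigned int negotiate(unsigned int fw_cap_word, int have_serdes)
{
        unsigned int sig = 0;

        if ((fw_cap_word & FW_CAP_SIGNATURE_MASK) != FW_CAP_SIGNATURE)
                return 0;                       /* firmware published no capability word */

        if (fw_cap_word & FW_CAP_CAN_KEEP_VLAN)
                sig |= DRV_ACK_CAP_SIGNATURE | FW_CAP_CAN_KEEP_VLAN;

        if (have_serdes && (fw_cap_word & FW_CAP_REMOTE_PHY))
                sig |= DRV_ACK_CAP_SIGNATURE | FW_CAP_REMOTE_PHY;

        return sig;                             /* value written to the ack mailbox */
}

int main(void)
{
        printf("copper, keep-vlan only: 0x%08x\n",
               negotiate(FW_CAP_SIGNATURE | FW_CAP_CAN_KEEP_VLAN, 0));
        printf("serdes, both caps:      0x%08x\n",
               negotiate(FW_CAP_SIGNATURE | FW_CAP_CAN_KEEP_VLAN | FW_CAP_REMOTE_PHY, 1));
        printf("bad signature:          0x%08x\n", negotiate(0x12340003, 1));
        return 0;
}
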
@@ -4294,7 +4363,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
        udelay(5);
 
        /* Wait for the firmware to tell us it is ok to issue a reset. */
-       bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
+       bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
 
        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
@@ -4355,13 +4424,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
        }
 
        /* Wait for the firmware to finish its initialization. */
-       rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
+       rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;
 
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
-       bnx2_init_remote_phy(bp);
+       bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
@@ -4445,7 +4514,7 @@ bnx2_init_chip(struct bnx2 *bp)
 
        bnx2_init_nvram(bp);
 
-       bnx2_set_mac_addr(bp);
+       bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
 
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
@@ -4531,15 +4600,25 @@ bnx2_init_chip(struct bnx2 *bp)
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }
 
-       if (bp->flags & BNX2_FLAG_USING_MSIX) {
-               u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
-                          BNX2_HC_SB_CONFIG_1;
-
+       if (bp->irq_nvecs > 1) {
                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);
 
+               val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+       }
+
+       if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
+               val |= BNX2_HC_CONFIG_ONE_SHOT;
+
+       REG_WR(bp, BNX2_HC_CONFIG, val);
+
+       for (i = 1; i < bp->irq_nvecs; i++) {
+               u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+                          BNX2_HC_SB_CONFIG_1;
+
                REG_WR(bp, base,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+                       BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);
 
                REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
@@ -4549,13 +4628,13 @@ bnx2_init_chip(struct bnx2 *bp)
                REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);
 
-               val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
-       }
-
-       if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
-               val |= BNX2_HC_CONFIG_ONE_SHOT;
+               REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+                      (bp->rx_quick_cons_trip_int << 16) |
+                       bp->rx_quick_cons_trip);
 
-       REG_WR(bp, BNX2_HC_CONFIG, val);
+               REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+                       (bp->rx_ticks_int << 16) | bp->rx_ticks);
+       }
 
        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
@@ -4571,7 +4650,7 @@ bnx2_init_chip(struct bnx2 *bp)
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
-                         0);
+                         1, 0);
 
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
@@ -4726,7 +4805,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
-                      BNX2_L2CTX_RBDC_JUMBO_KEY);
+                      BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
 
                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
@@ -4776,6 +4855,7 @@ static void
 bnx2_init_all_rings(struct bnx2 *bp)
 {
        int i;
+       u32 val;
 
        bnx2_clear_ring_states(bp);
 
@@ -4787,8 +4867,33 @@ bnx2_init_all_rings(struct bnx2 *bp)
                REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
                       (TX_TSS_CID << 7));
 
+       REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+       bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
+
        for (i = 0; i < bp->num_rx_rings; i++)
                bnx2_init_rx_ring(bp, i);
+
+       if (bp->num_rx_rings > 1) {
+               u32 tbl_32;
+               u8 *tbl = (u8 *) &tbl_32;
+
+               bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
+                               BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
+
+               for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
+                       tbl[i % 4] = i % (bp->num_rx_rings - 1);
+                       if ((i % 4) == 3)
+                               bnx2_reg_wr_ind(bp,
+                                               BNX2_RXP_SCRATCH_RSS_TBL + i,
+                                               cpu_to_be32(tbl_32));
+               }
+
+               val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
+                     BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
+
+               REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+
+       }
 }
 
 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
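
The RSS setup added to bnx2_init_all_rings() above packs four one-byte ring indices per 32-bit word and writes the indirection table in big-endian form. The userspace sketch below reproduces that packing loop, with printf standing in for bnx2_reg_wr_ind() and an assumed (smaller) table size; the entry values simply cycle over num_rx_rings - 1 rings, matching the i % (num_rx_rings - 1) expression in the patch.

#include <stdio.h>
#include <arpa/inet.h>

#define RSS_TBL_ENTRIES 32              /* illustrative; the real size comes from bnx2.h */

static void fill_rss_table(unsigned int num_rx_rings)
{
        unsigned int tbl_32 = 0;
        unsigned char *tbl = (unsigned char *) &tbl_32;
        unsigned int i;

        for (i = 0; i < RSS_TBL_ENTRIES; i++) {
                tbl[i % 4] = i % (num_rx_rings - 1);
                if ((i % 4) == 3)                       /* one 32-bit word is full */
                        printf("entries %2u-%2u -> 0x%08x\n",
                               i - 3, i, htonl(tbl_32));
        }
}

int main(void)
{
        fill_rss_table(4);      /* 4 rx rings: hash buckets cycle over 3 values */
        return 0;
}
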
@@ -5493,7 +5598,7 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;
 
-               bp->current_interval = bp->timer_interval;
+               bp->current_interval = BNX2_TIMER_INTERVAL;
 
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
@@ -5522,7 +5627,7 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
-               bp->current_interval = bp->timer_interval;
+               bp->current_interval = BNX2_TIMER_INTERVAL;
 
        if (check_link) {
                u32 val;
@@ -5567,11 +5672,11 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
                } else {
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
-                       bp->current_interval = bp->timer_interval;
+                       bp->current_interval = BNX2_TIMER_INTERVAL;
                }
 
        } else
-               bp->current_interval = bp->timer_interval;
+               bp->current_interval = BNX2_TIMER_INTERVAL;
 
        spin_unlock(&bp->phy_lock);
 }
@@ -5611,7 +5716,6 @@ bnx2_restart_timer:
 static int
 bnx2_request_irq(struct bnx2 *bp)
 {
-       struct net_device *dev = bp->dev;
        unsigned long flags;
        struct bnx2_irq *irq;
        int rc = 0, i;
@@ -5624,7 +5728,7 @@ bnx2_request_irq(struct bnx2 *bp)
        for (i = 0; i < bp->irq_nvecs; i++) {
                irq = &bp->irq_tbl[i];
                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
-                                dev);
+                                &bp->bnx2_napi[i]);
                if (rc)
                        break;
                irq->requested = 1;
@@ -5635,14 +5739,13 @@ bnx2_request_irq(struct bnx2 *bp)
 static void
 bnx2_free_irq(struct bnx2 *bp)
 {
-       struct net_device *dev = bp->dev;
        struct bnx2_irq *irq;
        int i;
 
        for (i = 0; i < bp->irq_nvecs; i++) {
                irq = &bp->irq_tbl[i];
                if (irq->requested)
-                       free_irq(irq->vector, dev);
+                       free_irq(irq->vector, &bp->bnx2_napi[i]);
                irq->requested = 0;
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
@@ -5654,7 +5757,7 @@ bnx2_free_irq(struct bnx2 *bp)
 }
 
 static void
-bnx2_enable_msix(struct bnx2 *bp)
+bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 {
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
@@ -5669,17 +5772,14 @@ bnx2_enable_msix(struct bnx2 *bp)
                msix_ent[i].vector = 0;
 
                strcpy(bp->irq_tbl[i].name, bp->dev->name);
-               if (i == 0)
-                       bp->irq_tbl[i].handler = bnx2_msi_1shot;
-               else
-                       bp->irq_tbl[i].handler = bnx2_tx_msix;
+               bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
 
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;
 
-       bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
+       bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->irq_tbl[i].vector = msix_ent[i].vector;
@@ -5688,13 +5788,16 @@ bnx2_enable_msix(struct bnx2 *bp)
 static void
 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
 {
+       int cpus = num_online_cpus();
+       int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
+
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;
 
-       if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
-               bnx2_enable_msix(bp);
+       if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
+               bnx2_enable_msix(bp, msix_vecs);
 
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
@@ -5709,8 +5812,11 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }
-       bp->num_tx_rings = 1;
-       bp->num_rx_rings = 1;
+
+       bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+       bp->dev->real_num_tx_queues = bp->num_tx_rings;
+
+       bp->num_rx_rings = bp->irq_nvecs;
 }
 
 /* Called with rtnl_lock */
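
bnx2_setup_int_mode() now sizes the rings from the vector count: one vector per CPU plus one, capped at RX_MAX_RINGS, with rx rings equal to the vector count and tx rings rounded down to a power of two. The sketch below shows just that arithmetic, assuming MSI-X is available and more than one CPU is online; RX_MAX_RINGS is an assumed cap and rounddown_pow_of_two() is open-coded.

#include <stdio.h>

#define RX_MAX_RINGS 8                  /* assumed hardware limit for the sketch */

static unsigned int rounddown_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned int cpus;

        for (cpus = 2; cpus <= 16; cpus *= 2) {
                unsigned int nvecs = cpus + 1 < RX_MAX_RINGS ? cpus + 1 : RX_MAX_RINGS;

                printf("%2u CPUs -> %u vectors, %u rx rings, %u tx rings\n",
                       cpus, nvecs, nvecs, rounddown_pow_of_two(nvecs));
        }
        return 0;
}
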
@@ -5728,29 +5834,16 @@ bnx2_open(struct net_device *dev)
        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
-       if (rc) {
-               bnx2_napi_disable(bp);
-               bnx2_free_mem(bp);
-               return rc;
-       }
+       if (rc)
+               goto open_err;
 
        rc = bnx2_request_irq(bp);
-
-       if (rc) {
-               bnx2_napi_disable(bp);
-               bnx2_free_mem(bp);
-               return rc;
-       }
+       if (rc)
+               goto open_err;
 
        rc = bnx2_init_nic(bp, 1);
-
-       if (rc) {
-               bnx2_napi_disable(bp);
-               bnx2_free_irq(bp);
-               bnx2_free_skbs(bp);
-               bnx2_free_mem(bp);
-               return rc;
-       }
+       if (rc)
+               goto open_err;
 
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 
@@ -5780,11 +5873,8 @@ bnx2_open(struct net_device *dev)
                                rc = bnx2_request_irq(bp);
 
                        if (rc) {
-                               bnx2_napi_disable(bp);
-                               bnx2_free_skbs(bp);
-                               bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
-                               return rc;
+                               goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
@@ -5794,9 +5884,16 @@ bnx2_open(struct net_device *dev)
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
 
-       netif_start_queue(dev);
+       netif_tx_start_all_queues(dev);
 
        return 0;
+
+open_err:
+       bnx2_napi_disable(bp);
+       bnx2_free_skbs(bp);
+       bnx2_free_irq(bp);
+       bnx2_free_mem(bp);
+       return rc;
 }
 
 static void
@@ -5835,6 +5932,8 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
 
        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);
+       if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
+               bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
 
        bnx2_netif_start(bp);
 }
@@ -5854,12 +5953,19 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
-       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-       struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+       struct bnx2_napi *bnapi;
+       struct bnx2_tx_ring_info *txr;
+       struct netdev_queue *txq;
+
+       /*  Determine which tx ring we will be placed on */
+       i = skb_get_queue_mapping(skb);
+       bnapi = &bp->bnx2_napi[i];
+       txr = &bnapi->tx_ring;
+       txq = netdev_get_tx_queue(dev, i);
 
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
-               netif_stop_queue(dev);
+               netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);
 
@@ -5874,10 +5980,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }
 
+#ifdef BCM_VLAN
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
+#endif
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;
@@ -5974,9 +6082,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dev->trans_start = jiffies;
 
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
-               netif_stop_queue(dev);
+               netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
-                       netif_wake_queue(dev);
+                       netif_tx_wake_queue(txq);
        }
 
        return NETDEV_TX_OK;
@@ -6179,6 +6287,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;
 
+       /* If device is down, we can store the settings only if the user
+        * is setting the currently active port.
+        */
+       if (!netif_running(dev) && cmd->port != bp->phy_port)
+               goto err_out_unlock;
+
        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;
 
@@ -6236,7 +6350,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;
 
-       err = bnx2_setup_phy(bp, cmd->port);
+       err = 0;
+       /* If device is down, the new settings will be picked up when it is
+        * brought up.
+        */
+       if (netif_running(dev))
+               err = bnx2_setup_phy(bp, cmd->port);
 
 err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);
@@ -7035,7 +7154,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
 
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        if (netif_running(dev))
-               bnx2_set_mac_addr(bp);
+               bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
 
        return 0;
 }
@@ -7210,7 +7329,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
+       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
        dev->mem_end = dev->mem_start + mem_len;
        dev->irq = pdev->irq;
 
@@ -7357,7 +7476,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        reg &= BNX2_CONDITION_MFW_RUN_MASK;
        if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
            reg != BNX2_CONDITION_MFW_RUN_NONE) {
-               int i;
                u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
 
                bp->fw_version[j++] = ' ';
@@ -7396,8 +7514,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
-       bp->timer_interval =  HZ;
-       bp->current_interval =  HZ;
+       bp->current_interval = BNX2_TIMER_INTERVAL;
 
        bp->phy_addr = 1;
 
@@ -7428,8 +7545,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                        if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
                                bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
                }
-               bnx2_init_remote_phy(bp);
-
        } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
                   CHIP_NUM(bp) == CHIP_NUM_5708)
                bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
@@ -7438,6 +7553,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                  CHIP_REV(bp) == CHIP_REV_Bx))
                bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
 
+       bnx2_init_fw_cap(bp);
+
        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
@@ -7487,7 +7604,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
        init_timer(&bp->timer);
-       bp->timer.expires = RUN_AT(bp->timer_interval);
+       bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = bnx2_timer;
 
@@ -7542,7 +7659,7 @@ bnx2_init_napi(struct bnx2 *bp)
                if (i == 0)
                        poll = bnx2_poll;
                else
-                       poll = bnx2_tx_poll;
+                       poll = bnx2_poll_msix;
 
                netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
                bnapi->bp = bp;
@@ -7563,7 +7680,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                printk(KERN_INFO "%s", version);
 
        /* dev zeroed in init_etherdev */
-       dev = alloc_etherdev(sizeof(*bp));
+       dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
 
        if (!dev)
                return -ENOMEM;
@@ -7578,7 +7695,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->hard_start_xmit = bnx2_start_xmit;
        dev->stop = bnx2_close;
        dev->get_stats = bnx2_get_stats;
-       dev->set_multicast_list = bnx2_set_rx_mode;
+       dev->set_rx_mode = bnx2_set_rx_mode;
        dev->do_ioctl = bnx2_ioctl;
        dev->set_mac_address = bnx2_change_mac_addr;
        dev->change_mtu = bnx2_change_mtu;
@@ -7600,7 +7717,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);
-       bp->name = board_info[ent->driver_data].name;
 
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
@@ -7627,7 +7743,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
                "IRQ %d, node addr %s\n",
                dev->name,
-               bp->name,
+               board_info[ent->driver_data].name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str),