diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d478391..65df1de 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
 #include <linux/cache.h>
 #include <linux/firmware.h>
 #include <linux/log2.h>
+#include <linux/list.h>
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "2.0.0"
-#define DRV_MODULE_RELDATE     "April 2, 2009"
-#define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-4.6.16.fw"
-#define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-4.6.16.fw"
-#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-4.6.17.fw"
-#define FW_RV2P_FILE_09                "bnx2/bnx2-rv2p-09-4.6.15.fw"
+#define DRV_MODULE_VERSION     "2.0.3"
+#define DRV_MODULE_RELDATE     "Dec 03, 2009"
+#define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-5.0.0.j3.fw"
+#define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
+#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-5.0.0.j3.fw"
+#define FW_RV2P_FILE_09_Ax     "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
+#define FW_RV2P_FILE_09                "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -77,6 +83,7 @@ MODULE_FIRMWARE(FW_MIPS_FILE_06);
 MODULE_FIRMWARE(FW_RV2P_FILE_06);
 MODULE_FIRMWARE(FW_MIPS_FILE_09);
 MODULE_FIRMWARE(FW_RV2P_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
 
 static int disable_msi = 0;
 
@@ -140,7 +147,7 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { 0, }
 };
 
-static struct flash_spec flash_table[] =
+static const struct flash_spec flash_table[] =
 {
 #define BUFFERED_FLAGS         (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
 #define NONBUFFERED_FLAGS      (BNX2_NV_WREN)
@@ -229,7 +236,7 @@ static struct flash_spec flash_table[] =
         "Buffered flash (256kB)"},
 };
 
-static struct flash_spec flash_5709 = {
+static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
@@ -315,6 +322,160 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
        spin_unlock_bh(&bp->indirect_lock);
 }
 
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct drv_ctl_io *io = &info->data.io;
+
+       switch (info->cmd) {
+       case DRV_CTL_IO_WR_CMD:
+               bnx2_reg_wr_ind(bp, io->offset, io->data);
+               break;
+       case DRV_CTL_IO_RD_CMD:
+               io->data = bnx2_reg_rd_ind(bp, io->offset);
+               break;
+       case DRV_CTL_CTX_WR_CMD:
+               bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+       int sb_id;
+
+       if (bp->flags & BNX2_FLAG_USING_MSIX) {
+               cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+               bnapi->cnic_present = 0;
+               sb_id = bp->irq_nvecs;
+               cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+       } else {
+               cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+               bnapi->cnic_tag = bnapi->last_status_idx;
+               bnapi->cnic_present = 1;
+               sb_id = 0;
+               cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+       }
+
+       cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+       cp->irq_arr[0].status_blk = (void *)
+               ((unsigned long) bnapi->status_blk.msi +
+               (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+       cp->irq_arr[0].status_blk_num = sb_id;
+       cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+                             void *data)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+       if (ops == NULL)
+               return -EINVAL;
+
+       if (cp->drv_state & CNIC_DRV_STATE_REGD)
+               return -EBUSY;
+
+       bp->cnic_data = data;
+       rcu_assign_pointer(bp->cnic_ops, ops);
+
+       cp->num_irq = 0;
+       cp->drv_state = CNIC_DRV_STATE_REGD;
+
+       bnx2_setup_cnic_irq_info(bp);
+
+       return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+       mutex_lock(&bp->cnic_lock);
+       cp->drv_state = 0;
+       bnapi->cnic_present = 0;
+       rcu_assign_pointer(bp->cnic_ops, NULL);
+       mutex_unlock(&bp->cnic_lock);
+       synchronize_rcu();
+       return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+       cp->drv_owner = THIS_MODULE;
+       cp->chip_id = bp->chip_id;
+       cp->pdev = bp->pdev;
+       cp->io_base = bp->regview;
+       cp->drv_ctl = bnx2_drv_ctl;
+       cp->drv_register_cnic = bnx2_register_cnic;
+       cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+       return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+       struct cnic_ops *c_ops;
+       struct cnic_ctl_info info;
+
+       mutex_lock(&bp->cnic_lock);
+       c_ops = bp->cnic_ops;
+       if (c_ops) {
+               info.cmd = CNIC_CTL_STOP_CMD;
+               c_ops->cnic_ctl(bp->cnic_data, &info);
+       }
+       mutex_unlock(&bp->cnic_lock);
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+       struct cnic_ops *c_ops;
+       struct cnic_ctl_info info;
+
+       mutex_lock(&bp->cnic_lock);
+       c_ops = bp->cnic_ops;
+       if (c_ops) {
+               if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+                       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+                       bnapi->cnic_tag = bnapi->last_status_idx;
+               }
+               info.cmd = CNIC_CTL_START_CMD;
+               c_ops->cnic_ctl(bp->cnic_data, &info);
+       }
+       mutex_unlock(&bp->cnic_lock);
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -462,6 +623,9 @@ bnx2_disable_int_sync(struct bnx2 *bp)
        int i;
 
        atomic_inc(&bp->intr_sem);
+       if (!netif_running(bp->dev))
+               return;
+
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
@@ -488,12 +652,21 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
-       bnx2_disable_int_sync(bp);
+       bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
+               int i;
+
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
-               bp->dev->trans_start = jiffies; /* prevent tx timeout */
+               /* prevent tx timeout */
+               for (i = 0; i <  bp->dev->num_tx_queues; i++) {
+                       struct netdev_queue *txq;
+
+                       txq = netdev_get_tx_queue(bp->dev, i);
+                       txq->trans_start = jiffies;
+               }
        }
+       bnx2_disable_int_sync(bp);
 }
 
 static void
@@ -504,6 +677,7 @@ bnx2_netif_start(struct bnx2 *bp)
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
+                       bnx2_cnic_start(bp);
                }
        }
 }
@@ -545,8 +719,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
                                                    rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
-               if (rxr->rx_buf_ring)
-                       vfree(rxr->rx_buf_ring);
+               vfree(rxr->rx_buf_ring);
                rxr->rx_buf_ring = NULL;
 
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
@@ -556,8 +729,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
                                                    rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
-               if (rxr->rx_pg_ring)
-                       vfree(rxr->rx_pg_ring);
+               vfree(rxr->rx_pg_ring);
                rxr->rx_pg_ring = NULL;
        }
 }
@@ -1302,6 +1474,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr |= BCM5708S_BMCR_FORCE_2500;
+       } else {
+               return;
        }
 
        if (bp->autoneg & AUTONEG_SPEED) {
@@ -1336,6 +1510,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+       } else {
+               return;
        }
 
        if (bp->autoneg & AUTONEG_SPEED)
@@ -2600,6 +2776,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_tx_cons_ptr;
+       barrier();
        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
                cons++;
        return cons;
@@ -2629,14 +2806,15 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;
 
+               /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
+               prefetch(&skb->end);
+
                /* partial BD completions possible with TSO packets */
-               if (skb_is_gso(skb)) {
+               if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;
 
-                       last_idx = sw_cons +
-                               skb_shinfo(skb)->nr_frags + 1;
-                       last_ring_idx = sw_ring_cons +
-                               skb_shinfo(skb)->nr_frags + 1;
+                       last_idx = sw_cons + tx_buf->nr_frags + 1;
+                       last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
@@ -2645,13 +2823,21 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        }
                }
 
-               skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+               pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+                       skb_headlen(skb), PCI_DMA_TODEVICE);
 
                tx_buf->skb = NULL;
-               last = skb_shinfo(skb)->nr_frags;
+               last = tx_buf->nr_frags;
 
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);
+
+                       pci_unmap_page(bp->pdev,
+                               pci_unmap_addr(
+                                       &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+                                       mapping),
+                               skb_shinfo(skb)->frags[i].size,
+                               PCI_DMA_TODEVICE);
                }
 
                sw_cons = NEXT_TX_BD(sw_cons);
@@ -2661,7 +2847,8 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                if (tx_pkt == budget)
                        break;
 
-               hw_cons = bnx2_get_hw_tx_cons(bnapi);
+               if (hw_cons == sw_cons)
+                       hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }
 
        txr->hw_tx_cons = hw_cons;
@@ -2879,6 +3066,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_rx_cons_ptr;
+       barrier();
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
@@ -3162,6 +3350,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
        if (bnx2_has_fast_work(bnapi))
                return 1;
 
+#ifdef BCM_CNIC
+       if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+               return 1;
+#endif
+
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;
@@ -3191,6 +3384,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
        bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+       struct cnic_ops *c_ops;
+
+       if (!bnapi->cnic_present)
+               return;
+
+       rcu_read_lock();
+       c_ops = rcu_dereference(bp->cnic_ops);
+       if (c_ops)
+               bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+                                                     bnapi->status_blk.msi);
+       rcu_read_unlock();
+}
+#endif
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
        struct status_block *sblk = bnapi->status_blk.msi;
@@ -3265,6 +3475,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
+#ifdef BCM_CNIC
+               bnx2_poll_cnic(bp, bnapi);
+#endif
+
                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
@@ -3306,7 +3520,7 @@ bnx2_set_rx_mode(struct net_device *dev)
 {
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
-       struct dev_addr_list *uc_ptr;
+       struct netdev_hw_addr *ha;
        int i;
 
        if (!netif_running(dev))
@@ -3365,21 +3579,19 @@ bnx2_set_rx_mode(struct net_device *dev)
                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }
 
-       uc_ptr = NULL;
-       if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
+       if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
-               uc_ptr = dev->uc_list;
-
                /* Add all entries into to the match filter list */
-               for (i = 0; i < dev->uc_count; i++) {
-                       bnx2_set_mac_addr(bp, uc_ptr->da_addr,
+               i = 0;
+               list_for_each_entry(ha, &dev->uc.list, list) {
+                       bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
-                       uc_ptr = uc_ptr->next;
+                       i++;
                }
 
        }
@@ -3433,7 +3645,11 @@ bnx2_request_firmware(struct bnx2 *bp)
 
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
-               rv2p_fw_file = FW_RV2P_FILE_09;
+               if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
+                   (CHIP_ID(bp) == CHIP_ID_5709_A1))
+                       rv2p_fw_file = FW_RV2P_FILE_09_Ax;
+               else
+                       rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
@@ -4039,7 +4255,7 @@ bnx2_init_nvram(struct bnx2 *bp)
 {
        u32 val;
        int j, entry_count, rc = 0;
-       struct flash_spec *flash;
+       const struct flash_spec *flash;
 
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
@@ -4630,8 +4846,11 @@ bnx2_init_chip(struct bnx2 *bp)
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-       if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
-               val |= BNX2_MQ_CONFIG_HALT_DIS;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+               if (CHIP_REV(bp) == CHIP_REV_Ax)
+                       val |= BNX2_MQ_CONFIG_HALT_DIS;
+       }
 
        REG_WR(bp, BNX2_MQ_CONFIG, val);
 
@@ -4670,6 +4889,7 @@ bnx2_init_chip(struct bnx2 *bp)
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
 
+       memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;
 
@@ -4708,7 +4928,7 @@ bnx2_init_chip(struct bnx2 *bp)
        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
 
-       if (CHIP_NUM(bp) == CHIP_NUM_5708)
+       if (bp->flags & BNX2_FLAG_BROKEN_STATS)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
@@ -4729,7 +4949,7 @@ bnx2_init_chip(struct bnx2 *bp)
        }
 
        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
-               val |= BNX2_HC_CONFIG_ONE_SHOT;
+               val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
 
        REG_WR(bp, BNX2_HC_CONFIG, val);
 
@@ -4946,8 +5166,12 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
-               if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
+               if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+                       printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
+                                               "with %d/%d pages only\n",
+                              bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
                        break;
+               }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
@@ -4955,8 +5179,12 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
+               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+                       printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
+                                               "%d/%d skbs only\n",
+                              bp->dev->name, ring_num, i, bp->rx_ring_size);
                        break;
+               }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
@@ -5091,17 +5319,29 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                for (j = 0; j < TX_DESC_CNT; ) {
                        struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb = tx_buf->skb;
+                       int k, last;
 
                        if (skb == NULL) {
                                j++;
                                continue;
                        }
 
-                       skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+                       pci_unmap_single(bp->pdev,
+                                        pci_unmap_addr(tx_buf, mapping),
+                                        skb_headlen(skb),
+                                        PCI_DMA_TODEVICE);
 
                        tx_buf->skb = NULL;
 
-                       j += skb_shinfo(skb)->nr_frags + 1;
+                       last = tx_buf->nr_frags;
+                       j++;
+                       for (k = 0; k < last; k++, j++) {
+                               tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+                               pci_unmap_page(bp->pdev,
+                                       pci_unmap_addr(tx_buf, mapping),
+                                       skb_shinfo(skb)->frags[k].size,
+                                       PCI_DMA_TODEVICE);
+                       }
                        dev_kfree_skb(skb);
                }
        }
@@ -5480,11 +5720,12 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
 
-       if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+       map = pci_map_single(bp->pdev, skb->data, pkt_size,
+               PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(bp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
-       map = skb_shinfo(skb)->dma_maps[0];
 
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -5519,7 +5760,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
        udelay(5);
 
-       skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+       pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);
 
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -5833,7 +6074,7 @@ bnx2_timer(unsigned long data)
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
 
        /* workaround occasional corrupted counters */
-       if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
+       if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);
 
@@ -6038,8 +6279,11 @@ bnx2_reset_task(struct work_struct *work)
 {
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
-       if (!netif_running(bp->dev))
+       rtnl_lock();
+       if (!netif_running(bp->dev)) {
+               rtnl_unlock();
                return;
+       }
 
        bnx2_netif_stop(bp);
 
@@ -6047,6 +6291,28 @@ bnx2_reset_task(struct work_struct *work)
 
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
+       rtnl_unlock();
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+       struct net_device *dev = bp->dev;
+
+       printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
+               atomic_read(&bp->intr_sem));
+       printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
+                           "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
+               REG_RD(bp, BNX2_EMAC_TX_STATUS),
+               REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+       printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+               dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
+               bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+       printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+               dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+       if (bp->flags & BNX2_FLAG_USING_MSIX)
+               printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
+                       REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
 }
 
 static void
@@ -6054,6 +6320,8 @@ bnx2_tx_timeout(struct net_device *dev)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
+       bnx2_dump_state(bp);
+
        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
 }
@@ -6065,9 +6333,14 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
-       bnx2_netif_stop(bp);
+       if (netif_running(dev))
+               bnx2_netif_stop(bp);
 
        bp->vlgrp = vlgrp;
+
+       if (!netif_running(dev))
+               return;
+
        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
@@ -6080,7 +6353,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue().
  */
-static int
+static netdev_tx_t
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bnx2 *bp = netdev_priv(dev);
@@ -6093,7 +6366,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;
-       struct skb_shared_info *sp;
 
        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
@@ -6158,16 +6430,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        } else
                mss = 0;
 
-       if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+       mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(bp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
 
-       sp = skb_shinfo(skb);
-       mapping = sp->dma_maps[0];
-
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
+       pci_unmap_addr_set(tx_buf, mapping, mapping);
 
        txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6177,6 +6448,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
 
        last_frag = skb_shinfo(skb)->nr_frags;
+       tx_buf->nr_frags = last_frag;
+       tx_buf->is_gso = skb_is_gso(skb);
 
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6186,7 +6459,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                txbd = &txr->tx_desc_ring[ring_prod];
 
                len = frag->size;
-               mapping = sp->dma_maps[i + 1];
+               mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+                       len, PCI_DMA_TODEVICE);
+               if (pci_dma_mapping_error(bp->pdev, mapping))
+                       goto dma_error;
+               pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+                                  mapping);
 
                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6205,7 +6483,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        mmiowb();
 
        txr->tx_prod = prod;
-       dev->trans_start = jiffies;
 
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
@@ -6214,6 +6491,30 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        return NETDEV_TX_OK;
+dma_error:
+       /* save value of frag that failed */
+       last_frag = i;
+
+       /* start back at beginning and unmap skb */
+       prod = txr->tx_prod;
+       ring_prod = TX_RING_IDX(prod);
+       tx_buf = &txr->tx_buf_ring[ring_prod];
+       tx_buf->skb = NULL;
+       pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+                        skb_headlen(skb), PCI_DMA_TODEVICE);
+
+       /* unmap remaining mapped pages */
+       for (i = 0; i < last_frag; i++) {
+               prod = NEXT_TX_BD(prod);
+               ring_prod = TX_RING_IDX(prod);
+               tx_buf = &txr->tx_buf_ring[ring_prod];
+               pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+                              skb_shinfo(skb)->frags[i].size,
+                              PCI_DMA_TODEVICE);
+       }
+
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 /* Called with rtnl_lock */
@@ -6287,7 +6588,8 @@ bnx2_get_stats(struct net_device *dev)
                stats_blk->stat_EtherStatsOverrsizePkts);
 
        net_stats->rx_over_errors =
-               (unsigned long) stats_blk->stat_IfInMBUFDiscards;
+               (unsigned long) (stats_blk->stat_IfInFTQDiscards +
+               stats_blk->stat_IfInMBUFDiscards);
 
        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
@@ -6320,8 +6622,8 @@ bnx2_get_stats(struct net_device *dev)
                net_stats->tx_carrier_errors;
 
        net_stats->rx_missed_errors =
-               (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
-               stats_blk->stat_FwRxDrop);
+               (unsigned long) (stats_blk->stat_IfInFTQDiscards +
+               stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
 
        return net_stats;
 }
@@ -6636,6 +6938,14 @@ bnx2_nway_reset(struct net_device *dev)
        return 0;
 }
 
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+
+       return bp->link_up;
+}
+
 static int
 bnx2_get_eeprom_len(struct net_device *dev)
 {
@@ -6735,7 +7045,7 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
                0xff;
 
        bp->stats_ticks = coal->stats_block_coalesce_usecs;
-       if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+       if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
                        bp->stats_ticks = USEC_PER_SEC;
        }
@@ -6786,9 +7096,14 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
                int rc;
 
                rc = bnx2_alloc_mem(bp);
-               if (rc)
+               if (!rc)
+                       rc = bnx2_init_nic(bp, 0);
+
+               if (rc) {
+                       bnx2_napi_enable(bp);
+                       dev_close(bp->dev);
                        return rc;
-               bnx2_init_nic(bp, 0);
+               }
                bnx2_netif_start(bp);
        }
        return 0;
@@ -6879,11 +7194,9 @@ bnx2_set_tso(struct net_device *dev, u32 data)
        return 0;
 }
 
-#define BNX2_NUM_STATS 46
-
 static struct {
        char string[ETH_GSTRING_LEN];
-} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
+} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
@@ -6928,10 +7241,14 @@ static struct {
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
+       { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
 };
 
+#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
+                       sizeof(bnx2_stats_str_arr[0]))
+
 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
 
 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
@@ -6979,6 +7296,7 @@ static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
     STATS_OFFSET32(stat_OutXoffSent),
     STATS_OFFSET32(stat_MacControlFramesReceived),
     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
+    STATS_OFFSET32(stat_IfInFTQDiscards),
     STATS_OFFSET32(stat_IfInMBUFDiscards),
     STATS_OFFSET32(stat_FwRxDrop),
 };
@@ -6991,7 +7309,7 @@ static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
-       4,4,4,4,4,4,
+       4,4,4,4,4,4,4,
 };
 
 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
@@ -6999,7 +7317,7 @@ static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
-       4,4,4,4,4,4,
+       4,4,4,4,4,4,4,
 };
 
 #define BNX2_NUM_TESTS 6
@@ -7203,7 +7521,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
-       .get_link               = ethtool_op_get_link,
+       .get_link               = bnx2_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
@@ -7257,9 +7575,6 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 
        case SIOCSMIIREG:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;
 
@@ -7415,6 +7730,86 @@ bnx2_get_pci_speed(struct bnx2 *bp)
 
 }
 
+static void __devinit
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+       int rc, i, v0_len = 0;
+       u8 *data;
+       u8 *v0_str = NULL;
+       bool mn_match = false;
+
+#define BNX2_VPD_NVRAM_OFFSET  0x300
+#define BNX2_VPD_LEN           128
+#define BNX2_MAX_VER_SLEN      30
+
+       data = kmalloc(256, GFP_KERNEL);
+       if (!data)
+               return;
+
+       rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+                            BNX2_VPD_LEN);
+       if (rc)
+               goto vpd_done;
+
+       for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+               data[i] = data[i + BNX2_VPD_LEN + 3];
+               data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+               data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+               data[i + 3] = data[i + BNX2_VPD_LEN];
+       }
+
+       for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
+               unsigned char val = data[i];
+               unsigned int block_end;
+
+               if (val == 0x82 || val == 0x91) {
+                       i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
+                       continue;
+               }
+
+               if (val != 0x90)
+                       goto vpd_done;
+
+               block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
+               i += 3;
+
+               if (block_end > BNX2_VPD_LEN)
+                       goto vpd_done;
+
+               while (i < (block_end - 2)) {
+                       int len = data[i + 2];
+
+                       if (i + 3 + len > block_end)
+                               goto vpd_done;
+
+                       if (data[i] == 'M' && data[i + 1] == 'N') {
+                               if (len != 4 ||
+                                   memcmp(&data[i + 3], "1028", 4))
+                                       goto vpd_done;
+                               mn_match = true;
+
+                       } else if (data[i] == 'V' && data[i + 1] == '0') {
+                               if (len > BNX2_MAX_VER_SLEN)
+                                       goto vpd_done;
+
+                               v0_len = len;
+                               v0_str = &data[i + 3];
+                       }
+                       i += 3 + len;
+
+                       if (mn_match && v0_str) {
+                               memcpy(bp->fw_version, v0_str, v0_len);
+                               bp->fw_version[v0_len] = ' ';
+                               goto vpd_done;
+                       }
+               }
+               goto vpd_done;
+       }
+
+vpd_done:
+       kfree(data);
+}
+
 static int __devinit
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
@@ -7466,10 +7861,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        spin_lock_init(&bp->phy_lock);
        spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+       mutex_init(&bp->cnic_lock);
+#endif
        INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
        dev->mem_end = dev->mem_start + mem_len;
        dev->irq = pdev->irq;
 
@@ -7511,6 +7909,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                        rc = -EIO;
                        goto err_out_unmap;
                }
+               bp->flags |= BNX2_FLAG_BROKEN_STATS;
        }
 
        if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
@@ -7584,10 +7983,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                goto err_out_unmap;
        }
 
+       bnx2_read_vpd_fw_ver(bp);
+
+       j = strlen(bp->fw_version);
        reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
-       for (i = 0, j = 0; i < 3; i++) {
+       for (i = 0; i < 3 && j < 24; i++) {
                u8 num, k, skip0;
 
+               if (i == 0) {
+                       bp->fw_version[j++] = 'b';
+                       bp->fw_version[j++] = 'c';
+                       bp->fw_version[j++] = ' ';
+               }
                num = (u8) (reg >> (24 - (i * 8)));
                for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
                        if (num >= k || !skip0 || k == 1) {
@@ -7618,8 +8025,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
            reg != BNX2_CONDITION_MFW_RUN_NONE) {
                u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
 
-               bp->fw_version[j++] = ' ';
-               for (i = 0; i < 3; i++) {
+               if (j < 32)
+                       bp->fw_version[j++] = ' ';
+               for (i = 0; i < 3 && j < 28; i++) {
                        reg = bnx2_reg_rd_ind(bp, addr + i * 4);
                        reg = swab32(reg);
                        memcpy(&bp->fw_version[j], &reg, 4);
@@ -7642,13 +8050,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bp->rx_csum = 1;
 
-       bp->tx_quick_cons_trip_int = 20;
+       bp->tx_quick_cons_trip_int = 2;
        bp->tx_quick_cons_trip = 20;
-       bp->tx_ticks_int = 80;
+       bp->tx_ticks_int = 18;
        bp->tx_ticks = 80;
 
-       bp->rx_quick_cons_trip_int = 6;
-       bp->rx_quick_cons_trip = 6;
+       bp->rx_quick_cons_trip_int = 2;
+       bp->rx_quick_cons_trip = 12;
        bp->rx_ticks_int = 18;
        bp->rx_ticks = 18;
 
@@ -7826,6 +8234,13 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #endif
 };
 
+static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+{
+#ifdef BCM_VLAN
+       dev->vlan_features |= flags;
+#endif
+}
+
 static int __devinit
 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -7867,16 +8282,20 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        memcpy(dev->perm_addr, bp->mac_addr, 6);
 
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
-       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+       vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_IPV6_CSUM;
-
+               vlan_features_add(dev, NETIF_F_IPV6_CSUM);
+       }
 #ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 #endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
-       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+       vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_TSO6;
-
+               vlan_features_add(dev, NETIF_F_TSO6);
+       }
        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
@@ -7991,6 +8410,11 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
        rtnl_lock();
        netif_device_detach(dev);
 
+       if (state == pci_channel_io_perm_failure) {
+               rtnl_unlock();
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
        if (netif_running(dev)) {
                bnx2_netif_stop(bp);
                del_timer_sync(&bp->timer);
@@ -8024,6 +8448,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
        pci_restore_state(pdev);
+       pci_save_state(pdev);
 
        if (netif_running(dev)) {
                bnx2_set_power_state(bp, PCI_D0);