[BNX2]: Add 5709 init code.
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index c6510e1..baad015 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -9,7 +9,6 @@
  * Written by: Michael Chan  (mchan@broadcom.com)
  */
 
-#include <linux/config.h>
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -32,6 +31,7 @@
 #include <asm/irq.h>
 #include <linux/delay.h>
 #include <asm/byteorder.h>
+#include <asm/page.h>
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.4.40"
-#define DRV_MODULE_RELDATE     "May 22, 2006"
+#define DRV_MODULE_VERSION     "1.4.45"
+#define DRV_MODULE_RELDATE     "September 29, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -148,7 +148,7 @@ static struct flash_spec flash_table[] =
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
-       {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,        
+       {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-       u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+       u32 diff;
 
+       smp_mb();
+       diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
        if (diff > MAX_TX_DESC_CNT)
                diff = (diff & MAX_TX_DESC_CNT) - 1;
        return (bp->tx_ring_size - diff);
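The smp_mb() added here pairs with the one added in bnx2_tx_int() further down. Roughly, the two sides look like this (a condensed sketch of code from this same patch, not a literal excerpt):

        /* consumer side, bnx2_tx_int() */
        bp->tx_cons = sw_cons;
        smp_mb();       /* publish tx_cons before checking the queue state */
        if (netif_queue_stopped(bp->dev) &&
            bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                netif_wake_queue(bp->dev);      /* re-tested under netif_tx_lock */

        /* producer side, bnx2_start_xmit() */
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                /* the smp_mb() inside bnx2_tx_avail() orders the stop
                 * against this re-read of tx_cons */
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

Without the barriers, the producer could stop the queue just after the consumer's final check, and neither side would ever wake it again.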
@@ -234,8 +236,23 @@ static void
 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 {
        offset += cid_addr;
-       REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
-       REG_WR(bp, BNX2_CTX_DATA, val);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               int i;
+
+               REG_WR(bp, BNX2_CTX_CTX_DATA, val);
+               REG_WR(bp, BNX2_CTX_CTX_CTRL,
+                      offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+               for (i = 0; i < 5; i++) {
+                       u32 val;
+                       val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+                       if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+                               break;
+                       udelay(5);
+               }
+       } else {
+               REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
+               REG_WR(bp, BNX2_CTX_DATA, val);
+       }
 }
 
 static int
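For reference, the 5709 path above bounds the wait for the context write to be acknowledged: it polls BNX2_CTX_CTX_CTRL up to 5 times with udelay(5) between reads, i.e. at most

        5 iterations * 5 us = 25 us

of delay; if the WRITE_REQ bit has still not cleared by then, the function simply stops polling (no error is reported). The pre-5709 chips keep the old direct BNX2_CTX_DATA_ADR / BNX2_CTX_DATA write.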
@@ -315,7 +332,7 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
-    
+
        for (i = 0; i < 50; i++) {
                udelay(10);
 
@@ -401,6 +418,14 @@ bnx2_free_mem(struct bnx2 *bp)
 {
        int i;
 
+       for (i = 0; i < bp->ctx_pages; i++) {
+               if (bp->ctx_blk[i]) {
+                       pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+                                           bp->ctx_blk[i],
+                                           bp->ctx_blk_mapping[i]);
+                       bp->ctx_blk[i] = NULL;
+               }
+       }
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
@@ -479,6 +504,18 @@ bnx2_alloc_mem(struct bnx2 *bp)
 
        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+               if (bp->ctx_pages == 0)
+                       bp->ctx_pages = 1;
+               for (i = 0; i < bp->ctx_pages; i++) {
+                       bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
+                                               BCM_PAGE_SIZE,
+                                               &bp->ctx_blk_mapping[i]);
+                       if (bp->ctx_blk[i] == NULL)
+                               goto alloc_mem_err;
+               }
+       }
        return 0;
 
 alloc_mem_err:
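A worked example of the 5709 context memory sizing above, assuming the usual BCM_PAGE_SIZE of 4 KiB:

        bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE = 8192 / 4096 = 2

so two DMA-coherent pages are allocated for the 5709's host-resident context block; the "== 0, use 1" clamp covers configurations where BCM_PAGE_SIZE is larger than 8 KiB.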
@@ -583,7 +620,7 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp)
        u32 local_adv, remote_adv;
 
        bp->flow_ctrl = 0;
-       if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 
+       if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
 
                if (bp->duplex == DUPLEX_FULL) {
@@ -801,13 +838,13 @@ bnx2_set_mac_link(struct bnx2 *bp)
 
        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
-               BNX2_EMAC_MODE_25G);
+               BNX2_EMAC_MODE_25G_MODE);
 
        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
-                               if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-                                       val |= BNX2_EMAC_MODE_PORT_MII_10;
+                               if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+                                       val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
@@ -815,7 +852,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
-                               val |= BNX2_EMAC_MODE_25G;
+                               val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
@@ -858,7 +895,7 @@ bnx2_set_link(struct bnx2 *bp)
        u32 bmsr;
        u8 link_up;
 
-       if (bp->loopback == MAC_LOOPBACK) {
+       if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }
@@ -900,6 +937,7 @@ bnx2_set_link(struct bnx2 *bp)
                        u32 bmcr;
 
                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
+                       bmcr &= ~BCM5708S_BMCR_FORCE_2500;
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANENABLE);
@@ -986,7 +1024,21 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
                u32 new_bmcr;
                int force_link_down = 0;
 
-               if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+               bnx2_read_phy(bp, MII_ADVERTISE, &adv);
+               adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+
+               bnx2_read_phy(bp, MII_BMCR, &bmcr);
+               new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
+               new_bmcr |= BMCR_SPEED1000;
+               if (bp->req_line_speed == SPEED_2500) {
+                       new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+                       bnx2_read_phy(bp, BCM5708S_UP1, &up1);
+                       if (!(up1 & BCM5708S_UP1_2G5)) {
+                               up1 |= BCM5708S_UP1_2G5;
+                               bnx2_write_phy(bp, BCM5708S_UP1, up1);
+                               force_link_down = 1;
+                       }
+               } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (up1 & BCM5708S_UP1_2G5) {
                                up1 &= ~BCM5708S_UP1_2G5;
@@ -995,12 +1047,6 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
                        }
                }
 
-               bnx2_read_phy(bp, MII_ADVERTISE, &adv);
-               adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
-
-               bnx2_read_phy(bp, MII_BMCR, &bmcr);
-               new_bmcr = bmcr & ~BMCR_ANENABLE;
-               new_bmcr |= BMCR_SPEED1000;
                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
@@ -1021,6 +1067,7 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
+                               bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, MII_ADVERTISE, adv);
                        bnx2_write_phy(bp, MII_BMCR, new_bmcr);
@@ -1046,30 +1093,26 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
-                       int i;
-
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-                       for (i = 0; i < 110; i++) {
-                               udelay(100);
-                       }
+                       spin_unlock_bh(&bp->phy_lock);
+                       msleep(20);
+                       spin_lock_bh(&bp->phy_lock);
                }
 
                bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
                bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
-               if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-                       /* Speed up link-up time when the link partner
-                        * does not autonegotiate which is very common
-                        * in blade servers. Some blade servers use
-                        * IPMI for kerboard input and it's important
-                        * to minimize link disruptions. Autoneg. involves
-                        * exchanging base pages plus 3 next pages and
-                        * normally completes in about 120 msec.
-                        */
-                       bp->current_interval = SERDES_AN_TIMEOUT;
-                       bp->serdes_an_pending = 1;
-                       mod_timer(&bp->timer, jiffies + bp->current_interval);
-               }
+               /* Speed up link-up time when the link partner
+                * does not autonegotiate which is very common
+                * in blade servers. Some blade servers use
+                * IPMI for keyboard input and it's important

+                * to minimize link disruptions. Autoneg. involves
+                * exchanging base pages plus 3 next pages and
+                * normally completes in about 120 msec.
+                */
+               bp->current_interval = SERDES_AN_TIMEOUT;
+               bp->serdes_an_pending = 1;
+               mod_timer(&bp->timer, jiffies + bp->current_interval);
        }
 
        return 0;
@@ -1085,7 +1128,7 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
 
 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
-       
+
 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
 
 static int
@@ -1118,7 +1161,7 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;
-               
+
                new_adv_reg |= ADVERTISE_CSMA;
 
                new_adv_reg |= bnx2_phy_get_pause_adv(bp);
@@ -1151,20 +1194,19 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;
-               int i = 0;
 
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
-               
+
                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-                       do {
-                               udelay(100);
-                               bnx2_read_phy(bp, MII_BMSR, &bmsr);
-                               bnx2_read_phy(bp, MII_BMSR, &bmsr);
-                               i++;
-                       } while ((bmsr & BMSR_LSTATUS) && (i < 620));
+                       spin_unlock_bh(&bp->phy_lock);
+                       msleep(50);
+                       spin_lock_bh(&bp->phy_lock);
+
+                       bnx2_read_phy(bp, MII_BMSR, &bmsr);
+                       bnx2_read_phy(bp, MII_BMSR, &bmsr);
                }
 
                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
@@ -1256,9 +1298,8 @@ bnx2_init_5706s_phy(struct bnx2 *bp)
 {
        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
 
-       if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-               REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
-       }
+       if (CHIP_NUM(bp) == CHIP_NUM_5706)
+               REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
 
        if (bp->dev->mtu > 1500) {
                u32 val;
@@ -1395,13 +1436,13 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
-               udelay(10);
+               msleep(100);
        }
 
        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
-                     BNX2_EMAC_MODE_25G);
+                     BNX2_EMAC_MODE_25G_MODE);
 
        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
@@ -1452,6 +1493,40 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
        return 0;
 }
 
+static int
+bnx2_init_5709_context(struct bnx2 *bp)
+{
+       int i, ret = 0;
+       u32 val;
+
+       val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+       val |= (BCM_PAGE_BITS - 8) << 16;
+       REG_WR(bp, BNX2_CTX_COMMAND, val);
+       for (i = 0; i < bp->ctx_pages; i++) {
+               int j;
+
+               REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+                      (bp->ctx_blk_mapping[i] & 0xffffffff) |
+                      BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+               REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+                      (u64) bp->ctx_blk_mapping[i] >> 32);
+               REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+                      BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+               for (j = 0; j < 10; j++) {
+
+                       val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+                       if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+                               break;
+                       udelay(5);
+               }
+               if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+                       ret = -EBUSY;
+                       break;
+               }
+       }
+       return ret;
+}
+
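The new bnx2_init_5709_context() above walks the pages allocated in bnx2_alloc_mem() and loads each one into the chip's host page table: the low 32 bits of the DMA address (plus the VALID bit) go to HOST_PAGE_TBL_DATA0, the high 32 bits to DATA1, and the page index with WRITE_REQ set to HOST_PAGE_TBL_CTRL, which is then polled until the request bit clears. For reference, assuming the usual BCM_PAGE_BITS of 12 (4 KiB context pages), the page-size field written to BNX2_CTX_COMMAND works out to

        (BCM_PAGE_BITS - 8) << 16  =  (12 - 8) << 16

i.e. the field encodes log2(page size / 256).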
 static void
 bnx2_init_context(struct bnx2 *bp)
 {
@@ -1545,7 +1620,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
 }
 
 static void
-bnx2_set_mac_addr(struct bnx2 *bp) 
+bnx2_set_mac_addr(struct bnx2 *bp)
 {
        u32 val;
        u8 *mac_addr = bp->dev->dev_addr;
@@ -1554,7 +1629,7 @@ bnx2_set_mac_addr(struct bnx2 *bp)
 
        REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
 
-       val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
+       val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                (mac_addr[4] << 8) | mac_addr[5];
 
        REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
@@ -1569,16 +1644,14 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;
 
-       skb = dev_alloc_skb(bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }
 
-       if (unlikely((align = (unsigned long) skb->data & 0x7))) {
-               skb_reserve(skb, 8 - align);
-       }
+       if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
+               skb_reserve(skb, BNX2_RX_ALIGN - align);
 
-       skb->dev = bp->dev;
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);
 
@@ -1637,9 +1710,9 @@ bnx2_tx_int(struct bnx2 *bp)
 
                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;
-#ifdef BCM_TSO 
+#ifdef BCM_TSO
                /* partial BD completions possible with TSO packets */
-               if (skb_shinfo(skb)->tso_size) {
+               if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;
 
                        last_idx = sw_cons +
@@ -1675,7 +1748,7 @@ bnx2_tx_int(struct bnx2 *bp)
 
                tx_free_bd += last + 1;
 
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb(skb);
 
                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;
@@ -1686,15 +1759,20 @@ bnx2_tx_int(struct bnx2 *bp)
        }
 
        bp->tx_cons = sw_cons;
+       /* Need to make the tx_cons update visible to bnx2_start_xmit()
+        * before checking for netif_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that bnx2_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
 
-       if (unlikely(netif_queue_stopped(bp->dev))) {
-               spin_lock(&bp->tx_lock);
+       if (unlikely(netif_queue_stopped(bp->dev)) &&
+                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+               netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
-                   (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+                   (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
-               }
-               spin_unlock(&bp->tx_lock);
+               netif_tx_unlock(bp->dev);
        }
 }
 
@@ -1786,7 +1864,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;
 
-                       new_skb = dev_alloc_skb(len + 2);
+                       new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;
 
@@ -1797,7 +1875,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);
-                       new_skb->dev = bp->dev;
 
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
@@ -1823,7 +1900,7 @@ reuse_rx:
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {
 
-                       dev_kfree_skb_irq(skb);
+                       dev_kfree_skb(skb);
                        goto next_rx;
 
                }
@@ -1883,7 +1960,7 @@ next_rx:
  * is that the MSI interrupt is always serviced.
  */
 static irqreturn_t
-bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
+bnx2_msi(int irq, void *dev_instance)
 {
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
@@ -1903,7 +1980,7 @@ bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
 }
 
 static irqreturn_t
-bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+bnx2_interrupt(int irq, void *dev_instance)
 {
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
@@ -1979,12 +2056,12 @@ bnx2_poll(struct net_device *dev, int *budget)
 
                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;
-               
+
                work_done = bnx2_rx_int(bp, orig_budget);
                *budget -= work_done;
                dev->quota -= work_done;
        }
-       
+
        bp->last_status_idx = bp->status_blk->status_idx;
        rmb();
 
@@ -2035,7 +2112,8 @@ bnx2_set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
-               sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
+               sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+                            BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
@@ -2203,11 +2281,12 @@ load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
        }
 }
 
-static void
+static int
 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
 {
        u32 offset;
        u32 val;
+       int rc;
 
        /* Halt the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
@@ -2217,7 +2296,18 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
 
        /* Load the Text area. */
        offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
-       if (fw->text) {
+       if (fw->gz_text) {
+               u32 text_len;
+               void *text;
+
+               rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
+                                &text_len);
+               if (rc)
+                       return rc;
+
+               fw->text = text;
+       }
+       if (fw->gz_text) {
                int j;
 
                for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
@@ -2275,13 +2365,15 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
        val &= ~cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
        REG_WR_IND(bp, cpu_reg->mode, val);
+
+       return 0;
 }
 
 static int
 bnx2_init_cpus(struct bnx2 *bp)
 {
        struct cpu_reg cpu_reg;
-       struct fw_info fw;
+       struct fw_info *fw;
        int rc = 0;
        void *text;
        u32 text_len;
@@ -2317,44 +2409,12 @@ bnx2_init_cpus(struct bnx2 *bp)
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
-    
-       fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
-       fw.start_addr = bnx2_RXP_b06FwStartAddr;
-
-       fw.text_addr = bnx2_RXP_b06FwTextAddr;
-       fw.text_len = bnx2_RXP_b06FwTextLen;
-       fw.text_index = 0;
-
-       rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
-                        &text, &text_len);
-       if (rc)
-               goto init_cpu_err;
-
-       fw.text = text;
-
-       fw.data_addr = bnx2_RXP_b06FwDataAddr;
-       fw.data_len = bnx2_RXP_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_RXP_b06FwData;
 
-       fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
-       fw.sbss_len = bnx2_RXP_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_RXP_b06FwSbss;
+       fw = &bnx2_rxp_fw_06;
 
-       fw.bss_addr = bnx2_RXP_b06FwBssAddr;
-       fw.bss_len = bnx2_RXP_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_RXP_b06FwBss;
-
-       fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
-       fw.rodata_len = bnx2_RXP_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_RXP_b06FwRodata;
-
-       load_cpu_fw(bp, &cpu_reg, &fw);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
+       if (rc)
+               goto init_cpu_err;
 
        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
@@ -2369,44 +2429,12 @@ bnx2_init_cpus(struct bnx2 *bp)
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
-    
-       fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
-       fw.start_addr = bnx2_TXP_b06FwStartAddr;
-
-       fw.text_addr = bnx2_TXP_b06FwTextAddr;
-       fw.text_len = bnx2_TXP_b06FwTextLen;
-       fw.text_index = 0;
-
-       rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
-                        &text, &text_len);
-       if (rc)
-               goto init_cpu_err;
-
-       fw.text = text;
-
-       fw.data_addr = bnx2_TXP_b06FwDataAddr;
-       fw.data_len = bnx2_TXP_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_TXP_b06FwData;
-
-       fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
-       fw.sbss_len = bnx2_TXP_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_TXP_b06FwSbss;
-
-       fw.bss_addr = bnx2_TXP_b06FwBssAddr;
-       fw.bss_len = bnx2_TXP_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_TXP_b06FwBss;
 
-       fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
-       fw.rodata_len = bnx2_TXP_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_TXP_b06FwRodata;
+       fw = &bnx2_txp_fw_06;
 
-       load_cpu_fw(bp, &cpu_reg, &fw);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
+       if (rc)
+               goto init_cpu_err;
 
        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
@@ -2421,44 +2449,12 @@ bnx2_init_cpus(struct bnx2 *bp)
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
-    
-       fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
-       fw.start_addr = bnx2_TPAT_b06FwStartAddr;
-
-       fw.text_addr = bnx2_TPAT_b06FwTextAddr;
-       fw.text_len = bnx2_TPAT_b06FwTextLen;
-       fw.text_index = 0;
-
-       rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
-                        &text, &text_len);
-       if (rc)
-               goto init_cpu_err;
-
-       fw.text = text;
-
-       fw.data_addr = bnx2_TPAT_b06FwDataAddr;
-       fw.data_len = bnx2_TPAT_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_TPAT_b06FwData;
-
-       fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
-       fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_TPAT_b06FwSbss;
 
-       fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
-       fw.bss_len = bnx2_TPAT_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_TPAT_b06FwBss;
+       fw = &bnx2_tpat_fw_06;
 
-       fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
-       fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_TPAT_b06FwRodata;
-
-       load_cpu_fw(bp, &cpu_reg, &fw);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
+       if (rc)
+               goto init_cpu_err;
 
        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
@@ -2473,44 +2469,12 @@ bnx2_init_cpus(struct bnx2 *bp)
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
-    
-       fw.ver_major = bnx2_COM_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_COM_b06FwReleaseFix;
-       fw.start_addr = bnx2_COM_b06FwStartAddr;
-
-       fw.text_addr = bnx2_COM_b06FwTextAddr;
-       fw.text_len = bnx2_COM_b06FwTextLen;
-       fw.text_index = 0;
-
-       rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
-                        &text, &text_len);
-       if (rc)
-               goto init_cpu_err;
-
-       fw.text = text;
-
-       fw.data_addr = bnx2_COM_b06FwDataAddr;
-       fw.data_len = bnx2_COM_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_COM_b06FwData;
 
-       fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
-       fw.sbss_len = bnx2_COM_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_COM_b06FwSbss;
+       fw = &bnx2_com_fw_06;
 
-       fw.bss_addr = bnx2_COM_b06FwBssAddr;
-       fw.bss_len = bnx2_COM_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_COM_b06FwBss;
-
-       fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
-       fw.rodata_len = bnx2_COM_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_COM_b06FwRodata;
-
-       load_cpu_fw(bp, &cpu_reg, &fw);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
+       if (rc)
+               goto init_cpu_err;
 
 init_cpu_err:
        bnx2_gunzip_end(bp);
@@ -2736,7 +2700,7 @@ bnx2_enable_nvram_access(struct bnx2 *bp)
 
        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Enable both bits, even on read. */
-       REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
+       REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
               val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
 }
 
@@ -2747,7 +2711,7 @@ bnx2_disable_nvram_access(struct bnx2 *bp)
 
        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Disable both bits, even after read. */
-       REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
+       REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
                val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
                        BNX2_NVM_ACCESS_ENABLE_WR_EN));
 }
@@ -3138,7 +3102,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
-               data_end = (page_end > offset32 + len32) ? 
+               data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;
 
                /* Request access to the flash interface. */
@@ -3159,8 +3123,8 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
-                                       page_start + j, 
-                                       &flash_buffer[j], 
+                                       page_start + j,
+                                       &flash_buffer[j],
                                        cmd_flags);
 
                                if (rc)
@@ -3187,7 +3151,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                if (bp->flash_info->buffered == 0) {
                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {
-                               
+
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);
 
@@ -3221,7 +3185,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                if (bp->flash_info->buffered == 0) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {
-                       
+
                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
@@ -3346,9 +3310,9 @@ bnx2_init_chip(struct bnx2 *bp)
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
 #ifdef __BIG_ENDIAN
-             BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
+             BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
 #endif
-             BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
+             BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;
 
@@ -3385,7 +3349,10 @@ bnx2_init_chip(struct bnx2 *bp)
 
        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
-       bnx2_init_context(bp);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               bnx2_init_5709_context(bp);
+       else
+               bnx2_init_context(bp);
 
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;
@@ -3441,7 +3408,7 @@ bnx2_init_chip(struct bnx2 *bp)
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);
 
-       REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
+       REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
 
        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
@@ -3496,15 +3463,45 @@ bnx2_init_chip(struct bnx2 *bp)
        return rc;
 }
 
+static void
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+{
+       u32 val, offset0, offset1, offset2, offset3;
+
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               offset0 = BNX2_L2CTX_TYPE_XI;
+               offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+               offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+               offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+       } else {
+               offset0 = BNX2_L2CTX_TYPE;
+               offset1 = BNX2_L2CTX_CMD_TYPE;
+               offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+               offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+       }
+       val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+       CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
+
+       val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+       CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
+
+       val = (u64) bp->tx_desc_mapping >> 32;
+       CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
+
+       val = (u64) bp->tx_desc_mapping & 0xffffffff;
+       CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
+}
 
 static void
 bnx2_init_tx_ring(struct bnx2 *bp)
 {
        struct tx_bd *txbd;
-       u32 val;
+       u32 cid;
+
+       bp->tx_wake_thresh = bp->tx_ring_size / 2;
 
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
-               
+
        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
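The new bnx2_init_tx_context() above writes the same four values as the old inline code (context type, command type, and the high/low halves of the TX BD chain base address); the only 5709-specific part is the choice of register offsets, since the 5709 uses a different L2 context layout (the _XI definitions). The new wake threshold replaces the old MAX_SKB_FRAGS test; as a worked number, assuming the default tx_ring_size of MAX_TX_DESC_CNT (255):

        bp->tx_wake_thresh = bp->tx_ring_size / 2 = 127

so the queue is only woken once roughly half the ring has drained, rather than as soon as a single packet's worth of descriptors is free.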
 
@@ -3512,20 +3509,12 @@ bnx2_init_tx_ring(struct bnx2 *bp)
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;
-       
-       val = BNX2_L2CTX_TYPE_TYPE_L2;
-       val |= BNX2_L2CTX_TYPE_SIZE_L2;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
 
-       val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
-       val |= 8 << 16;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
-
-       val = (u64) bp->tx_desc_mapping >> 32;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
+       cid = TX_CID;
+       bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+       bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
 
-       val = (u64) bp->tx_desc_mapping & 0xffffffff;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
+       bnx2_init_tx_context(bp, cid);
 }
 
 static void
@@ -3533,19 +3522,19 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 {
        struct rx_bd *rxbd;
        int i;
-       u16 prod, ring_prod; 
+       u16 prod, ring_prod;
        u32 val;
 
        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
-       /* 8 for alignment */
-       bp->rx_buf_size = bp->rx_buf_use_size + 8;
+       /* hw alignment */
+       bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
 
        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;
-               
+
        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;
 
@@ -3642,7 +3631,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb_any(skb);
+               dev_kfree_skb(skb);
                i += j + 1;
        }
 
@@ -3668,7 +3657,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 
                rx_buf->skb = NULL;
 
-               dev_kfree_skb_any(skb);
+               dev_kfree_skb(skb);
        }
 }
 
@@ -3705,7 +3694,9 @@ bnx2_init_nic(struct bnx2 *bp)
        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;
 
+       spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
+       spin_unlock_bh(&bp->phy_lock);
        bnx2_set_link(bp);
        return 0;
 }
@@ -3920,7 +3911,7 @@ bnx2_test_memory(struct bnx2 *bp)
                        return ret;
                }
        }
-       
+
        return ret;
 }
 
@@ -3945,14 +3936,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
-               bp->loopback = 0;
+               bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;
 
        pkt_size = 1514;
-       skb = dev_alloc_skb(pkt_size);
+       skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
@@ -3998,7 +3989,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        udelay(5);
 
        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
-       dev_kfree_skb_irq(skb);
+       dev_kfree_skb(skb);
 
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
@@ -4117,7 +4108,7 @@ bnx2_test_link(struct bnx2 *bp)
        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        spin_unlock_bh(&bp->phy_lock);
-               
+
        if (bmsr & BMSR_LSTATUS) {
                return 0;
        }
@@ -4155,80 +4146,117 @@ bnx2_test_intr(struct bnx2 *bp)
 }
 
 static void
-bnx2_timer(unsigned long data)
+bnx2_5706_serdes_timer(struct bnx2 *bp)
 {
-       struct bnx2 *bp = (struct bnx2 *) data;
-       u32 msg;
+       spin_lock(&bp->phy_lock);
+       if (bp->serdes_an_pending)
+               bp->serdes_an_pending--;
+       else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+               u32 bmcr;
 
-       if (!netif_running(bp->dev))
-               return;
+               bp->current_interval = bp->timer_interval;
 
-       if (atomic_read(&bp->intr_sem) != 0)
-               goto bnx2_restart_timer;
+               bnx2_read_phy(bp, MII_BMCR, &bmcr);
 
-       msg = (u32) ++bp->fw_drv_pulse_wr_seq;
-       REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+               if (bmcr & BMCR_ANENABLE) {
+                       u32 phy1, phy2;
 
-       bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
+                       bnx2_write_phy(bp, 0x1c, 0x7c00);
+                       bnx2_read_phy(bp, 0x1c, &phy1);
 
-       if ((bp->phy_flags & PHY_SERDES_FLAG) &&
-           (CHIP_NUM(bp) == CHIP_NUM_5706)) {
+                       bnx2_write_phy(bp, 0x17, 0x0f01);
+                       bnx2_read_phy(bp, 0x15, &phy2);
+                       bnx2_write_phy(bp, 0x17, 0x0f01);
+                       bnx2_read_phy(bp, 0x15, &phy2);
 
-               spin_lock(&bp->phy_lock);
-               if (bp->serdes_an_pending) {
-                       bp->serdes_an_pending--;
+                       if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
+                               !(phy2 & 0x20)) {       /* no CONFIG */
+
+                               bmcr &= ~BMCR_ANENABLE;
+                               bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+                               bnx2_write_phy(bp, MII_BMCR, bmcr);
+                               bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
+                       }
                }
-               else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
-                       u32 bmcr;
+       }
+       else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+                (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
+               u32 phy2;
 
-                       bp->current_interval = bp->timer_interval;
+               bnx2_write_phy(bp, 0x17, 0x0f01);
+               bnx2_read_phy(bp, 0x15, &phy2);
+               if (phy2 & 0x20) {
+                       u32 bmcr;
 
                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
+                       bmcr |= BMCR_ANENABLE;
+                       bnx2_write_phy(bp, MII_BMCR, bmcr);
 
-                       if (bmcr & BMCR_ANENABLE) {
-                               u32 phy1, phy2;
+                       bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+               }
+       } else
+               bp->current_interval = bp->timer_interval;
+
+       spin_unlock(&bp->phy_lock);
+}
 
-                               bnx2_write_phy(bp, 0x1c, 0x7c00);
-                               bnx2_read_phy(bp, 0x1c, &phy1);
+static void
+bnx2_5708_serdes_timer(struct bnx2 *bp)
+{
+       if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
+               bp->serdes_an_pending = 0;
+               return;
+       }
 
-                               bnx2_write_phy(bp, 0x17, 0x0f01);
-                               bnx2_read_phy(bp, 0x15, &phy2);
-                               bnx2_write_phy(bp, 0x17, 0x0f01);
-                               bnx2_read_phy(bp, 0x15, &phy2);
+       spin_lock(&bp->phy_lock);
+       if (bp->serdes_an_pending)
+               bp->serdes_an_pending--;
+       else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+               u32 bmcr;
 
-                               if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
-                                       !(phy2 & 0x20)) {       /* no CONFIG */
+               bnx2_read_phy(bp, MII_BMCR, &bmcr);
 
-                                       bmcr &= ~BMCR_ANENABLE;
-                                       bmcr |= BMCR_SPEED1000 |
-                                               BMCR_FULLDPLX;
-                                       bnx2_write_phy(bp, MII_BMCR, bmcr);
-                                       bp->phy_flags |=
-                                               PHY_PARALLEL_DETECT_FLAG;
-                               }
-                       }
+               if (bmcr & BMCR_ANENABLE) {
+                       bmcr &= ~BMCR_ANENABLE;
+                       bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
+                       bnx2_write_phy(bp, MII_BMCR, bmcr);
+                       bp->current_interval = SERDES_FORCED_TIMEOUT;
+               } else {
+                       bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
+                       bmcr |= BMCR_ANENABLE;
+                       bnx2_write_phy(bp, MII_BMCR, bmcr);
+                       bp->serdes_an_pending = 2;
+                       bp->current_interval = bp->timer_interval;
                }
-               else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
-                       (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
-                       u32 phy2;
 
-                       bnx2_write_phy(bp, 0x17, 0x0f01);
-                       bnx2_read_phy(bp, 0x15, &phy2);
-                       if (phy2 & 0x20) {
-                               u32 bmcr;
+       } else
+               bp->current_interval = bp->timer_interval;
 
-                               bnx2_read_phy(bp, MII_BMCR, &bmcr);
-                               bmcr |= BMCR_ANENABLE;
-                               bnx2_write_phy(bp, MII_BMCR, bmcr);
+       spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_timer(unsigned long data)
+{
+       struct bnx2 *bp = (struct bnx2 *) data;
+       u32 msg;
 
-                               bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+       if (!netif_running(bp->dev))
+               return;
 
-                       }
-               }
-               else
-                       bp->current_interval = bp->timer_interval;
+       if (atomic_read(&bp->intr_sem) != 0)
+               goto bnx2_restart_timer;
 
-               spin_unlock(&bp->phy_lock);
+       msg = (u32) ++bp->fw_drv_pulse_wr_seq;
+       REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+
+       bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
+
+       if (bp->phy_flags & PHY_SERDES_FLAG) {
+               if (CHIP_NUM(bp) == CHIP_NUM_5706)
+                       bnx2_5706_serdes_timer(bp);
+               else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+                       bnx2_5708_serdes_timer(bp);
        }
 
 bnx2_restart_timer:
@@ -4260,11 +4288,11 @@ bnx2_open(struct net_device *dev)
                }
                else {
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
-                                       SA_SHIRQ, dev->name, dev);
+                                       IRQF_SHARED, dev->name, dev);
                }
        }
        else {
-               rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
+               rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
@@ -4284,7 +4312,7 @@ bnx2_open(struct net_device *dev)
                bnx2_free_mem(bp);
                return rc;
        }
-       
+
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 
        atomic_set(&bp->intr_sem, 0);
@@ -4311,7 +4339,7 @@ bnx2_open(struct net_device *dev)
 
                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
-                                       SA_SHIRQ, dev->name, dev);
+                                       IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                bnx2_free_skbs(bp);
@@ -4390,10 +4418,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
  */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4418,7 +4444,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        ring_prod = TX_RING_IDX(prod);
 
        vlan_tag_flags = 0;
-       if (skb->ip_summed == CHECKSUM_HW) {
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }
 
@@ -4426,8 +4452,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
-#ifdef BCM_TSO 
-       if ((mss = skb_shinfo(skb)->tso_size) &&
+#ifdef BCM_TSO
+       if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
 
@@ -4465,7 +4491,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-       
+
        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);
@@ -4512,12 +4538,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dev->trans_start = jiffies;
 
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-               spin_lock(&bp->tx_lock);
                netif_stop_queue(dev);
-               
-               if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+               if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
-               spin_unlock(&bp->tx_lock);
        }
 
        return NETDEV_TX_OK;
@@ -4540,7 +4563,7 @@ bnx2_close(struct net_device *dev)
        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        if (bp->flags & NO_WOL_FLAG)
-               reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
+               reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
@@ -4598,23 +4621,23 @@ bnx2_get_stats(struct net_device *dev)
        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
 
-       net_stats->multicast = 
+       net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
 
-       net_stats->collisions = 
+       net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;
 
-       net_stats->rx_length_errors = 
+       net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);
 
-       net_stats->rx_over_errors = 
+       net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;
 
-       net_stats->rx_frame_errors = 
+       net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
 
-       net_stats->rx_crc_errors = 
+       net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
 
        net_stats->rx_errors = net_stats->rx_length_errors +
@@ -4635,7 +4658,7 @@ bnx2_get_stats(struct net_device *dev)
        }
 
        net_stats->tx_errors =
-               (unsigned long) 
+               (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
@@ -4696,7 +4719,7 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        return 0;
 }
-  
+
 static int
 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -4709,7 +4732,7 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;
 
-               cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 
+               cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
 
                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
@@ -4741,10 +4764,14 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        }
        else {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
-                       if ((cmd->speed != SPEED_1000) ||
-                               (cmd->duplex != DUPLEX_FULL)) {
+                       if ((cmd->speed != SPEED_1000 &&
+                            cmd->speed != SPEED_2500) ||
+                           (cmd->duplex != DUPLEX_FULL))
+                               return -EINVAL;
+
+                       if (cmd->speed == SPEED_2500 &&
+                           !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                return -EINVAL;
-                       }
                }
                else if (cmd->speed == SPEED_1000) {
                        return -EINVAL;
@@ -4901,11 +4928,10 @@ bnx2_nway_reset(struct net_device *dev)
                msleep(20);
 
                spin_lock_bh(&bp->phy_lock);
-               if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-                       bp->current_interval = SERDES_AN_TIMEOUT;
-                       bp->serdes_an_pending = 1;
-                       mod_timer(&bp->timer, jiffies + bp->current_interval);
-               }
+
+               bp->current_interval = SERDES_AN_TIMEOUT;
+               bp->serdes_an_pending = 1;
+               mod_timer(&bp->timer, jiffies + bp->current_interval);
        }
 
        bnx2_read_phy(bp, MII_BMCR, &bmcr);
@@ -4986,7 +5012,7 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
 
-       bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
+       bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
 
        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
@@ -5127,6 +5153,16 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
        return 0;
 }
 
+static int
+bnx2_set_tso(struct net_device *dev, u32 data)
+{
+       if (data)
+               dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
+       else
+               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
+       return 0;
+}
+
 #define BNX2_NUM_STATS 46
 
 static struct {
@@ -5194,46 +5230,46 @@ static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
-    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),                 
-    STATS_OFFSET32(stat_Dot3StatsFCSErrors),                          
-    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),                    
-    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),              
-    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),            
-    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),              
-    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),                
-    STATS_OFFSET32(stat_Dot3StatsLateCollisions),                     
-    STATS_OFFSET32(stat_EtherStatsCollisions),                        
-    STATS_OFFSET32(stat_EtherStatsFragments),                         
-    STATS_OFFSET32(stat_EtherStatsJabbers),                           
-    STATS_OFFSET32(stat_EtherStatsUndersizePkts),                     
-    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),                     
-    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),                    
-    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),         
-    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),        
-    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),        
-    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),       
-    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),      
-    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),      
-    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),                    
-    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),         
-    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),        
-    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),        
-    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),       
-    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),      
-    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),      
-    STATS_OFFSET32(stat_XonPauseFramesReceived),                      
-    STATS_OFFSET32(stat_XoffPauseFramesReceived),                     
-    STATS_OFFSET32(stat_OutXonSent),                                  
-    STATS_OFFSET32(stat_OutXoffSent),                                 
-    STATS_OFFSET32(stat_MacControlFramesReceived),                    
-    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),                  
-    STATS_OFFSET32(stat_IfInMBUFDiscards),                            
+    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
+    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
+    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
+    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
+    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
+    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
+    STATS_OFFSET32(stat_EtherStatsCollisions),
+    STATS_OFFSET32(stat_EtherStatsFragments),
+    STATS_OFFSET32(stat_EtherStatsJabbers),
+    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
+    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
+    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
+    STATS_OFFSET32(stat_XonPauseFramesReceived),
+    STATS_OFFSET32(stat_XoffPauseFramesReceived),
+    STATS_OFFSET32(stat_OutXonSent),
+    STATS_OFFSET32(stat_OutXoffSent),
+    STATS_OFFSET32(stat_MacControlFramesReceived),
+    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
+    STATS_OFFSET32(stat_IfInMBUFDiscards),
     STATS_OFFSET32(stat_FwRxDrop),
 };
 
 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
  * skipped because of errata.
- */               
+ */
 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
@@ -5276,6 +5312,8 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 
        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               int i;
+
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);
@@ -5300,9 +5338,11 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
                }
 
                /* wait for link up */
-               msleep_interruptible(3000);
-               if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
-                       msleep_interruptible(4000);
+               for (i = 0; i < 7; i++) {
+                       if (bp->link_up)
+                               break;
+                       msleep_interruptible(1000);
+               }
        }
 
        if (bnx2_test_nvram(bp) != 0) {
@@ -5417,7 +5457,7 @@ bnx2_phys_id(struct net_device *dev, u32 data)
        return 0;
 }
 
-static struct ethtool_ops bnx2_ethtool_ops = {
+static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
@@ -5444,7 +5484,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
        .set_sg                 = ethtool_op_set_sg,
 #ifdef BCM_TSO
        .get_tso                = ethtool_op_get_tso,
-       .set_tso                = ethtool_op_set_tso,
+       .set_tso                = bnx2_set_tso,
 #endif
        .self_test_count        = bnx2_self_test_count,
        .self_test              = bnx2_self_test,
@@ -5542,7 +5582,7 @@ poll_bnx2(struct net_device *dev)
        struct bnx2 *bp = netdev_priv(dev);
 
        disable_irq(bp->pdev->irq);
-       bnx2_interrupt(bp->pdev->irq, dev, NULL);
+       bnx2_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
 }
 #endif
@@ -5565,20 +5605,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        /* enable device (incl. PCI PM wakeup), and bus-mastering */
        rc = pci_enable_device(pdev);
        if (rc) {
-               printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
                goto err_out;
        }
 
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               printk(KERN_ERR PFX "Cannot find PCI device base address, "
-                      "aborting.\n");
+               dev_err(&pdev->dev,
+                       "Cannot find PCI device base address, aborting.\n");
                rc = -ENODEV;
                goto err_out_disable;
        }
 
        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (rc) {
-               printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
                goto err_out_disable;
        }
 
@@ -5586,15 +5626,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (bp->pm_cap == 0) {
-               printk(KERN_ERR PFX "Cannot find power management capability, "
-                              "aborting.\n");
-               rc = -EIO;
-               goto err_out_release;
-       }
-
-       bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
-       if (bp->pcix_cap == 0) {
-               printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
+               dev_err(&pdev->dev,
+                       "Cannot find power management capability, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }
@@ -5602,14 +5635,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-                       printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
-                              "failed, aborting.\n");
+                       dev_err(&pdev->dev,
+                               "pci_set_consistent_dma_mask failed, aborting.\n");
                        rc = -EIO;
                        goto err_out_release;
                }
        }
        else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
-               printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
+               dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }
@@ -5618,18 +5651,17 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->pdev = pdev;
 
        spin_lock_init(&bp->phy_lock);
-       spin_lock_init(&bp->tx_lock);
        INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-       mem_len = MB_GET_CID_ADDR(17);
+       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
        dev->mem_end = dev->mem_start + mem_len;
        dev->irq = pdev->irq;
 
        bp->regview = ioremap_nocache(dev->base_addr, mem_len);
 
        if (!bp->regview) {
-               printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+               dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
                rc = -ENOMEM;
                goto err_out_release;
        }
@@ -5646,6 +5678,16 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
 
+       if (CHIP_NUM(bp) != CHIP_NUM_5709) {
+               bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+               if (bp->pcix_cap == 0) {
+                       dev_err(&pdev->dev,
+                               "Cannot find PCIX capability, aborting.\n");
+                       rc = -EIO;
+                       goto err_out_unmap;
+               }
+       }
+
        /* Get bus information. */
        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
@@ -5654,7 +5696,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                bp->flags |= PCIX_FLAG;
 
                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
-               
+
                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
@@ -5701,8 +5743,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
                !(bp->flags & PCIX_FLAG)) {
 
-               printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
-                      "aborting.\n");
+               dev_err(&pdev->dev,
+                       "5706 A1 can only be used in a PCIX bus, aborting.\n");
                goto err_out_unmap;
        }
 
@@ -5723,7 +5765,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
            BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-               printk(KERN_ERR PFX "Firmware not running, aborting.\n");
+               dev_err(&pdev->dev, "Firmware not running, aborting.\n");
                rc = -ENODEV;
                goto err_out_unmap;
        }
@@ -5741,7 +5783,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->mac_addr[5] = (u8) reg;
 
        bp->tx_ring_size = MAX_TX_DESC_CNT;
-       bnx2_set_rx_ring_size(bp, 100);
+       bnx2_set_rx_ring_size(bp, 255);
 
        bp->rx_csum = 1;
 
@@ -5751,7 +5793,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->tx_quick_cons_trip = 20;
        bp->tx_ticks_int = 80;
        bp->tx_ticks = 80;
-               
+
        bp->rx_quick_cons_trip_int = 6;
        bp->rx_quick_cons_trip = 6;
        bp->rx_ticks_int = 18;
@@ -5794,6 +5836,34 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                bp->cmd_ticks_int = bp->cmd_ticks;
        }
 
+       /* Disable MSI on 5706 if AMD 8132 bridge is found.
+        *
+        * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
+        * with byte enables disabled on the unused 32-bit word.  This is legal
+        * but causes problems on the AMD 8132, which will eventually stop
+        * responding after a while.
+        *
+        * AMD believes this incompatibility is unique to the 5706, and
+        * prefers to locally disable MSI rather than globally disabling it
+        * using pci_msi_quirk.
+        */
+       if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
+               struct pci_dev *amd_8132 = NULL;
+
+               while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
+                                                 PCI_DEVICE_ID_AMD_8132_BRIDGE,
+                                                 amd_8132))) {
+                       u8 rev;
+
+                       pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
+                       if (rev >= 0x10 && rev <= 0x13) {
+                               disable_msi = 1;
+                               pci_dev_put(amd_8132);
+                               break;
+                       }
+               }
+       }
+
        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
        bp->req_line_speed = 0;
        if (bp->phy_flags & PHY_SERDES_FLAG) {
@@ -5885,7 +5955,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
        if ((rc = register_netdev(dev))) {
-               printk(KERN_ERR PFX "Cannot register net device\n");
+               dev_err(&pdev->dev, "Cannot register net device\n");
                if (bp->regview)
                        iounmap(bp->regview);
                pci_release_regions(pdev);
@@ -5925,7 +5995,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 #endif
 #ifdef BCM_TSO
-       dev->features |= NETIF_F_TSO;
+       dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
 #endif
 
        netif_carrier_off(bp->dev);
@@ -5967,7 +6037,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        if (bp->flags & NO_WOL_FLAG)
-               reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
+               reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
@@ -6005,7 +6075,7 @@ static struct pci_driver bnx2_pci_driver = {
 
 static int __init bnx2_init(void)
 {
-       return pci_module_init(&bnx2_pci_driver);
+       return pci_register_driver(&bnx2_pci_driver);
 }
 
 static void __exit bnx2_cleanup(void)