[PATCH] tg3: update to version 3.31 (tagged status, ethtool self-tests, configurable interrupt coalescing)
[safe/jmp/linux-2.6] / drivers / net / tg3.c
index e9e0c6e..a0b8848 100644 (file)
@@ -7,7 +7,12 @@
  * Copyright (C) 2005 Broadcom Corporation.
  *
  * Firmware is:
- *     Copyright (C) 2000-2003 Broadcom Corporation.
+ *     Derived from proprietary unpublished source code,
+ *     Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ *     Permission is hereby granted for the distribution of this firmware
+ *     data in hexadecimal or equivalent format, provided this copyright
+ *     notice is accompanying it.
  */
 
 #include <linux/config.h>
@@ -61,8 +66,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.26"
-#define DRV_MODULE_RELDATE     "April 24, 2005"
+#define DRV_MODULE_VERSION     "3.31"
+#define DRV_MODULE_RELDATE     "June 8, 2005"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
 /* number of ETHTOOL_GSTATS u64's */
 #define TG3_NUM_STATS          (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 
+#define TG3_NUM_TEST           6
+
 static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
@@ -206,6 +213,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -314,6 +323,17 @@ static struct {
        { "nic_tx_threshold_hit" }
 };
 
+static struct {
+       const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[TG3_NUM_TEST] = {
+       { "nvram test     (online) " },
+       { "link test      (online) " },
+       { "register test  (offline)" },
+       { "memory test    (offline)" },
+       { "loopback test  (offline)" },
+       { "interrupt test (offline)" },
+};
+
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
@@ -420,7 +440,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                    (tp->last_tag << 24));
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
        tg3_cond_int(tp);
@@ -455,10 +476,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
        tw32(TG3PCI_MISC_HOST_CTRL,
                (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                    tp->last_tag << 24);
        mmiowb();
 
-       if (tg3_has_work(tp))
+       /* When doing tagged status, this work check is unnecessary.
+        * The last_tag we write above tells the chip which piece of
+        * work we've completed.
+        */
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+           tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -2500,7 +2527,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
-                            DEFAULT_STAT_COAL_TICKS);
+                            tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
@@ -2886,7 +2913,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
         * All RX "locking" is done by ensuring outside
         * code synchronizes with dev->poll()
         */
-       done = 1;
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
                int orig_budget = *budget;
                int work_done;
@@ -2898,12 +2924,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
                *budget -= work_done;
                netdev->quota -= work_done;
-
-               if (work_done >= orig_budget)
-                       done = 0;
        }
 
+       if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+               tp->last_tag = sblk->status_tag;
+       rmb();
+
        /* if no more work, tell net stack and NIC we're done */
+       done = !tg3_has_work(tp);
        if (done) {
                spin_lock_irqsave(&tp->lock, flags);
                __netif_rx_complete(netdev);
@@ -2928,22 +2956,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
        spin_lock_irqsave(&tp->lock, flags);
 
        /*
-        * writing any value to intr-mbox-0 clears PCI INTA# and
+        * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
-        * writing non-zero to intr-mbox-0 additional tells the
+        * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+       tp->last_tag = sblk->status_tag;
        sblk->status &= ~SD_STATUS_UPDATED;
-
        if (likely(tg3_has_work(tp)))
                netif_rx_schedule(dev);         /* schedule NAPI poll */
        else {
-               /* no work, re-enable interrupts
-                */
+               /* No work, re-enable interrupts.  */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                            0x00000000);
+                            tp->last_tag << 24);
        }
 
        spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2996,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
-                * writing any value to intr-mbox-0 clears PCI INTA# and
+                * Writing any value to intr-mbox-0 clears PCI INTA# and
                 * chip-internal interrupt pending events.
-                * writing non-zero to intr-mbox-0 additional tells the
+                * Writing non-zero to intr-mbox-0 additionally tells the
                 * NIC to stop sending us irqs, engaging "in-intr-handler"
                 * event coalescing.
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
+               sblk->status &= ~SD_STATUS_UPDATED;
+               if (likely(tg3_has_work(tp)))
+                       netif_rx_schedule(dev);         /* schedule NAPI poll */
+               else {
+                       /* No work, shared interrupt perhaps?  re-enable
+                        * interrupts, and flush that PCI write
+                        */
+                       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                               0x00000000);
+                       tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+               }
+       } else {        /* shared interrupt */
+               handled = 0;
+       }
+
+       spin_unlock_irqrestore(&tp->lock, flags);
+
+       return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+       struct tg3_hw_status *sblk = tp->hw_status;
+       unsigned long flags;
+       unsigned int handled = 1;
+
+       spin_lock_irqsave(&tp->lock, flags);
+
+       /* In INTx mode, it is possible for the interrupt to arrive at
+        * the CPU before the status block posted prior to the interrupt.
+        * Reading the PCI State register will confirm whether the
+        * interrupt is ours and will flush the status block.
+        */
+       if ((sblk->status & SD_STATUS_UPDATED) ||
+           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
-                * Flush PCI write.  This also guarantees that our
-                * status block has been flushed to host memory.
+                * writing any value to intr-mbox-0 clears PCI INTA# and
+                * chip-internal interrupt pending events.
+        * writing non-zero to intr-mbox-0 additionally tells the
+                * NIC to stop sending us irqs, engaging "in-intr-handler"
+                * event coalescing.
                 */
-               tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+               tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                            0x00000001);
+               tp->last_tag = sblk->status_tag;
                sblk->status &= ~SD_STATUS_UPDATED;
-
                if (likely(tg3_has_work(tp)))
                        netif_rx_schedule(dev);         /* schedule NAPI poll */
                else {
@@ -2991,7 +3059,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                         * interrupts, and flush that PCI write
                         */
                        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                               0x00000000);
+                                    tp->last_tag << 24);
                        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
                }
        } else {        /* shared interrupt */
@@ -3020,7 +3088,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 }
 
 static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
@@ -3044,7 +3112,7 @@ static void tg3_reset_task(void *_data)
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
-       tg3_halt(tp, 0);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_init_hw(tp);
 
        tg3_netif_start(tp);
@@ -3390,7 +3458,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        spin_lock_irq(&tp->lock);
        spin_lock(&tp->tx_lock);
 
-       tg3_halt(tp, 1);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
        tg3_set_mtu(dev, tp, new_mtu);
 
@@ -3725,8 +3793,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
-       if (err)
-               goto out;
 
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
@@ -3744,10 +3810,10 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
-               return -ENODEV;
+               err |= -ENODEV;
        }
 
-       err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 
@@ -3756,15 +3822,12 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
 
        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
-       if (err)
-               goto out;
 
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
-out:
        return err;
 }
 
@@ -4086,19 +4149,19 @@ static void tg3_stop_fw(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
        int err;
 
        tg3_stop_fw(tp);
 
-       tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+       tg3_write_sig_pre_reset(tp, kind);
 
        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);
 
-       tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
-       tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+       tg3_write_sig_legacy(tp, kind);
+       tg3_write_sig_post_reset(tp, kind);
 
        if (err)
                return err;
@@ -4312,7 +4375,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
         */
        tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 
+       /* It is possible that bootcode is still loading at this point.
+        * Get the nvram lock first before halting the cpu.
+        */
+       tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
+       tg3_nvram_unlock(tp);
        if (err)
                goto out;
 
@@ -5049,6 +5117,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+       tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+       tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+       tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+       tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+               tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+               tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+       }
+       tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+       tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+               u32 val = ec->stats_block_coalesce_usecs;
+
+               if (!netif_carrier_ok(tp->dev))
+                       val = 0;
+
+               tw32(HOSTCC_STAT_COAL_TICKS, val);
+       }
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5063,9 +5152,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 
        if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
-               err = tg3_abort_hw(tp, 1);
-               if (err)
-                       return err;
+               tg3_abort_hw(tp, 1);
        }
 
        err = tg3_chip_reset(tp);
@@ -5373,16 +5460,7 @@ static int tg3_reset_hw(struct tg3 *tp)
                udelay(10);
        }
 
-       tw32(HOSTCC_RXCOL_TICKS, 0);
-       tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-       tw32(HOSTCC_RXMAX_FRAMES, 1);
-       tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-               tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-               tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-       }
-       tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-       tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+       tg3_set_coalesce(tp, &tp->coal);
 
        /* set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5395,8 +5473,6 @@ static int tg3_reset_hw(struct tg3 *tp)
                 * the tg3_periodic_fetch_stats call there, and
                 * tg3_get_stats to see how this works for 5705/5750 chips.
                 */
-               tw32(HOSTCC_STAT_COAL_TICKS,
-                    DEFAULT_STAT_COAL_TICKS);
                tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
                     ((u64) tp->stats_mapping >> 32));
                tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5452,7 +5528,8 @@ static int tg3_reset_hw(struct tg3 *tp)
        udelay(100);
 
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-       tr32(MAILBOX_INTERRUPT_0);
+       tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+       tp->last_tag = 0;
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5730,31 +5807,33 @@ static void tg3_timer(unsigned long __opaque)
        spin_lock_irqsave(&tp->lock, flags);
        spin_lock(&tp->tx_lock);
 
-       /* All of this garbage is because when using non-tagged
-        * IRQ status the mailbox/status_block protocol the chip
-        * uses with the cpu is race prone.
-        */
-       if (tp->hw_status->status & SD_STATUS_UPDATED) {
-               tw32(GRC_LOCAL_CTRL,
-                    tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-       } else {
-               tw32(HOSTCC_MODE, tp->coalesce_mode |
-                    (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-       }
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+               /* All of this garbage is because when using non-tagged
+                * IRQ status the mailbox/status_block protocol the chip
+                * uses with the cpu is race prone.
+                */
+               if (tp->hw_status->status & SD_STATUS_UPDATED) {
+                       tw32(GRC_LOCAL_CTRL,
+                            tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+               } else {
+                       tw32(HOSTCC_MODE, tp->coalesce_mode |
+                            (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+               }
 
-       if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-               tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-               spin_unlock(&tp->tx_lock);
-               spin_unlock_irqrestore(&tp->lock, flags);
-               schedule_work(&tp->reset_task);
-               return;
+               if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+                       tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+                       spin_unlock(&tp->tx_lock);
+                       spin_unlock_irqrestore(&tp->lock, flags);
+                       schedule_work(&tp->reset_task);
+                       return;
+               }
        }
 
-       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-               tg3_periodic_fetch_stats(tp);
-
        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
+               if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+                       tg3_periodic_fetch_stats(tp);
+
                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;
@@ -5825,6 +5904,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
        int err, i;
        u32 int_mbox = 0;
 
+       if (!netif_running(dev))
+               return -ENODEV;
+
        tg3_disable_ints(tp);
 
        free_irq(tp->pdev->irq, dev);
@@ -5853,9 +5935,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                err = request_irq(tp->pdev->irq, tg3_msi,
                                  SA_SAMPLE_RANDOM, dev->name, dev);
-       else
-               err = request_irq(tp->pdev->irq, tg3_interrupt,
+       else {
+               irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                       fn = tg3_interrupt_tagged;
+               err = request_irq(tp->pdev->irq, fn,
                                  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+       }
 
        if (err)
                return err;
@@ -5907,9 +5993,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-       err = request_irq(tp->pdev->irq, tg3_interrupt,
-                         SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+       {
+               irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                       fn = tg3_interrupt_tagged;
 
+               err = request_irq(tp->pdev->irq, fn,
+                                 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+       }
        if (err)
                return err;
 
@@ -5919,7 +6010,7 @@ static int tg3_test_msi(struct tg3 *tp)
        spin_lock_irq(&tp->lock);
        spin_lock(&tp->tx_lock);
 
-       tg3_halt(tp, 1);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp);
 
        spin_unlock(&tp->tx_lock);
@@ -5955,7 +6046,13 @@ static int tg3_open(struct net_device *dev)
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
            (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
            (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-               if (pci_enable_msi(tp->pdev) == 0) {
+               /* All MSI supporting chips should support tagged
+                * status.  Assert that this is the case.
+                */
+               if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                       printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+                              "Not using MSI.\n", tp->dev->name);
+               } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;
 
                        msi_mode = tr32(MSGINT_MODE);
@@ -5966,9 +6063,14 @@ static int tg3_open(struct net_device *dev)
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                err = request_irq(tp->pdev->irq, tg3_msi,
                                  SA_SAMPLE_RANDOM, dev->name, dev);
-       else
-               err = request_irq(tp->pdev->irq, tg3_interrupt,
+       else {
+               irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                       fn = tg3_interrupt_tagged;
+
+               err = request_irq(tp->pdev->irq, fn,
                                  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+       }
 
        if (err) {
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5984,12 +6086,19 @@ static int tg3_open(struct net_device *dev)
 
        err = tg3_init_hw(tp);
        if (err) {
-               tg3_halt(tp, 1);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
-               tp->timer_offset = HZ / 10;
-               tp->timer_counter = tp->timer_multiplier = 10;
-               tp->asf_counter = tp->asf_multiplier = (10 * 120);
+               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                       tp->timer_offset = HZ;
+               else
+                       tp->timer_offset = HZ / 10;
+
+               BUG_ON(tp->timer_offset > HZ);
+               tp->timer_counter = tp->timer_multiplier =
+                       (HZ / tp->timer_offset);
+               tp->asf_counter = tp->asf_multiplier =
+                       ((HZ / tp->timer_offset) * 120);
 
                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
@@ -6012,6 +6121,7 @@ static int tg3_open(struct net_device *dev)
 
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                err = tg3_test_msi(tp);
+
                if (err) {
                        spin_lock_irq(&tp->lock);
                        spin_lock(&tp->tx_lock);
@@ -6020,7 +6130,7 @@ static int tg3_open(struct net_device *dev)
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
-                       tg3_halt(tp, 1);
+                       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);
 
@@ -6293,7 +6403,7 @@ static int tg3_close(struct net_device *dev)
 
        tg3_disable_ints(tp);
 
-       tg3_halt(tp, 1);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &=
                ~(TG3_FLAG_INIT_COMPLETE |
@@ -7013,7 +7123,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
        tp->tx_pending = ering->tx_pending;
 
        if (netif_running(dev)) {
-               tg3_halt(tp, 1);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_init_hw(tp);
                tg3_netif_start(tp);
        }
@@ -7056,7 +7166,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
                tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
 
        if (netif_running(dev)) {
-               tg3_halt(tp, 1);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_init_hw(tp);
                tg3_netif_start(tp);
        }
@@ -7115,12 +7225,20 @@ static int tg3_get_stats_count (struct net_device *dev)
        return TG3_NUM_STATS;
 }
 
+static int tg3_get_test_count (struct net_device *dev)
+{
+       return TG3_NUM_TEST;
+}
+
 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
+       case ETH_SS_TEST:
+               memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+               break;
        default:
                WARN_ON(1);     /* we need a WARN() */
                break;
@@ -7134,6 +7252,516 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
 }
 
+#define NVRAM_TEST_SIZE 0x100
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+       u32 *buf, csum;
+       int i, j, err = 0;
+
+       buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
+               u32 val;
+
+               if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+                       break;
+               buf[j] = cpu_to_le32(val);
+       }
+       if (i < NVRAM_TEST_SIZE)
+               goto out;
+
+       err = -EIO;
+       if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
+               goto out;
+
+       /* Bootstrap checksum at offset 0x10 */
+       csum = calc_crc((unsigned char *) buf, 0x10);
+       if(csum != cpu_to_le32(buf[0x10/4]))
+               goto out;
+
+       /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+       csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+       if (csum != cpu_to_le32(buf[0xfc/4]))
+                goto out;
+
+       err = 0;
+
+out:
+       kfree(buf);
+       return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC 2
+#define TG3_COPPER_TIMEOUT_SEC 6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+       int i, max;
+
+       if (!netif_running(tp->dev))
+               return -ENODEV;
+
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+               max = TG3_SERDES_TIMEOUT_SEC;
+       else
+               max = TG3_COPPER_TIMEOUT_SEC;
+
+       for (i = 0; i < max; i++) {
+               if (netif_carrier_ok(tp->dev))
+                       return 0;
+
+               if (msleep_interruptible(1000))
+                       break;
+       }
+
+       return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+       int i, is_5705;
+       u32 offset, read_mask, write_mask, val, save_val, read_val;
+       static struct {
+               u16 offset;
+               u16 flags;
+#define TG3_FL_5705    0x1
+#define TG3_FL_NOT_5705        0x2
+#define TG3_FL_NOT_5788        0x4
+               u32 read_mask;
+               u32 write_mask;
+       } reg_tbl[] = {
+               /* MAC Control Registers */
+               { MAC_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x00ef6f8c },
+               { MAC_MODE, TG3_FL_5705,
+                       0x00000000, 0x01ef6b8c },
+               { MAC_STATUS, TG3_FL_NOT_5705,
+                       0x03800107, 0x00000000 },
+               { MAC_STATUS, TG3_FL_5705,
+                       0x03800100, 0x00000000 },
+               { MAC_ADDR_0_HIGH, 0x0000,
+                       0x00000000, 0x0000ffff },
+               { MAC_ADDR_0_LOW, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_RX_MTU_SIZE, 0x0000,
+                       0x00000000, 0x0000ffff },
+               { MAC_TX_MODE, 0x0000,
+                       0x00000000, 0x00000070 },
+               { MAC_TX_LENGTHS, 0x0000,
+                       0x00000000, 0x00003fff },
+               { MAC_RX_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x000007fc },
+               { MAC_RX_MODE, TG3_FL_5705,
+                       0x00000000, 0x000007dc },
+               { MAC_HASH_REG_0, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_HASH_REG_1, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_HASH_REG_2, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_HASH_REG_3, 0x0000,
+                       0x00000000, 0xffffffff },
+
+               /* Receive Data and Receive BD Initiator Control Registers. */
+               { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+                       0x00000000, 0x00000003 },
+               { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_STD_BD+0, 0x0000,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_STD_BD+4, 0x0000,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_STD_BD+8, 0x0000,
+                       0x00000000, 0xffff0002 },
+               { RCVDBDI_STD_BD+0xc, 0x0000,
+                       0x00000000, 0xffffffff },
+       
+               /* Receive BD Initiator Control Registers. */
+               { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVBDI_STD_THRESH, TG3_FL_5705,
+                       0x00000000, 0x000003ff },
+               { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+       
+               /* Host Coalescing Control Registers. */
+               { HOSTCC_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x00000004 },
+               { HOSTCC_MODE, TG3_FL_5705,
+                       0x00000000, 0x000000f6 },
+               { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+                       0x00000000, 0x000003ff },
+               { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+                       0x00000000, 0x000003ff },
+               { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+                       0xffffffff, 0x00000000 },
+               { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+                       0xffffffff, 0x00000000 },
+
+               /* Buffer Manager Control Registers. */
+               { BUFMGR_MB_POOL_ADDR, 0x0000,
+                       0x00000000, 0x007fff80 },
+               { BUFMGR_MB_POOL_SIZE, 0x0000,
+                       0x00000000, 0x007fffff },
+               { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+                       0x00000000, 0x0000003f },
+               { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+                       0x00000000, 0x000001ff },
+               { BUFMGR_MB_HIGH_WATER, 0x0000,
+                       0x00000000, 0x000001ff },
+               { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+                       0xffffffff, 0x00000000 },
+               { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+                       0xffffffff, 0x00000000 },
+       
+               /* Mailbox Registers */
+               { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+                       0x00000000, 0x000001ff },
+               { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+                       0x00000000, 0x000001ff },
+               { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+                       0x00000000, 0x000007ff },
+               { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+                       0x00000000, 0x000001ff },
+
+               { 0xffff, 0x0000, 0x00000000, 0x00000000 },
+       };
+
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+               is_5705 = 1;
+       else
+               is_5705 = 0;
+
+       for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+               if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+                       continue;
+
+               if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+                       continue;
+
+               if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+                   (reg_tbl[i].flags & TG3_FL_NOT_5788))
+                       continue;
+
+               offset = (u32) reg_tbl[i].offset;
+               read_mask = reg_tbl[i].read_mask;
+               write_mask = reg_tbl[i].write_mask;
+
+               /* Save the original register content */
+               save_val = tr32(offset);
+
+               /* Determine the read-only value. */
+               read_val = save_val & read_mask;
+
+               /* Write zero to the register, then make sure the read-only bits
+                * are not changed and the read/write bits are all zeros.
+                */
+               tw32(offset, 0);
+
+               val = tr32(offset);
+
+               /* Test the read-only and read/write bits. */
+               if (((val & read_mask) != read_val) || (val & write_mask))
+                       goto out;
+
+               /* Write ones to all the bits defined by RdMask and WrMask, then
+                * make sure the read-only bits are not changed and the
+                * read/write bits are all ones.
+                */
+               tw32(offset, read_mask | write_mask);
+
+               val = tr32(offset);
+
+               /* Test the read-only bits. */
+               if ((val & read_mask) != read_val)
+                       goto out;
+
+               /* Test the read/write bits. */
+               if ((val & write_mask) != write_mask)
+                       goto out;
+
+               tw32(offset, save_val);
+       }
+
+       return 0;
+
+out:
+       printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+       tw32(offset, save_val);
+       return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+       static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+       int i;
+       u32 j;
+
+       for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+               for (j = 0; j < len; j += 4) {
+                       u32 val;
+
+                       tg3_write_mem(tp, offset + j, test_pattern[i]);
+                       tg3_read_mem(tp, offset + j, &val);
+                       if (val != test_pattern[i])
+                               return -EIO;
+               }
+       }
+       return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+       static struct mem_entry {
+               u32 offset;
+               u32 len;
+       } mem_tbl_570x[] = {
+               { 0x00000000, 0x01000},
+               { 0x00002000, 0x1c000},
+               { 0xffffffff, 0x00000}
+       }, mem_tbl_5705[] = {
+               { 0x00000100, 0x0000c},
+               { 0x00000200, 0x00008},
+               { 0x00000b50, 0x00400},
+               { 0x00004000, 0x00800},
+               { 0x00006000, 0x01000},
+               { 0x00008000, 0x02000},
+               { 0x00010000, 0x0e000},
+               { 0xffffffff, 0x00000}
+       };
+       struct mem_entry *mem_tbl;
+       int err = 0;
+       int i;
+
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+               mem_tbl = mem_tbl_5705;
+       else
+               mem_tbl = mem_tbl_570x;
+
+       for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+               if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
+                   mem_tbl[i].len)) != 0)
+                       break;
+       }
+       
+       return err;
+}
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+       u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
+       u32 desc_idx;
+       struct sk_buff *skb, *rx_skb;
+       u8 *tx_data;
+       dma_addr_t map;
+       int num_pkts, tx_len, rx_len, i, err;
+       struct tg3_rx_buffer_desc *desc;
+
+       if (!netif_running(tp->dev))
+               return -ENODEV;
+
+       err = -EIO;
+
+       tg3_abort_hw(tp, 1);
+
+       /* Clearing this flag to keep interrupts disabled */
+       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+       tg3_reset_hw(tp);
+
+       mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+                  MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
+                  MAC_MODE_PORT_MODE_GMII;
+       tw32(MAC_MODE, mac_mode);
+
+       tx_len = 1514;
+       skb = dev_alloc_skb(tx_len);
+       tx_data = skb_put(skb, tx_len);
+       memcpy(tx_data, tp->dev->dev_addr, 6);
+       memset(tx_data + 6, 0x0, 8);
+
+       tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+
+       for (i = 14; i < tx_len; i++)
+               tx_data[i] = (u8) (i & 0xff);
+
+       map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+
+       tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+            HOSTCC_MODE_NOW);
+
+       udelay(10);
+
+       rx_start_idx = tp->hw_status->idx[0].rx_producer;
+
+       send_idx = 0;
+       num_pkts = 0;
+
+       tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
+
+       send_idx++;
+       num_pkts++;
+
+       tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
+       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+
+       udelay(10);
+
+       for (i = 0; i < 10; i++) {
+               tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+                      HOSTCC_MODE_NOW);
+
+               udelay(10);
+
+               tx_idx = tp->hw_status->idx[0].tx_consumer;
+               rx_idx = tp->hw_status->idx[0].rx_producer;
+               if ((tx_idx == send_idx) &&
+                   (rx_idx == (rx_start_idx + num_pkts)))
+                       break;
+       }
+
+       pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+       dev_kfree_skb(skb);
+
+       if (tx_idx != send_idx)
+               goto out;
+
+       if (rx_idx != rx_start_idx + num_pkts)
+               goto out;
+
+       desc = &tp->rx_rcb[rx_start_idx];
+       desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+       opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+       if (opaque_key != RXD_OPAQUE_RING_STD)
+               goto out;
+
+       if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+           (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+               goto out;
+
+       rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
+       if (rx_len != tx_len)
+               goto out;
+
+       rx_skb = tp->rx_std_buffers[desc_idx].skb;
+
+       map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+       pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+
+       for (i = 14; i < tx_len; i++) {
+               if (*(rx_skb->data + i) != (u8) (i & 0xff))
+                       goto out;
+       }
+       err = 0;
+       
+       /* tg3_free_rings will unmap and free the rx_skb */
+out:
+       return err;
+}
+
/* ethtool self-test entry point.  Runs the "online" tests (NVRAM,
 * link) unconditionally, and the destructive "offline" tests
 * (register, memory, loopback, interrupt) only when requested,
 * bracketing them with a full chip halt/re-init.  Per-test pass/fail
 * flags land in data[] in the order of ethtool_test_keys[].
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe to run while traffic could flow. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev))
			tg3_netif_stop(tp);

		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		/* Quiesce the chip and stop the on-chip CPUs so the
		 * register/memory tests see stable state.  The NVRAM
		 * lock is held across the CPU halts (presumably so
		 * firmware is not stopped mid NVRAM access -- TODO
		 * confirm).  5705+ parts have no separate TX CPU.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		tg3_nvram_unlock(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if (tg3_test_loopback(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[4] = 1;
		}

		/* The interrupt test needs interrupts deliverable, so
		 * drop both locks around it.
		 */
		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}
		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		/* Offline tests trashed the chip state: reset it, and
		 * if the interface was up, re-init and restart it.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp);
			tg3_netif_start(tp);
		}
		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
	}
}
+
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct mii_ioctl_data *data = if_mii(ifr);
@@ -7210,6 +7838,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       memcpy(ec, &tp->coal, sizeof(*ec));
+       return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
@@ -7239,9 +7875,12 @@ static struct ethtool_ops tg3_ethtool_ops = {
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = tg3_set_tso,
 #endif
+       .self_test_count        = tg3_get_test_count,
+       .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .get_stats_count        = tg3_get_stats_count,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
+       .get_coalesce           = tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -7921,6 +8560,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 
                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+                       /* Default to PHY_1_MODE if 0 (MAC_MODE) is
+                        * read on some older 5700/5701 bootcode.
+                        */
+                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                           ASIC_REV_5700 ||
+                           GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                           ASIC_REV_5701)
+                               tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
                        break;
 
                case SHASTA_EXT_LED_SHARED:
@@ -8429,15 +9078,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-       /* Only 5701 and later support tagged irq status mode.
-        * Also, 5788 chips cannot use tagged irq status.
-        *
-        * However, since we are using NAPI avoid tagged irq status
-        * because the interrupt condition is more difficult to
-        * fully clear in that mode.
-        */
        tp->coalesce_mode = 0;
-
        if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8501,6 +9142,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+       if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+           (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+               tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+       if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+               tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+                                     HOSTCC_MODE_CLRTICK_TXBD);
+
+               tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+               pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                                      tp->misc_host_ctrl);
+       }
+
        /* these are limited to 10/100 only */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8678,6 +9331,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
        return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE      1
+#define BOUNDARY_MULTI_CACHELINE       2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+       int cacheline_size;
+       u8 byte;
+       int goal;
+
+       pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+       if (byte == 0)
+               cacheline_size = 1024;
+       else
+               cacheline_size = (int) byte * 4;
+
+       /* On 5703 and later chips, the boundary bits have no
+        * effect.
+        */
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+               goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+       goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+       goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+       goal = 0;
+#endif
+#endif
+
+       if (!goal)
+               goto out;
+
+       /* PCI controllers on most RISC systems tend to disconnect
+        * when a device tries to burst across a cache-line boundary.
+        * Therefore, letting tg3 do so just wastes PCI bandwidth.
+        *
+        * Unfortunately, for PCI-E there are only limited
+        * write-side controls for this, and thus for reads
+        * we will still get the disconnects.  We'll also waste
+        * these PCI cycles for both read and write for chips
+        * other than 5700 and 5701 which do not implement the
+        * boundary bits.
+        */
+       if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+               switch (cacheline_size) {
+               case 16:
+               case 32:
+               case 64:
+               case 128:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+                                       DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+                       } else {
+                               val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                                       DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                       }
+                       break;
+
+               case 256:
+                       val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+                               DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+                       break;
+
+               default:
+                       val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                               DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                       break;
+               };
+       } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+               switch (cacheline_size) {
+               case 16:
+               case 32:
+               case 64:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                               val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+                               break;
+                       }
+                       /* fallthrough */
+               case 128:
+               default:
+                       val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                       val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+                       break;
+               };
+       } else {
+               switch (cacheline_size) {
+               case 16:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_16 |
+                                       DMA_RWCTRL_WRITE_BNDRY_16);
+                               break;
+                       }
+                       /* fallthrough */
+               case 32:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_32 |
+                                       DMA_RWCTRL_WRITE_BNDRY_32);
+                               break;
+                       }
+                       /* fallthrough */
+               case 64:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_64 |
+                                       DMA_RWCTRL_WRITE_BNDRY_64);
+                               break;
+                       }
+                       /* fallthrough */
+               case 128:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_128 |
+                                       DMA_RWCTRL_WRITE_BNDRY_128);
+                               break;
+                       }
+                       /* fallthrough */
+               case 256:
+                       val |= (DMA_RWCTRL_READ_BNDRY_256 |
+                               DMA_RWCTRL_WRITE_BNDRY_256);
+                       break;
+               case 512:
+                       val |= (DMA_RWCTRL_READ_BNDRY_512 |
+                               DMA_RWCTRL_WRITE_BNDRY_512);
+                       break;
+               case 1024:
+               default:
+                       val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+                               DMA_RWCTRL_WRITE_BNDRY_1024);
+                       break;
+               };
+       }
+
+out:
+       return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
        struct tg3_internal_buffer_desc test_desc;
@@ -8759,12 +9552,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
        return ret;
 }
 
-#define TEST_BUFFER_SIZE       0x400
+#define TEST_BUFFER_SIZE       0x2000
 
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
        dma_addr_t buf_dma;
-       u32 *buf;
+       u32 *buf, saved_dma_rwctrl;
        int ret;
 
        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8776,46 +9569,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-       {
-               u8 byte;
-               int cacheline_size;
-               pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-               if (byte == 0)
-                       cacheline_size = 1024;
-               else
-                       cacheline_size = (int) byte * 4;
-
-               switch (cacheline_size) {
-               case 16:
-               case 32:
-               case 64:
-               case 128:
-                       if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-                               break;
-                       } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-                               tp->dma_rwctrl &=
-                                       ~(DMA_RWCTRL_PCI_WRITE_CMD);
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-                               break;
-                       }
-                       /* fallthrough */
-               case 256:
-                       if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_256;
-                       else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-               };
-       }
-#endif
+       tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
@@ -8834,7 +9588,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                        if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-                       /* Set bit 23 to renable PCIX hw bug fix */
+                       /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |= 0x009f0000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
@@ -8875,6 +9629,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;
 
+       /* It is best to perform DMA test with maximum write burst size
+        * to expose the 5700/5701 write DMA bug.
+        */
+       saved_dma_rwctrl = tp->dma_rwctrl;
+       tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+       tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
        while (1) {
                u32 *p = buf, i;
 
@@ -8913,8 +9674,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                        if (p[i] == i)
                                continue;
 
-                       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-                           DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+                       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+                           DMA_RWCTRL_WRITE_BNDRY_16) {
+                               tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
@@ -8931,6 +9693,28 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                        break;
                }
        }
+       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+           DMA_RWCTRL_WRITE_BNDRY_16) {
+               static struct pci_device_id dma_wait_state_chipsets[] = {
+                       { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
+                                    PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+                       { },
+               };
+
+               /* DMA test passed without adjusting DMA boundary,
+                * now look for chipsets that are known to expose the
+                * DMA bug without failing the test.
+                */
+               if (pci_dev_present(dma_wait_state_chipsets)) {
+                       tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+                       tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+               }
+               else
+                       /* Safe to use the calculated DMA boundary. */
+                       tp->dma_rwctrl = saved_dma_rwctrl;
+
+               tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+       }
 
 out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9018,6 +9802,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
        return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+       struct ethtool_coalesce *ec = &tp->coal;
+
+       memset(ec, 0, sizeof(*ec));
+       ec->cmd = ETHTOOL_GCOALESCE;
+       ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+       ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+       ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+       ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+       ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+       ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+       ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+       ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+       ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+       if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+                                HOSTCC_MODE_CLRTICK_TXBD)) {
+               ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+               ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+               ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+               ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+       }
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
 {
@@ -9239,7 +10048,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                pci_save_state(tp->pdev);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-               tg3_halt(tp, 1);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }
 
        err = tg3_test_dma(tp);
@@ -9263,6 +10072,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        /* flow control autonegotiation is default behavior */
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+       tg3_init_coal(tp);
+
        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
@@ -9305,6 +10116,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
               (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+       printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+              dev->name, tp->dma_rwctrl);
 
        return 0;
 
@@ -9362,7 +10175,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 
        spin_lock_irq(&tp->lock);
        spin_lock(&tp->tx_lock);
-       tg3_halt(tp, 1);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        spin_unlock(&tp->tx_lock);
        spin_unlock_irq(&tp->lock);