[FORCEDETH]: Code optimizations for rings, rx & tx data paths, and stats
[safe/jmp/linux-2.6] / drivers / net / forcedeth.c
index fd91071..d04214e 100644 (file)
  *     0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
  *     0.58: 30 Oct 2006: Added support for sideband management unit.
  *     0.59: 30 Oct 2006: Added support for recoverable error.
+ *     0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
 #else
 #define DRIVERNAPI
 #endif
-#define FORCEDETH_VERSION              "0.59"
+#define FORCEDETH_VERSION              "0.60"
 #define DRV_NAME                       "forcedeth"
 
 #include <linux/module.h>
 #define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
 #define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
 #define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
-#define DEV_HAS_STATISTICS      0x0400  /* device supports hw statistics */
-#define DEV_HAS_TEST_EXTENDED   0x0800  /* device supports extended diagnostic test */
-#define DEV_HAS_MGMT_UNIT       0x1000  /* device supports management unit */
+#define DEV_HAS_STATISTICS_V1   0x0400  /* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2   0x0800  /* device supports hw statistics version 2 */
+#define DEV_HAS_TEST_EXTENDED   0x1000  /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT       0x2000  /* device supports management unit */
 
 enum {
        NvRegIrqStatus = 0x000,
@@ -210,7 +212,7 @@ enum {
  * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
  */
        NvRegPollingInterval = 0x00c,
-#define NVREG_POLL_DEFAULT_THROUGHPUT  970
+#define NVREG_POLL_DEFAULT_THROUGHPUT  970 /* backup tx cleanup if loop max reached */
 #define NVREG_POLL_DEFAULT_CPU 13
        NvRegMSIMap0 = 0x020,
        NvRegMSIMap1 = 0x024,
@@ -487,7 +489,8 @@ union ring_type {
 
 /* Miscellaneous hardware related defines: */
 #define NV_PCI_REGSZ_VER1              0x270
-#define NV_PCI_REGSZ_VER2              0x604
+#define NV_PCI_REGSZ_VER2              0x2d4
+#define NV_PCI_REGSZ_VER3              0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY    4
@@ -605,9 +608,6 @@ static const struct nv_ethtool_str nv_estats_str[] = {
        { "tx_carrier_errors" },
        { "tx_excess_deferral" },
        { "tx_retry_error" },
-       { "tx_deferral" },
-       { "tx_packets" },
-       { "tx_pause" },
        { "rx_frame_error" },
        { "rx_extra_byte" },
        { "rx_late_collision" },
@@ -620,11 +620,17 @@ static const struct nv_ethtool_str nv_estats_str[] = {
        { "rx_unicast" },
        { "rx_multicast" },
        { "rx_broadcast" },
+       { "rx_packets" },
+       { "rx_errors_total" },
+       { "tx_errors_total" },
+
+       /* version 2 stats */
+       { "tx_deferral" },
+       { "tx_packets" },
        { "rx_bytes" },
+       { "tx_pause" },
        { "rx_pause" },
-       { "rx_drop_frame" },
-       { "rx_packets" },
-       { "rx_errors_total" }
+       { "rx_drop_frame" }
 };
 
 struct nv_ethtool_stats {
@@ -637,9 +643,6 @@ struct nv_ethtool_stats {
        u64 tx_carrier_errors;
        u64 tx_excess_deferral;
        u64 tx_retry_error;
-       u64 tx_deferral;
-       u64 tx_packets;
-       u64 tx_pause;
        u64 rx_frame_error;
        u64 rx_extra_byte;
        u64 rx_late_collision;
@@ -652,13 +655,22 @@ struct nv_ethtool_stats {
        u64 rx_unicast;
        u64 rx_multicast;
        u64 rx_broadcast;
+       u64 rx_packets;
+       u64 rx_errors_total;
+       u64 tx_errors_total;
+
+       /* version 2 stats */
+       u64 tx_deferral;
+       u64 tx_packets;
        u64 rx_bytes;
+       u64 tx_pause;
        u64 rx_pause;
        u64 rx_drop_frame;
-       u64 rx_packets;
-       u64 rx_errors_total;
 };
 
+#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
+#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
+
 /* diagnostics */
 #define NV_TEST_COUNT_BASE 3
 #define NV_TEST_COUNT_EXTENDED 4
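
The two count macros above lean on the reordering just made to the stats layout: the six counters that only version-2 hardware provides (tx_deferral, tx_packets, rx_bytes, tx_pause, rx_pause, rx_drop_frame) now sit at the tail of struct nv_ethtool_stats, so NV_DEV_STATISTICS_V1_COUNT is simply the V2 count minus six and version-1 hardware can report a prefix of the same buffer. A minimal sketch of that idea, assuming the surrounding driver definitions; the function name and the has_v2 flag are illustrative, not part of the patch:

/* Illustrative only: hand ethtool just the counters this hardware
 * revision supports.  Valid because the version-2 counters are appended
 * after the version-1 set rather than interleaved with it. */
static void example_fill_ethtool_stats(u64 *buffer,
				       struct nv_ethtool_stats *estats,
				       int has_v2)
{
	int count = has_v2 ? NV_DEV_STATISTICS_V2_COUNT
			   : NV_DEV_STATISTICS_V1_COUNT;

	memcpy(buffer, estats, count * sizeof(u64));
}
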
@@ -827,7 +839,7 @@ enum {
        NV_MSIX_INT_DISABLED,
        NV_MSIX_INT_ENABLED
 };
-static int msix = NV_MSIX_INT_ENABLED;
+static int msix = NV_MSIX_INT_DISABLED;
 
 /*
  * DMA 64bit
@@ -1275,6 +1287,61 @@ static void nv_mac_reset(struct net_device *dev)
        pci_push(base);
 }
 
+static void nv_get_hw_stats(struct net_device *dev)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       u8 __iomem *base = get_hwbase(dev);
+
+       np->estats.tx_bytes += readl(base + NvRegTxCnt);
+       np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+       np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+       np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+       np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+       np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+       np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+       np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+       np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+       np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+       np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+       np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+       np->estats.rx_runt += readl(base + NvRegRxRunt);
+       np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+       np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+       np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+       np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+       np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+       np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+       np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+       np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+       np->estats.rx_packets =
+               np->estats.rx_unicast +
+               np->estats.rx_multicast +
+               np->estats.rx_broadcast;
+       np->estats.rx_errors_total =
+               np->estats.rx_crc_errors +
+               np->estats.rx_over_errors +
+               np->estats.rx_frame_error +
+               (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
+               np->estats.rx_late_collision +
+               np->estats.rx_runt +
+               np->estats.rx_frame_too_long;
+       np->estats.tx_errors_total =
+               np->estats.tx_late_collision +
+               np->estats.tx_fifo_errors +
+               np->estats.tx_carrier_errors +
+               np->estats.tx_excess_deferral +
+               np->estats.tx_retry_error;
+
+       if (np->driver_data & DEV_HAS_STATISTICS_V2) {
+               np->estats.tx_deferral += readl(base + NvRegTxDef);
+               np->estats.tx_packets += readl(base + NvRegTxFrame);
+               np->estats.rx_bytes += readl(base + NvRegRxCnt);
+               np->estats.tx_pause += readl(base + NvRegTxPause);
+               np->estats.rx_pause += readl(base + NvRegRxPause);
+               np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+       }
+}
+
 /*
  * nv_get_stats: dev->get_stats function
  * Get latest stats value from the nic.
@@ -1285,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
 
-       /* It seems that the nic always generates interrupts and doesn't
-        * accumulate errors internally. Thus the current values in np->stats
-        * are already up to date.
-        */
+       /* If the nic supports hw counters then retrieve latest values */
+       if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
+               nv_get_hw_stats(dev);
+
+               /* copy to net_device stats */
+               np->stats.tx_bytes = np->estats.tx_bytes;
+               np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+               np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+               np->stats.rx_crc_errors = np->estats.rx_crc_errors;
+               np->stats.rx_over_errors = np->estats.rx_over_errors;
+               np->stats.rx_errors = np->estats.rx_errors_total;
+               np->stats.tx_errors = np->estats.tx_errors_total;
+       }
        return &np->stats;
 }
 
@@ -1859,14 +1935,15 @@ static void nv_tx_done(struct net_device *dev)
        }
 }
 
-static void nv_tx_done_optimized(struct net_device *dev)
+static void nv_tx_done_optimized(struct net_device *dev, int limit)
 {
        struct fe_priv *np = netdev_priv(dev);
        u32 flags;
        struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
 
        while ((np->get_tx.ex != np->put_tx.ex) &&
-              !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID)) {
+              !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
+              (limit-- > 0)) {
 
                dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
                                        dev->name, flags);
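
Passing a work budget turns the reclaim loop above into a bounded one: the added (limit-- > 0) term stops a single call after at most limit descriptors, and whatever is left waits for the next interrupt pass (or for the backup cleanup added to nv_nic_irq_other later in this patch). A generic, self-contained sketch of the same pattern, with struct ring and its helpers purely hypothetical:

/* Hypothetical illustration of a budgeted reclaim loop: process at most
 * 'limit' completed entries per call and report how many were handled so
 * the caller can decide whether more work remains. */
static int reap_completed(struct ring *r, int limit)
{
	int done = 0;

	while (r->tail != r->head &&
	       entry_completed(&r->desc[r->tail]) &&	/* hypothetical helper */
	       limit-- > 0) {
		release_entry(&r->desc[r->tail]);	/* hypothetical helper */
		r->tail = (r->tail + 1) % r->size;
		done++;
	}

	return done;
}
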
@@ -1877,16 +1954,8 @@ static void nv_tx_done_optimized(struct net_device *dev)
                np->get_tx_ctx->dma = 0;
 
                if (flags & NV_TX2_LASTPACKET) {
-                       if (flags & NV_TX2_ERROR) {
-                               if (flags & NV_TX2_UNDERFLOW)
-                                       np->stats.tx_fifo_errors++;
-                               if (flags & NV_TX2_CARRIERLOST)
-                                       np->stats.tx_carrier_errors++;
-                               np->stats.tx_errors++;
-                       } else {
+                       if (!(flags & NV_TX2_ERROR))
                                np->stats.tx_packets++;
-                               np->stats.tx_bytes += np->get_tx_ctx->skb->len;
-                       }
                        dev_kfree_skb_any(np->get_tx_ctx->skb);
                        np->get_tx_ctx->skb = NULL;
                }
@@ -1973,7 +2042,7 @@ static void nv_tx_timeout(struct net_device *dev)
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                nv_tx_done(dev);
        else
-               nv_tx_done_optimized(dev);
+               nv_tx_done_optimized(dev, np->tx_ring_size);
 
        /* 3) if there are dead entries: clear everything */
        if (np->get_tx_ctx != np->put_tx_ctx) {
@@ -1981,9 +2050,10 @@ static void nv_tx_timeout(struct net_device *dev)
                nv_drain_tx(dev);
                nv_init_tx(dev);
                setup_hw_rings(dev, NV_SETUP_TX_RING);
-               netif_wake_queue(dev);
        }
 
+       netif_wake_queue(dev);
+
        /* 4) restart tx engine */
        nv_start_tx(dev);
        spin_unlock_irq(&np->lock);
@@ -2223,7 +2293,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                if (flags & NV_RX2_ERROR4) {
                                        len = nv_getlen(dev, skb->data, len);
                                        if (len < 0) {
-                                               np->stats.rx_errors++;
                                                dev_kfree_skb(skb);
                                                goto next_pkt;
                                        }
@@ -2236,11 +2305,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                }
                                /* the rest are hard errors */
                                else {
-                                       if (flags & NV_RX2_CRCERR)
-                                               np->stats.rx_crc_errors++;
-                                       if (flags & NV_RX2_OVERFLOW)
-                                               np->stats.rx_over_errors++;
-                                       np->stats.rx_errors++;
                                        dev_kfree_skb(skb);
                                        goto next_pkt;
                                }
@@ -2777,7 +2841,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
                        events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
                        writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
                }
-               pci_push(base);
                dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
@@ -2786,22 +2849,46 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
                nv_tx_done(dev);
                spin_unlock(&np->lock);
 
-               if (events & NVREG_IRQ_LINK) {
+#ifdef CONFIG_FORCEDETH_NAPI
+               if (events & NVREG_IRQ_RX_ALL) {
+                       netif_rx_schedule(dev);
+
+                       /* Disable further receive irqs */
+                       spin_lock(&np->lock);
+                       np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+                       if (np->msi_flags & NV_MSI_X_ENABLED)
+                               writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+                       else
+                               writel(np->irqmask, base + NvRegIrqMask);
+                       spin_unlock(&np->lock);
+               }
+#else
+               if (nv_rx_process(dev, dev->weight)) {
+                       if (unlikely(nv_alloc_rx(dev))) {
+                               spin_lock(&np->lock);
+                               if (!np->in_shutdown)
+                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                               spin_unlock(&np->lock);
+                       }
+               }
+#endif
+               if (unlikely(events & NVREG_IRQ_LINK)) {
                        spin_lock(&np->lock);
                        nv_link_irq(dev);
                        spin_unlock(&np->lock);
                }
-               if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+               if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
                        spin_lock(&np->lock);
                        nv_linkchange(dev);
                        spin_unlock(&np->lock);
                        np->link_timeout = jiffies + LINK_TIMEOUT;
                }
-               if (events & (NVREG_IRQ_TX_ERR)) {
+               if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
                        dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                                                dev->name, events);
                }
-               if (events & (NVREG_IRQ_UNKNOWN)) {
+               if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
                        printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                                                dev->name, events);
                }
@@ -2822,30 +2909,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
                        spin_unlock(&np->lock);
                        break;
                }
-#ifdef CONFIG_FORCEDETH_NAPI
-               if (events & NVREG_IRQ_RX_ALL) {
-                       netif_rx_schedule(dev);
-
-                       /* Disable furthur receive irq's */
-                       spin_lock(&np->lock);
-                       np->irqmask &= ~NVREG_IRQ_RX_ALL;
-
-                       if (np->msi_flags & NV_MSI_X_ENABLED)
-                               writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-                       else
-                               writel(np->irqmask, base + NvRegIrqMask);
-                       spin_unlock(&np->lock);
-               }
-#else
-               nv_rx_process(dev, dev->weight);
-               if (nv_alloc_rx(dev)) {
-                       spin_lock(&np->lock);
-                       if (!np->in_shutdown)
-                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                       spin_unlock(&np->lock);
-               }
-#endif
-               if (i > max_interrupt_work) {
+               if (unlikely(i > max_interrupt_work)) {
                        spin_lock(&np->lock);
                        /* disable interrupts on the nic */
                        if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2869,6 +2933,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
        return IRQ_RETVAL(i);
 }
 
+#define TX_WORK_PER_LOOP  64
+#define RX_WORK_PER_LOOP  64
+/*
+ * All _optimized functions are used to help increase performance
+ * (reduce CPU overhead and increase throughput). They use descriptor
+ * version 3, compiler branch hints, and fewer memory accesses.
+ */
 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 {
        struct net_device *dev = (struct net_device *) data;
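
The comment above describes the intent of the _optimized fast path; the hunks that follow restructure nv_nic_irq_optimized around it. For orientation, the resulting loop shape is roughly the sketch below, a condensed, hypothetical simplification rather than the literal function body (MSI-X status handling, link/timer events and error reporting are omitted; read_and_ack_irq_status(), schedule_napi_or_poll_rx() and mask_nic_interrupts() are made-up helpers):

	/* Condensed sketch of the optimized ISR flow after this patch. */
	for (i = 0; ; i++) {
		events = read_and_ack_irq_status(base);		/* hypothetical */
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);	/* bounded tx reap */
		spin_unlock(&np->lock);

		if (events & NVREG_IRQ_RX_ALL)
			schedule_napi_or_poll_rx(dev);		/* NAPI defers, non-NAPI polls now */

		if (unlikely(i > max_interrupt_work)) {		/* runaway-interrupt guard */
			mask_nic_interrupts(dev);		/* hypothetical */
			break;
		}
	}
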
@@ -2887,31 +2958,54 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
                        events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
                        writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
                }
-               pci_push(base);
                dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
 
                spin_lock(&np->lock);
-               nv_tx_done_optimized(dev);
+               nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
                spin_unlock(&np->lock);
 
-               if (events & NVREG_IRQ_LINK) {
+#ifdef CONFIG_FORCEDETH_NAPI
+               if (events & NVREG_IRQ_RX_ALL) {
+                       netif_rx_schedule(dev);
+
+                       /* Disable further receive irqs */
+                       spin_lock(&np->lock);
+                       np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+                       if (np->msi_flags & NV_MSI_X_ENABLED)
+                               writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+                       else
+                               writel(np->irqmask, base + NvRegIrqMask);
+                       spin_unlock(&np->lock);
+               }
+#else
+               if (nv_rx_process_optimized(dev, dev->weight)) {
+                       if (unlikely(nv_alloc_rx_optimized(dev))) {
+                               spin_lock(&np->lock);
+                               if (!np->in_shutdown)
+                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                               spin_unlock(&np->lock);
+                       }
+               }
+#endif
+               if (unlikely(events & NVREG_IRQ_LINK)) {
                        spin_lock(&np->lock);
                        nv_link_irq(dev);
                        spin_unlock(&np->lock);
                }
-               if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+               if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
                        spin_lock(&np->lock);
                        nv_linkchange(dev);
                        spin_unlock(&np->lock);
                        np->link_timeout = jiffies + LINK_TIMEOUT;
                }
-               if (events & (NVREG_IRQ_TX_ERR)) {
+               if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
                        dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                                                dev->name, events);
                }
-               if (events & (NVREG_IRQ_UNKNOWN)) {
+               if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
                        printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                                                dev->name, events);
                }
@@ -2933,30 +3027,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
                        break;
                }
 
-#ifdef CONFIG_FORCEDETH_NAPI
-               if (events & NVREG_IRQ_RX_ALL) {
-                       netif_rx_schedule(dev);
-
-                       /* Disable furthur receive irq's */
-                       spin_lock(&np->lock);
-                       np->irqmask &= ~NVREG_IRQ_RX_ALL;
-
-                       if (np->msi_flags & NV_MSI_X_ENABLED)
-                               writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-                       else
-                               writel(np->irqmask, base + NvRegIrqMask);
-                       spin_unlock(&np->lock);
-               }
-#else
-               nv_rx_process_optimized(dev, dev->weight);
-               if (nv_alloc_rx_optimized(dev)) {
-                       spin_lock(&np->lock);
-                       if (!np->in_shutdown)
-                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                       spin_unlock(&np->lock);
-               }
-#endif
-               if (i > max_interrupt_work) {
+               if (unlikely(i > max_interrupt_work)) {
                        spin_lock(&np->lock);
                        /* disable interrupts on the nic */
                        if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2994,20 +3065,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
        for (i=0; ; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
                writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
-               pci_push(base);
                dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
 
                spin_lock_irqsave(&np->lock, flags);
-               nv_tx_done_optimized(dev);
+               nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
                spin_unlock_irqrestore(&np->lock, flags);
 
-               if (events & (NVREG_IRQ_TX_ERR)) {
+               if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
                        dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                                                dev->name, events);
                }
-               if (i > max_interrupt_work) {
+               if (unlikely(i > max_interrupt_work)) {
                        spin_lock_irqsave(&np->lock, flags);
                        /* disable interrupts on the nic */
                        writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
@@ -3035,13 +3105,17 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        unsigned long flags;
+       int retcode;
 
-       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                pkts = nv_rx_process(dev, limit);
-       else
+               retcode = nv_alloc_rx(dev);
+       } else {
                pkts = nv_rx_process_optimized(dev, limit);
+               retcode = nv_alloc_rx_optimized(dev);
+       }
 
-       if (nv_alloc_rx(dev)) {
+       if (retcode) {
                spin_lock_irqsave(&np->lock, flags);
                if (!np->in_shutdown)
                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -3105,20 +3179,20 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
        for (i=0; ; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
                writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
-               pci_push(base);
                dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
 
-               nv_rx_process_optimized(dev, dev->weight);
-               if (nv_alloc_rx_optimized(dev)) {
-                       spin_lock_irqsave(&np->lock, flags);
-                       if (!np->in_shutdown)
-                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                       spin_unlock_irqrestore(&np->lock, flags);
+               if (nv_rx_process_optimized(dev, dev->weight)) {
+                       if (unlikely(nv_alloc_rx_optimized(dev))) {
+                               spin_lock_irqsave(&np->lock, flags);
+                               if (!np->in_shutdown)
+                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                               spin_unlock_irqrestore(&np->lock, flags);
+                       }
                }
 
-               if (i > max_interrupt_work) {
+               if (unlikely(i > max_interrupt_work)) {
                        spin_lock_irqsave(&np->lock, flags);
                        /* disable interrupts on the nic */
                        writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
@@ -3153,11 +3227,15 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
        for (i=0; ; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
                writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
-               pci_push(base);
                dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
 
+               /* check tx in case we reached max loop limit in tx isr */
+               spin_lock_irqsave(&np->lock, flags);
+               nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+               spin_unlock_irqrestore(&np->lock, flags);
+
                if (events & NVREG_IRQ_LINK) {
                        spin_lock_irqsave(&np->lock, flags);
                        nv_link_irq(dev);
@@ -3187,7 +3265,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
                        printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                                                dev->name, events);
                }
-               if (i > max_interrupt_work) {
+               if (unlikely(i > max_interrupt_work)) {
                        spin_lock_irqsave(&np->lock, flags);
                        /* disable interrupts on the nic */
                        writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
@@ -3459,7 +3537,10 @@ static void nv_do_nic_poll(unsigned long data)
        pci_push(base);
 
        if (!using_multi_irqs(dev)) {
-               nv_nic_irq(0, dev);
+               if (np->desc_ver == DESC_VER_3)
+                       nv_nic_irq_optimized(0, dev);
+               else
+                       nv_nic_irq(0, dev);
                if (np->msi_flags & NV_MSI_X_ENABLED)
                        enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
                else
@@ -3491,47 +3572,8 @@ static void nv_do_stats_poll(unsigned long data)
 {
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
-       u8 __iomem *base = get_hwbase(dev);
 
-       np->estats.tx_bytes += readl(base + NvRegTxCnt);
-       np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
-       np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
-       np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
-       np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
-       np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
-       np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
-       np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
-       np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
-       np->estats.tx_deferral += readl(base + NvRegTxDef);
-       np->estats.tx_packets += readl(base + NvRegTxFrame);
-       np->estats.tx_pause += readl(base + NvRegTxPause);
-       np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
-       np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
-       np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
-       np->estats.rx_runt += readl(base + NvRegRxRunt);
-       np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
-       np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
-       np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
-       np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
-       np->estats.rx_length_error += readl(base + NvRegRxLenErr);
-       np->estats.rx_unicast += readl(base + NvRegRxUnicast);
-       np->estats.rx_multicast += readl(base + NvRegRxMulticast);
-       np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
-       np->estats.rx_bytes += readl(base + NvRegRxCnt);
-       np->estats.rx_pause += readl(base + NvRegRxPause);
-       np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
-       np->estats.rx_packets =
-               np->estats.rx_unicast +
-               np->estats.rx_multicast +
-               np->estats.rx_broadcast;
-       np->estats.rx_errors_total =
-               np->estats.rx_crc_errors +
-               np->estats.rx_over_errors +
-               np->estats.rx_frame_error +
-               (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
-               np->estats.rx_late_collision +
-               np->estats.rx_runt +
-               np->estats.rx_frame_too_long;
+       nv_get_hw_stats(dev);
 
        if (!np->in_shutdown)
                mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
@@ -4150,8 +4192,10 @@ static int nv_get_stats_count(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
 
-       if (np->driver_data & DEV_HAS_STATISTICS)
-               return sizeof(struct nv_ethtool_stats)/sizeof(u64);
+       if (np->driver_data & DEV_HAS_STATISTICS_V1)
+               return NV_DEV_STATISTICS_V1_COUNT;
+       else if (np->driver_data & DEV_HAS_STATISTICS_V2)
+               return NV_DEV_STATISTICS_V2_COUNT;
        else
                return 0;
 }
@@ -4738,7 +4782,7 @@ static int nv_open(struct net_device *dev)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 
        /* start statistics timer */
-       if (np->driver_data & DEV_HAS_STATISTICS)
+       if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
                mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
 
        spin_unlock_irq(&np->lock);
@@ -4835,7 +4879,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        if (err < 0)
                goto out_disable;
 
-       if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
+       if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
+               np->register_size = NV_PCI_REGSZ_VER3;
+       else if (id->driver_data & DEV_HAS_STATISTICS_V1)
                np->register_size = NV_PCI_REGSZ_VER2;
        else
                np->register_size = NV_PCI_REGSZ_VER1;
@@ -4899,7 +4945,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
                dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
                dev->features |= NETIF_F_TSO;
-       }
+       }
 
        np->vlanctl_bits = 0;
        if (id->driver_data & DEV_HAS_VLAN) {
@@ -4969,7 +5015,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = nv_poll_controller;
 #endif
-       dev->weight = 64;
+       dev->weight = RX_WORK_PER_LOOP;
 #ifdef CONFIG_FORCEDETH_NAPI
        dev->poll = nv_napi_poll;
 #endif
@@ -5284,83 +5330,83 @@ static struct pci_device_id pci_tbl[] = {
        },
        {       /* CK804 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
        },
        {       /* CK804 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
        },
        {       /* MCP04 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
        },
        {       /* MCP04 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
        },
        {       /* MCP51 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
        },
        {       /* MCP51 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
        },
        {0,},
 };