diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index b60a304..e282d0a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -49,6 +49,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/timer.h>
@@ -58,6 +59,7 @@
 #include <linux/init.h>
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -719,7 +721,8 @@ static const struct register_test nv_registers_test[] = {
 struct nv_skb_map {
        struct sk_buff *skb;
        dma_addr_t dma;
-       unsigned int dma_len;
+       unsigned int dma_len:31;
+       unsigned int dma_single:1;
        struct ring_desc_ex *first_tx_desc;
        struct nv_skb_map *next_tx_ctx;
 };
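The 32-bit dma_len field is narrowed to a 31-bit bitfield so the new dma_single flag fits in the same word and struct nv_skb_map does not grow. The flag records which PCI DMA mapping call produced tx_skb->dma, roughly:

	/* Sketch of the convention introduced above (names from this patch): */
	map->dma_single = 1;	/* mapping came from pci_map_single() (skb head) */
	map->dma_single = 0;	/* mapping came from pci_map_page() (skb fragment) */

31 bits still comfortably covers a single segment, since the transmit paths below split buffers into NV_TX2_TSO_MAX_SIZE-sized chunks.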
@@ -1101,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
 
 static void nv_napi_enable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
        struct fe_priv *np = get_nvpriv(dev);
 
        napi_enable(&np->napi);
-#endif
 }
 
 static void nv_napi_disable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
        struct fe_priv *np = get_nvpriv(dev);
 
        napi_disable(&np->napi);
-#endif
 }
 
 #define MII_READ       (-1)
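With the CONFIG_FORCEDETH_NAPI conditionals gone, NAPI is always compiled in and these wrappers become unconditional. They plug into the usual open/close lifecycle; a sketch of the pattern (the matching netif_napi_add() registration appears in the nv_probe hunk near the end of this diff):

	nv_napi_enable(dev);	/* on open: nv_napi_poll() may now be scheduled */
	/* ... device runs, interrupts schedule the poll ... */
	nv_napi_disable(dev);	/* on close: waits for an in-flight poll to finish */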
@@ -1807,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 }
 
 /* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
 static void nv_do_rx_refill(unsigned long data)
 {
        struct net_device *dev = (struct net_device *) data;
@@ -1816,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
        /* Just reschedule NAPI rx processing */
        napi_schedule(&np->napi);
 }
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
-       struct net_device *dev = (struct net_device *) data;
-       struct fe_priv *np = netdev_priv(dev);
-       int retcode;
-
-       if (!using_multi_irqs(dev)) {
-               if (np->msi_flags & NV_MSI_X_ENABLED)
-                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-               else
-                       disable_irq(np->pci_dev->irq);
-       } else {
-               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-       }
-       if (!nv_optimized(np))
-               retcode = nv_alloc_rx(dev);
-       else
-               retcode = nv_alloc_rx_optimized(dev);
-       if (retcode) {
-               spin_lock_irq(&np->lock);
-               if (!np->in_shutdown)
-                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-               spin_unlock_irq(&np->lock);
-       }
-       if (!using_multi_irqs(dev)) {
-               if (np->msi_flags & NV_MSI_X_ENABLED)
-                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-               else
-                       enable_irq(np->pci_dev->irq);
-       } else {
-               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-       }
-}
-#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
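Under NAPI the oom_kick timer no longer needs to fence against the interrupt handler by disabling IRQs around the refill; it simply reschedules the softirq and lets nv_napi_poll() redo the allocation under its own locking. A hedged sketch of how the timer is presumably wired up elsewhere in the driver (the setup call is an assumption; oom_kick and OOM_REFILL come from the code above):

	/* assumed init-time registration, not part of this hunk: */
	setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev);
	/* on allocation failure, re-arm the retry: */
	mod_timer(&np->oom_kick, jiffies + OOM_REFILL);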
@@ -1912,6 +1875,7 @@ static void nv_init_tx(struct net_device *dev)
                np->tx_skb[i].skb = NULL;
                np->tx_skb[i].dma = 0;
                np->tx_skb[i].dma_len = 0;
+               np->tx_skb[i].dma_single = 0;
                np->tx_skb[i].first_tx_desc = NULL;
                np->tx_skb[i].next_tx_ctx = NULL;
        }
@@ -1930,23 +1894,30 @@ static int nv_init_ring(struct net_device *dev)
                return nv_alloc_rx_optimized(dev);
 }
 
-static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
+static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
 {
-       struct fe_priv *np = netdev_priv(dev);
-
        if (tx_skb->dma) {
-               pci_unmap_page(np->pci_dev, tx_skb->dma,
-                              tx_skb->dma_len,
-                              PCI_DMA_TODEVICE);
+               if (tx_skb->dma_single)
+                       pci_unmap_single(np->pci_dev, tx_skb->dma,
+                                        tx_skb->dma_len,
+                                        PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_page(np->pci_dev, tx_skb->dma,
+                                      tx_skb->dma_len,
+                                      PCI_DMA_TODEVICE);
                tx_skb->dma = 0;
        }
+}
+
+static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
+{
+       nv_unmap_txskb(np, tx_skb);
        if (tx_skb->skb) {
                dev_kfree_skb_any(tx_skb->skb);
                tx_skb->skb = NULL;
                return 1;
-       } else {
-               return 0;
        }
+       return 0;
 }
 
 static void nv_drain_tx(struct net_device *dev)
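nv_unmap_txskb() exists because the DMA API requires symmetric calls: a mapping made with pci_map_single() must be released with pci_unmap_single(), and one made with pci_map_page() with pci_unmap_page(). The old code unmapped everything with pci_unmap_page(), including the skb head that had been mapped with pci_map_single(); the new dma_single bit makes the pairing explicit:

	/* Pairing rule the helper enforces (sketch): */
	dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);

	dma = pci_map_page(pdev, frag->page, off, len, PCI_DMA_TODEVICE);
	pci_unmap_page(pdev, dma, len, PCI_DMA_TODEVICE);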
@@ -1964,10 +1935,11 @@ static void nv_drain_tx(struct net_device *dev)
                        np->tx_ring.ex[i].bufhigh = 0;
                        np->tx_ring.ex[i].buflow = 0;
                }
-               if (nv_release_txskb(dev, &np->tx_skb[i]))
+               if (nv_release_txskb(np, &np->tx_skb[i]))
                        dev->stats.tx_dropped++;
                np->tx_skb[i].dma = 0;
                np->tx_skb[i].dma_len = 0;
+               np->tx_skb[i].dma_single = 0;
                np->tx_skb[i].first_tx_desc = NULL;
                np->tx_skb[i].next_tx_ctx = NULL;
        }
@@ -2127,7 +2099,7 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
  * nv_start_xmit: dev->hard_start_xmit function
  * Called with netif_tx_lock held.
  */
-static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
        u32 tx_flags = 0;
@@ -2136,7 +2108,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int i;
        u32 offset = 0;
        u32 bcnt;
-       u32 size = skb->len-skb->data_len;
+       u32 size = skb_headlen(skb);
        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
        u32 empty_slots;
        struct ring_desc* put_tx;
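skb_headlen() is the canonical accessor for the length of an skb's linear area and is defined in include/linux/skbuff.h as exactly the expression it replaces here, so this hunk (and the matching one in nv_start_xmit_optimized() below) is a behavior-preserving cleanup:

	static inline unsigned int skb_headlen(const struct sk_buff *skb)
	{
		return skb->len - skb->data_len;
	}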
@@ -2171,6 +2143,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
                                                PCI_DMA_TODEVICE);
                np->put_tx_ctx->dma_len = bcnt;
+               np->put_tx_ctx->dma_single = 1;
                put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
                put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
@@ -2196,6 +2169,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
                                                           PCI_DMA_TODEVICE);
                        np->put_tx_ctx->dma_len = bcnt;
+                       np->put_tx_ctx->dma_single = 0;
                        put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
                        put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
@@ -2245,7 +2219,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
+                                          struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
        u32 tx_flags = 0;
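netdev_tx_t is the dedicated return type for ndo_start_xmit handlers; it documents that only the NETDEV_TX_* codes are valid returns and lets static checkers flag anything else. The function bodies need no change, since they already return those codes:

	/* sketch of the signature convention: */
	static netdev_tx_t nv_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		/* ... map and queue the skb ... */
		return NETDEV_TX_OK;	/* or NETDEV_TX_BUSY to ask for a retry */
	}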
@@ -2254,7 +2229,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
        unsigned int i;
        u32 offset = 0;
        u32 bcnt;
-       u32 size = skb->len-skb->data_len;
+       u32 size = skb_headlen(skb);
        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
        u32 empty_slots;
        struct ring_desc_ex* put_tx;
@@ -2291,6 +2266,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
                np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
                                                PCI_DMA_TODEVICE);
                np->put_tx_ctx->dma_len = bcnt;
+               np->put_tx_ctx->dma_single = 1;
                put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
                put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
                put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2317,6 +2293,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
                        np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
                                                           PCI_DMA_TODEVICE);
                        np->put_tx_ctx->dma_len = bcnt;
+                       np->put_tx_ctx->dma_single = 0;
                        put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
                        put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
                        put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2434,10 +2411,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
                dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
                                        dev->name, flags);
 
-               pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
-                              np->get_tx_ctx->dma_len,
-                              PCI_DMA_TODEVICE);
-               np->get_tx_ctx->dma = 0;
+               nv_unmap_txskb(np, np->get_tx_ctx);
 
                if (np->desc_ver == DESC_VER_1) {
                        if (flags & NV_TX_LASTPACKET) {
@@ -2502,10 +2476,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
                dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
                                        dev->name, flags);
 
-               pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
-                              np->get_tx_ctx->dma_len,
-                              PCI_DMA_TODEVICE);
-               np->get_tx_ctx->dma = 0;
+               nv_unmap_txskb(np, np->get_tx_ctx);
 
                if (flags & NV_TX2_LASTPACKET) {
                        if (!(flags & NV_TX2_ERROR))
@@ -2805,11 +2776,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
                skb->protocol = eth_type_trans(skb, dev);
                dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
                                        dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
-               netif_receive_skb(skb);
-#else
-               netif_rx(skb);
-#endif
+               napi_gro_receive(&np->napi, skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
 next_pkt:
@@ -2898,27 +2865,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                dev->name, len, skb->protocol);
 
                        if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
-                               netif_receive_skb(skb);
-#else
-                               netif_rx(skb);
-#endif
+                               napi_gro_receive(&np->napi, skb);
                        } else {
                                vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
                                if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
-                                       vlan_hwaccel_receive_skb(skb, np->vlangrp,
-                                                                vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
-                                       vlan_hwaccel_rx(skb, np->vlangrp,
-                                                       vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+                                       vlan_gro_receive(&np->napi, np->vlangrp,
+                                                        vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
                                } else {
-#ifdef CONFIG_FORCEDETH_NAPI
-                                       netif_receive_skb(skb);
-#else
-                                       netif_rx(skb);
-#endif
+                                       napi_gro_receive(&np->napi, skb);
                                }
                        }
 
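Both receive paths now hand completed packets to the GRO engine instead of choosing between netif_receive_skb() and netif_rx() at build time: untagged frames go through napi_gro_receive() and, in the optimized path, VLAN-tagged frames through vlan_gro_receive(), letting the stack coalesce consecutive TCP segments before the protocol layers see them. Condensed dispatch:

	/* condensed from the two hunks above: */
	if (vlan_tagged)
		vlan_gro_receive(&np->napi, np->vlangrp,
				 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
	else
		napi_gro_receive(&np->napi, skb);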
@@ -3085,7 +3039,7 @@ static void nv_set_multicast(struct net_device *dev)
        } else {
                pff |= NVREG_PFF_MYADDR;
 
-               if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
+               if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
                        u32 alwaysOff[2];
                        u32 alwaysOn[2];
 
@@ -3093,18 +3047,18 @@ static void nv_set_multicast(struct net_device *dev)
                        if (dev->flags & IFF_ALLMULTI) {
                                alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
                        } else {
-                               struct dev_mc_list *walk;
+                               struct netdev_hw_addr *ha;
 
-                               walk = dev->mc_list;
-                               while (walk != NULL) {
+                               netdev_for_each_mc_addr(ha, dev) {
+                                       unsigned char *addr = ha->addr;
                                        u32 a, b;
-                                       a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
-                                       b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
+
+                                       a = le32_to_cpu(*(__le32 *) addr);
+                                       b = le16_to_cpu(*(__le16 *) (&addr[4]));
                                        alwaysOn[0] &= a;
                                        alwaysOff[0] &= ~a;
                                        alwaysOn[1] &= b;
                                        alwaysOff[1] &= ~b;
-                                       walk = walk->next;
                                }
                        }
                        addr[0] = alwaysOn[0];
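struct dev_mc_list and the open-coded walk of dev->mc_list are replaced by the netdev_hw_addr list and its accessors: netdev_mc_empty() tests for a non-empty multicast list and netdev_for_each_mc_addr() iterates it. The general pattern:

	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		/* ha->addr holds the 6-byte multicast MAC */
		handle_mc_addr(ha->addr);	/* handle_mc_addr() is a placeholder */
	}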
@@ -3485,10 +3439,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-       int total_work = 0;
-       int loop_count = 0;
-#endif
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
@@ -3505,72 +3455,14 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
        nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
-       napi_schedule(&np->napi);
-
-       /* Disable furthur irq's
-          (msix not enabled with napi) */
-       writel(0, base + NvRegIrqMask);
-
-#else
-       do
-       {
-               int work = 0;
-               if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
-                       if (unlikely(nv_alloc_rx(dev))) {
-                               spin_lock(&np->lock);
-                               if (!np->in_shutdown)
-                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                               spin_unlock(&np->lock);
-                       }
-               }
-
-               spin_lock(&np->lock);
-               work += nv_tx_done(dev, TX_WORK_PER_LOOP);
-               spin_unlock(&np->lock);
-
-               if (!work)
-                       break;
-
-               total_work += work;
-
-               loop_count++;
-       }
-       while (loop_count < max_interrupt_work);
-
-       if (nv_change_interrupt_mode(dev, total_work)) {
-               /* setup new irq mask */
-               writel(np->irqmask, base + NvRegIrqMask);
-       }
-
-       if (unlikely(np->events & NVREG_IRQ_LINK)) {
-               spin_lock(&np->lock);
-               nv_link_irq(dev);
-               spin_unlock(&np->lock);
-       }
-       if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-               spin_lock(&np->lock);
-               nv_linkchange(dev);
-               spin_unlock(&np->lock);
-               np->link_timeout = jiffies + LINK_TIMEOUT;
+       if (napi_schedule_prep(&np->napi)) {
+               /*
+                * Disable further irq's (msix not enabled with napi)
+                */
+               writel(0, base + NvRegIrqMask);
+               __napi_schedule(&np->napi);
        }
-       if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-               spin_lock(&np->lock);
-               /* disable interrupts on the nic */
-               if (!(np->msi_flags & NV_MSI_X_ENABLED))
-                       writel(0, base + NvRegIrqMask);
-               else
-                       writel(np->irqmask, base + NvRegIrqMask);
-               pci_push(base);
 
-               if (!np->in_shutdown) {
-                       np->nic_poll_irq = np->irqmask;
-                       np->recover_error = 1;
-                       mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-               }
-               spin_unlock(&np->lock);
-       }
-#endif
        dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
 
        return IRQ_HANDLED;
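napi_schedule_prep() atomically tests and sets the NAPI scheduled bit, so only the caller that wins the race masks the chip's interrupts and calls __napi_schedule(). Splitting napi_schedule() this way lets the IrqMask write sit safely between the test and the actual scheduling; the poll routine presumably restores the mask after napi_complete(). The canonical shape:

	/* canonical NAPI interrupt handler (as used twice in this patch): */
	if (napi_schedule_prep(&np->napi)) {
		writel(0, base + NvRegIrqMask);	/* mask device interrupts */
		__napi_schedule(&np->napi);	/* queue nv_napi_poll() */
	}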
@@ -3586,10 +3478,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-       int total_work = 0;
-       int loop_count = 0;
-#endif
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
 
@@ -3606,73 +3494,13 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
        nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
-       napi_schedule(&np->napi);
-
-       /* Disable furthur irq's
-          (msix not enabled with napi) */
-       writel(0, base + NvRegIrqMask);
-
-#else
-       do
-       {
-               int work = 0;
-               if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
-                       if (unlikely(nv_alloc_rx_optimized(dev))) {
-                               spin_lock(&np->lock);
-                               if (!np->in_shutdown)
-                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                               spin_unlock(&np->lock);
-                       }
-               }
-
-               spin_lock(&np->lock);
-               work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-               spin_unlock(&np->lock);
-
-               if (!work)
-                       break;
-
-               total_work += work;
-
-               loop_count++;
-       }
-       while (loop_count < max_interrupt_work);
-
-       if (nv_change_interrupt_mode(dev, total_work)) {
-               /* setup new irq mask */
-               writel(np->irqmask, base + NvRegIrqMask);
-       }
-
-       if (unlikely(np->events & NVREG_IRQ_LINK)) {
-               spin_lock(&np->lock);
-               nv_link_irq(dev);
-               spin_unlock(&np->lock);
-       }
-       if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-               spin_lock(&np->lock);
-               nv_linkchange(dev);
-               spin_unlock(&np->lock);
-               np->link_timeout = jiffies + LINK_TIMEOUT;
-       }
-       if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-               spin_lock(&np->lock);
-               /* disable interrupts on the nic */
-               if (!(np->msi_flags & NV_MSI_X_ENABLED))
-                       writel(0, base + NvRegIrqMask);
-               else
-                       writel(np->irqmask, base + NvRegIrqMask);
-               pci_push(base);
-
-               if (!np->in_shutdown) {
-                       np->nic_poll_irq = np->irqmask;
-                       np->recover_error = 1;
-                       mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-               }
-               spin_unlock(&np->lock);
+       if (napi_schedule_prep(&np->napi)) {
+               /*
+                * Disable further irq's (msix not enabled with napi)
+                */
+               writel(0, base + NvRegIrqMask);
+               __napi_schedule(&np->napi);
        }
-
-#endif
        dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
        return IRQ_HANDLED;
@@ -3721,7 +3549,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
        return IRQ_RETVAL(i);
 }
 
-#ifdef CONFIG_FORCEDETH_NAPI
 static int nv_napi_poll(struct napi_struct *napi, int budget)
 {
        struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3729,23 +3556,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
        u8 __iomem *base = get_hwbase(dev);
        unsigned long flags;
        int retcode;
-       int tx_work, rx_work;
+       int rx_count, tx_work=0, rx_work=0;
 
-       if (!nv_optimized(np)) {
-               spin_lock_irqsave(&np->lock, flags);
-               tx_work = nv_tx_done(dev, np->tx_ring_size);
-               spin_unlock_irqrestore(&np->lock, flags);
+       do {
+               if (!nv_optimized(np)) {
+                       spin_lock_irqsave(&np->lock, flags);
+                       tx_work += nv_tx_done(dev, np->tx_ring_size);
+                       spin_unlock_irqrestore(&np->lock, flags);
 
-               rx_work = nv_rx_process(dev, budget);
-               retcode = nv_alloc_rx(dev);
-       } else {
-               spin_lock_irqsave(&np->lock, flags);
-               tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
-               spin_unlock_irqrestore(&np->lock, flags);
+                       rx_count = nv_rx_process(dev, budget - rx_work);
+                       retcode = nv_alloc_rx(dev);
+               } else {
+                       spin_lock_irqsave(&np->lock, flags);
+                       tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+                       spin_unlock_irqrestore(&np->lock, flags);
 
-               rx_work = nv_rx_process_optimized(dev, budget);
-               retcode = nv_alloc_rx_optimized(dev);
-       }
+                       rx_count = nv_rx_process_optimized(dev,
+                           budget - rx_work);
+                       retcode = nv_alloc_rx_optimized(dev);
+               }
+       } while (retcode == 0 &&
+                rx_count > 0 && (rx_work += rx_count) < budget);
 
        if (retcode) {
                spin_lock_irqsave(&np->lock, flags);
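The poll routine now loops instead of making a single pass: it keeps reaping tx completions and rx packets until the rx budget is spent, the rx ring runs dry (rx_count == 0), or a refill allocation fails (retcode != 0). Per the NAPI contract, returning less than budget means the poll may complete and re-enable interrupts; returning the full budget keeps it scheduled. Condensed shape of the non-optimized branch:

	do {
		tx_work += nv_tx_done(dev, np->tx_ring_size);
		rx_count = nv_rx_process(dev, budget - rx_work);
		retcode  = nv_alloc_rx(dev);
	} while (retcode == 0 && rx_count > 0 &&
		 (rx_work += rx_count) < budget);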
@@ -3788,7 +3619,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
        }
        return rx_work;
 }
-#endif
 
 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 {
@@ -3991,7 +3821,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                /* Request irq for rx handling */
                                sprintf(np->name_rx, "%s-rx", dev->name);
                                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
-                                               &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+                                               nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
                                        printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
                                        pci_disable_msix(np->pci_dev);
                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4000,7 +3830,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                /* Request irq for tx handling */
                                sprintf(np->name_tx, "%s-tx", dev->name);
                                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
-                                               &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+                                               nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
                                        printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
                                        pci_disable_msix(np->pci_dev);
                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4009,7 +3839,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                /* Request irq for link and timer handling */
                                sprintf(np->name_other, "%s-other", dev->name);
                                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
-                                               &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+                                               nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
                                        printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
                                        pci_disable_msix(np->pci_dev);
                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -5091,7 +4921,7 @@ static int nv_loopback_test(struct net_device *dev)
                dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
        }
 
-       pci_unmap_page(np->pci_dev, test_dma_addr,
+       pci_unmap_single(np->pci_dev, test_dma_addr,
                       (skb_end_pointer(tx_skb) - tx_skb->data),
                       PCI_DMA_TODEVICE);
        dev_kfree_skb_any(tx_skb);
@@ -5694,6 +5524,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                dev->features |= NETIF_F_TSO;
+               dev->features |= NETIF_F_GRO;
        }
 
        np->vlanctl_bits = 0;
@@ -5746,9 +5577,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        else
                dev->netdev_ops = &nv_netdev_ops_optimized;
 
-#ifdef CONFIG_FORCEDETH_NAPI
        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
        SET_ETHTOOL_OPS(dev, &ops);
        dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
@@ -5808,10 +5637,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                        dev->dev_addr);
                dev_printk(KERN_ERR, &pci_dev->dev,
                        "Please complain to your hardware vendor. Switching to a random MAC.\n");
-               dev->dev_addr[0] = 0x00;
-               dev->dev_addr[1] = 0x00;
-               dev->dev_addr[2] = 0x6c;
-               get_random_bytes(&dev->dev_addr[3], 3);
+               random_ether_addr(dev->dev_addr);
        }
 
        dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
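random_ether_addr() replaces the hand-rolled fallback (a fixed 00:00:6c prefix plus three random bytes) with a fully random address forced into the locally-administered unicast range. Paraphrasing its implementation in include/linux/etherdevice.h:

	get_random_bytes(addr, ETH_ALEN);
	addr[0] &= 0xfe;	/* clear the multicast bit */
	addr[0] |= 0x02;	/* set the locally-administered bit */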
@@ -5854,7 +5680,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                /* msix has had reported issues when modifying irqmask
                   as in the case of napi, therefore, disable for now
                */
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
                np->msi_flags |= NV_MSI_X_CAPABLE;
 #endif
        }
@@ -5890,7 +5716,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        /* Limit the number of tx's outstanding for hw bug */
        if (id->driver_data & DEV_NEED_TX_LIMIT) {
                np->tx_limit = 1;
-               if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
+               if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
                    pci_dev->revision >= 0xA2)
                        np->tx_limit = 0;
        }
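The stricter test matters because DEV_NEED_TX_LIMIT2 evidently includes the DEV_NEED_TX_LIMIT bit: with a plain bitwise AND, a device flagged only DEV_NEED_TX_LIMIT would also have matched and wrongly skipped the limit. Testing for the complete mask is the usual idiom for multi-bit flags:

	/* illustrative values, not the driver's actual flag bits: */
	#define FLAG_A	0x1
	#define FLAG_AB	(FLAG_A | 0x2)

	if ((flags & FLAG_AB) == FLAG_AB)	/* both bits must be set */
		apply_quirk();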
@@ -6188,7 +6014,7 @@ static void nv_shutdown(struct pci_dev *pdev)
 #define nv_resume NULL
 #endif /* CONFIG_PM */
 
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
        {       /* nForce Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x01C3),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,