gianfar: Introduce logical group support.
[safe/jmp/linux-2.6] / drivers / net / gianfar.c
index 0ab4b26..fa0188e 100644 (file)
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
  *
  *  Theory of operation
  *
- *  The driver is initialized through platform_device.  Structures which
- *  define the configuration needed by the board are defined in a
- *  board structure in arch/ppc/platforms (though I do not
- *  discount the possibility that other architectures could one
- *  day be supported.
+ *  The driver is initialized through of_device. Configuration information
+ *  is therefore conveyed through an OF-style device tree.
  *
  *  The Gianfar Ethernet Controller uses a ring of buffer
  *  descriptors.  The beginning is indicated by a register
@@ -44,8 +42,7 @@
  *  happen immediately, but will wait until either a set number
  *  of frames or amount of time have passed).  In NAPI, the
  *  interrupt handler will signal there is work to be done, and
- *  exit.  Without NAPI, the packet(s) will be handled
- *  immediately.  Both methods will start at the last known empty
+ *  exit. This method will start at the last known empty
  *  descriptor, and process every subsequent descriptor until there
  *  are none left with data (NAPI will stop after a set number of
  *  packets to give time to other tasks, but will eventually
@@ -79,7 +76,8 @@
 #include <linux/if_vlan.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
-#include <linux/platform_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of.h>
 
 #include "gianfar.h"
-#include "gianfar_mii.h"
+#include "fsl_pq_mdio.h"
 
 #define TX_TIMEOUT      (1*HZ)
-#define SKB_ALLOC_TIMEOUT 1000000
 #undef BRIEF_GFAR_ERRORS
 #undef VERBOSE_GFAR_ERRORS
 
-#ifdef CONFIG_GFAR_NAPI
-#define RECEIVE(x) netif_receive_skb(x)
-#else
-#define RECEIVE(x) netif_rx(x)
-#endif
-
 const char gfar_driver_name[] = "Gianfar Ethernet";
 const char gfar_driver_version[] = "1.3";
 
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
+struct sk_buff *gfar_new_skb(struct net_device *dev);
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+               struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -124,61 +120,406 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
 static void init_registers(struct net_device *dev);
 static int init_phy(struct net_device *dev);
-static int gfar_probe(struct platform_device *pdev);
-static int gfar_remove(struct platform_device *pdev);
+static int gfar_probe(struct of_device *ofdev,
+               const struct of_device_id *match);
+static int gfar_remove(struct of_device *ofdev);
 static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 static void gfar_configure_serdes(struct net_device *dev);
-extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
-extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
-#ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct napi_struct *napi, int budget);
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+                             int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
 void gfar_halt(struct net_device *dev);
+static void gfar_halt_nodisable(struct net_device *dev);
 void gfar_start(struct net_device *dev);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
-
-extern const struct ethtool_ops gfar_ethtool_ops;
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+                           dma_addr_t buf)
+{
+       u32 lstatus;
+
+       bdp->bufPtr = buf;
+
+       lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+       if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+               lstatus |= BD_LFLAG(RXBD_WRAP);
+
+       eieio();
+
+       bdp->lstatus = lstatus;
+}
+
+static int gfar_init_bds(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       struct txbd8 *txbdp;
+       struct rxbd8 *rxbdp;
+       int i;
+
+       tx_queue = priv->tx_queue;
+       rx_queue = priv->rx_queue;
+
+       /* Initialize some variables in our dev structure */
+       tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+       tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
+       rx_queue->cur_rx = rx_queue->rx_bd_base;
+       tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
+       rx_queue->skb_currx = 0;
+
+       /* Initialize Transmit Descriptor Ring */
+       txbdp = tx_queue->tx_bd_base;
+       for (i = 0; i < tx_queue->tx_ring_size; i++) {
+               txbdp->lstatus = 0;
+               txbdp->bufPtr = 0;
+               txbdp++;
+       }
+
+       /* Set the last descriptor in the ring to indicate wrap */
+       txbdp--;
+       txbdp->status |= TXBD_WRAP;
+
+       rxbdp = rx_queue->rx_bd_base;
+       for (i = 0; i < rx_queue->rx_ring_size; i++) {
+               struct sk_buff *skb = rx_queue->rx_skbuff[i];
+
+               if (skb) {
+                       gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
+               } else {
+                       skb = gfar_new_skb(ndev);
+                       if (!skb) {
+                               pr_err("%s: Can't allocate RX buffers\n",
+                                      ndev->name);
+                               return -ENOMEM;
+                       }
+                       rx_queue->rx_skbuff[i] = skb;
+
+                       gfar_new_rxbdp(rx_queue, rxbdp, skb);
+               }
+
+               rxbdp++;
+       }
+
+       return 0;
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+       void *vaddr;
+       int i;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct device *dev = &priv->ofdev->dev;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+
+       tx_queue = priv->tx_queue;
+       rx_queue = priv->rx_queue;
+
+       /* Allocate memory for the buffer descriptors */
+       vaddr = dma_alloc_coherent(dev,
+                       sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
+                       sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
+                       &tx_queue->tx_bd_dma_base, GFP_KERNEL);
+       if (!vaddr) {
+               if (netif_msg_ifup(priv))
+                       pr_err("%s: Could not allocate buffer descriptors!\n",
+                              ndev->name);
+               return -ENOMEM;
+       }
+
+       tx_queue->tx_bd_base = vaddr;
+       tx_queue->dev = ndev;
+
+       /* Start the rx descriptor ring where the tx ring leaves off */
+       vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
+       rx_queue->rx_bd_base = vaddr;
+       rx_queue->dev = ndev;
+
+       /* Setup the skbuff rings */
+       tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+                                 tx_queue->tx_ring_size, GFP_KERNEL);
+       if (!tx_queue->tx_skbuff) {
+               if (netif_msg_ifup(priv))
+                       pr_err("%s: Could not allocate tx_skbuff\n",
+                              ndev->name);
+               goto cleanup;
+       }
+
+       for (i = 0; i < tx_queue->tx_ring_size; i++)
+               tx_queue->tx_skbuff[i] = NULL;
+
+       rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+                                 rx_queue->rx_ring_size, GFP_KERNEL);
+       if (!rx_queue->rx_skbuff) {
+               if (netif_msg_ifup(priv))
+                       pr_err("%s: Could not allocate rx_skbuff\n",
+                              ndev->name);
+               goto cleanup;
+       }
+
+       for (i = 0; i < rx_queue->rx_ring_size; i++)
+               rx_queue->rx_skbuff[i] = NULL;
+
+       if (gfar_init_bds(ndev))
+               goto cleanup;
+
+       return 0;
+
+cleanup:
+       free_skb_resources(priv);
+       return -ENOMEM;
+}
+
/*
 * Program the MAC-level registers from the driver's current settings:
 * descriptor ring base addresses, interrupt coalescing, RCTRL/TCTRL
 * (checksumming, hashing, padding, VLAN), stashing attributes and the
 * TX FIFO thresholds.  Called with the controller halted; the register
 * writes below are in hardware-required order -- do not reorder.
 */
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* enet DMA only understands physical addresses; the RX ring
	 * starts immediately after the TX ring in the same coherent
	 * allocation (see gfar_alloc_skb_resources) */
	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
				  sizeof(*tx_queue->tx_bd_base) *
				  tx_queue->tx_ring_size);

	/* Configure the coalescing support (cleared first, then set
	 * only if the queue has coalescing enabled) */
	gfar_write(&regs->txic, 0);
	if (tx_queue->txcoalescing)
		gfar_write(&regs->txic, tx_queue->txic);

	gfar_write(&regs->rxic, 0);
	if (rx_queue->rxcoalescing)
		gfar_write(&regs->rxic, rx_queue->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
+
/* net_device_ops dispatch table wiring the stack's entry points
 * (open/close, xmit, ioctl, multicast, MTU, VLAN, netpoll) to the
 * gianfar handlers; installed on the netdev in gfar_probe(). */
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
+
 /* Returns 1 if incoming frames use an FCB */
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
-       return (priv->vlan_enable || priv->rx_csum_enable);
+       return priv->vlgrp || priv->rx_csum_enable;
+}
+
+static int gfar_of_init(struct net_device *dev)
+{
+       const char *model;
+       const char *ctype;
+       const void *mac_addr;
+       u64 addr, size;
+       int err = 0;
+       struct gfar_private *priv = netdev_priv(dev);
+       struct device_node *np = priv->node;
+       const u32 *stash;
+       const u32 *stash_len;
+       const u32 *stash_idx;
+
+       if (!np || !of_device_is_available(np))
+               return -ENODEV;
+
+       /* get a pointer to the register memory */
+       addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
+       priv->gfargrp.regs = ioremap(addr, size);
+
+       if (priv->gfargrp.regs == NULL)
+               return -ENOMEM;
+
+       priv->gfargrp.priv = priv; /* back pointer from group to priv */
+       priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);
+
+       model = of_get_property(np, "model", NULL);
+
+       /* If we aren't the FEC we have multiple interrupts */
+       if (model && strcasecmp(model, "FEC")) {
+               priv->gfargrp.interruptReceive = irq_of_parse_and_map(np, 1);
+
+               priv->gfargrp.interruptError = irq_of_parse_and_map(np, 2);
+
+               if (priv->gfargrp.interruptTransmit < 0 ||
+                               priv->gfargrp.interruptReceive < 0 ||
+                               priv->gfargrp.interruptError < 0) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+       }
+
+       stash = of_get_property(np, "bd-stash", NULL);
+
+       if (stash) {
+               priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
+               priv->bd_stash_en = 1;
+       }
+
+       stash_len = of_get_property(np, "rx-stash-len", NULL);
+
+       if (stash_len)
+               priv->rx_stash_size = *stash_len;
+
+       stash_idx = of_get_property(np, "rx-stash-idx", NULL);
+
+       if (stash_idx)
+               priv->rx_stash_index = *stash_idx;
+
+       if (stash_len || stash_idx)
+               priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
+
+       mac_addr = of_get_mac_address(np);
+       if (mac_addr)
+               memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+
+       if (model && !strcasecmp(model, "TSEC"))
+               priv->device_flags =
+                       FSL_GIANFAR_DEV_HAS_GIGABIT |
+                       FSL_GIANFAR_DEV_HAS_COALESCE |
+                       FSL_GIANFAR_DEV_HAS_RMON |
+                       FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+       if (model && !strcasecmp(model, "eTSEC"))
+               priv->device_flags =
+                       FSL_GIANFAR_DEV_HAS_GIGABIT |
+                       FSL_GIANFAR_DEV_HAS_COALESCE |
+                       FSL_GIANFAR_DEV_HAS_RMON |
+                       FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+                       FSL_GIANFAR_DEV_HAS_PADDING |
+                       FSL_GIANFAR_DEV_HAS_CSUM |
+                       FSL_GIANFAR_DEV_HAS_VLAN |
+                       FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+
+       ctype = of_get_property(np, "phy-connection-type", NULL);
+
+       /* We only care about rgmii-id.  The rest are autodetected */
+       if (ctype && !strcmp(ctype, "rgmii-id"))
+               priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
+       else
+               priv->interface = PHY_INTERFACE_MODE_MII;
+
+       if (of_get_property(np, "fsl,magic-packet", NULL))
+               priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+
+       priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+       /* Find the TBI PHY.  If it's not there, we don't support SGMII */
+       priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+       return 0;
+
+err_out:
+       iounmap(priv->gfargrp.regs);
+       return err;
+}
+
+/* Ioctl MII Interface */
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       if (!priv->phydev)
+               return -ENODEV;
+
+       return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
 }
 
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
-static int gfar_probe(struct platform_device *pdev)
+static int gfar_probe(struct of_device *ofdev,
+               const struct of_device_id *match)
 {
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
-       struct gianfar_platform_data *einfo;
-       struct resource *r;
+       struct gfar __iomem *regs = NULL;
        int err = 0;
-       DECLARE_MAC_BUF(mac);
-
-       einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
-
-       if (NULL == einfo) {
-               printk(KERN_ERR "gfar %d: Missing additional data!\n",
-                      pdev->id);
-
-               return -ENODEV;
-       }
+       int len_devname;
 
        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));
@@ -187,144 +528,115 @@ static int gfar_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        priv = netdev_priv(dev);
-       priv->dev = dev;
-
-       /* Set the info in the priv to the current info */
-       priv->einfo = einfo;
-
-       /* fill out IRQ fields */
-       if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
-               priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
-               priv->interruptError = platform_get_irq_byname(pdev, "error");
-               if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
-                       goto regs_fail;
-       } else {
-               priv->interruptTransmit = platform_get_irq(pdev, 0);
-               if (priv->interruptTransmit < 0)
-                       goto regs_fail;
-       }
+       priv->ndev = dev;
+       priv->ofdev = ofdev;
+       priv->node = ofdev->node;
+       SET_NETDEV_DEV(dev, &ofdev->dev);
 
-       /* get a pointer to the register memory */
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->regs = ioremap(r->start, sizeof (struct gfar));
+       err = gfar_of_init(dev);
 
-       if (NULL == priv->regs) {
-               err = -ENOMEM;
+       if (err)
                goto regs_fail;
-       }
 
-       spin_lock_init(&priv->txlock);
-       spin_lock_init(&priv->rxlock);
+       priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
+                               sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+       if (!priv->tx_queue)
+               goto regs_fail;
 
-       platform_set_drvdata(pdev, dev);
+       priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
+                               sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+       if (!priv->rx_queue)
+               goto rx_queue_fail;
 
-       /* Stop the DMA engine now, in case it was running before */
-       /* (The firmware could have used it, and left it running). */
-       /* To do this, we write Graceful Receive Stop and Graceful */
-       /* Transmit Stop, and then wait until the corresponding bits */
-       /* in IEVENT indicate the stops have completed. */
-       tempval = gfar_read(&priv->regs->dmactrl);
-       tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-       gfar_write(&priv->regs->dmactrl, tempval);
+       spin_lock_init(&priv->tx_queue->txlock);
+       spin_lock_init(&priv->rx_queue->rxlock);
+       spin_lock_init(&priv->gfargrp.grplock);
+       spin_lock_init(&priv->bflock);
+       INIT_WORK(&priv->reset_task, gfar_reset_task);
 
-       tempval = gfar_read(&priv->regs->dmactrl);
-       tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-       gfar_write(&priv->regs->dmactrl, tempval);
+       dev_set_drvdata(&ofdev->dev, priv);
+       regs = priv->gfargrp.regs;
 
-       while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
-               cpu_relax();
+       /* Stop the DMA engine now, in case it was running before */
+       /* (The firmware could have used it, and left it running). */
+       gfar_halt(dev);
 
        /* Reset MAC layer */
-       gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+       gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+       /* We need to delay at least 3 TX clocks */
+       udelay(2);
 
        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-       gfar_write(&priv->regs->maccfg1, tempval);
+       gfar_write(&regs->maccfg1, tempval);
 
        /* Initialize MACCFG2. */
-       gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+       gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
 
        /* Initialize ECNTRL */
-       gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
-
-       /* Copy the station address into the dev structure, */
-       memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
+       gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
 
        /* Set the dev->base_addr to the gfar reg region */
-       dev->base_addr = (unsigned long) (priv->regs);
+       dev->base_addr = (unsigned long) regs;
 
-       SET_NETDEV_DEV(dev, &pdev->dev);
+       SET_NETDEV_DEV(dev, &ofdev->dev);
 
        /* Fill in the dev structure */
-       dev->open = gfar_enet_open;
-       dev->hard_start_xmit = gfar_start_xmit;
-       dev->tx_timeout = gfar_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
-#ifdef CONFIG_GFAR_NAPI
-       netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = gfar_netpoll;
-#endif
-       dev->stop = gfar_close;
-       dev->change_mtu = gfar_change_mtu;
        dev->mtu = 1500;
-       dev->set_multicast_list = gfar_set_multi;
-
+       dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;
 
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+       /* Register for napi ...NAPI is for each rx_queue */
+       netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);
+
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
-               dev->features |= NETIF_F_IP_CSUM;
+               dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
        } else
                priv->rx_csum_enable = 0;
 
        priv->vlgrp = NULL;
 
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
-               dev->vlan_rx_register = gfar_vlan_rx_register;
-
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 
-               priv->vlan_enable = 1;
-       }
-
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;
 
-               priv->hash_regs[0] = &priv->regs->igaddr0;
-               priv->hash_regs[1] = &priv->regs->igaddr1;
-               priv->hash_regs[2] = &priv->regs->igaddr2;
-               priv->hash_regs[3] = &priv->regs->igaddr3;
-               priv->hash_regs[4] = &priv->regs->igaddr4;
-               priv->hash_regs[5] = &priv->regs->igaddr5;
-               priv->hash_regs[6] = &priv->regs->igaddr6;
-               priv->hash_regs[7] = &priv->regs->igaddr7;
-               priv->hash_regs[8] = &priv->regs->gaddr0;
-               priv->hash_regs[9] = &priv->regs->gaddr1;
-               priv->hash_regs[10] = &priv->regs->gaddr2;
-               priv->hash_regs[11] = &priv->regs->gaddr3;
-               priv->hash_regs[12] = &priv->regs->gaddr4;
-               priv->hash_regs[13] = &priv->regs->gaddr5;
-               priv->hash_regs[14] = &priv->regs->gaddr6;
-               priv->hash_regs[15] = &priv->regs->gaddr7;
+               priv->hash_regs[0] = &regs->igaddr0;
+               priv->hash_regs[1] = &regs->igaddr1;
+               priv->hash_regs[2] = &regs->igaddr2;
+               priv->hash_regs[3] = &regs->igaddr3;
+               priv->hash_regs[4] = &regs->igaddr4;
+               priv->hash_regs[5] = &regs->igaddr5;
+               priv->hash_regs[6] = &regs->igaddr6;
+               priv->hash_regs[7] = &regs->igaddr7;
+               priv->hash_regs[8] = &regs->gaddr0;
+               priv->hash_regs[9] = &regs->gaddr1;
+               priv->hash_regs[10] = &regs->gaddr2;
+               priv->hash_regs[11] = &regs->gaddr3;
+               priv->hash_regs[12] = &regs->gaddr4;
+               priv->hash_regs[13] = &regs->gaddr5;
+               priv->hash_regs[14] = &regs->gaddr6;
+               priv->hash_regs[15] = &regs->gaddr7;
 
        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;
 
-               priv->hash_regs[0] = &priv->regs->gaddr0;
-                priv->hash_regs[1] = &priv->regs->gaddr1;
-               priv->hash_regs[2] = &priv->regs->gaddr2;
-               priv->hash_regs[3] = &priv->regs->gaddr3;
-               priv->hash_regs[4] = &priv->regs->gaddr4;
-               priv->hash_regs[5] = &priv->regs->gaddr5;
-               priv->hash_regs[6] = &priv->regs->gaddr6;
-               priv->hash_regs[7] = &priv->regs->gaddr7;
+               priv->hash_regs[0] = &regs->gaddr0;
+               priv->hash_regs[1] = &regs->gaddr1;
+               priv->hash_regs[2] = &regs->gaddr2;
+               priv->hash_regs[3] = &regs->gaddr3;
+               priv->hash_regs[4] = &regs->gaddr4;
+               priv->hash_regs[5] = &regs->gaddr5;
+               priv->hash_regs[6] = &regs->gaddr6;
+               priv->hash_regs[7] = &regs->gaddr7;
        }
 
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;
@@ -333,19 +645,23 @@ static int gfar_probe(struct platform_device *pdev)
                dev->hard_header_len += GMAC_FCB_LEN;
 
        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-       priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-       priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
 
-       priv->txcoalescing = DEFAULT_TX_COALESCE;
-       priv->txcount = DEFAULT_TXCOUNT;
-       priv->txtime = DEFAULT_TXTIME;
-       priv->rxcoalescing = DEFAULT_RX_COALESCE;
-       priv->rxcount = DEFAULT_RXCOUNT;
-       priv->rxtime = DEFAULT_RXTIME;
+       /* Initializing some of the rx/tx queue level parameters */
+       priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
+       priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
+       priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
+       priv->tx_queue->txic = DEFAULT_TXIC;
+
+       priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
+       priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
+       priv->rx_queue->rxic = DEFAULT_RXIC;
 
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
 
        if (err) {
@@ -354,45 +670,229 @@ static int gfar_probe(struct platform_device *pdev)
                goto register_fail;
        }
 
+       device_init_wakeup(&dev->dev,
+               priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+       /* fill out IRQ number and name fields */
+       len_devname = strlen(dev->name);
+       strncpy(&priv->gfargrp.int_name_tx[0], dev->name, len_devname);
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+               strncpy(&priv->gfargrp.int_name_tx[len_devname],
+                       "_tx", sizeof("_tx") + 1);
+
+               strncpy(&priv->gfargrp.int_name_rx[0], dev->name, len_devname);
+               strncpy(&priv->gfargrp.int_name_rx[len_devname],
+                       "_rx", sizeof("_rx") + 1);
+
+               strncpy(&priv->gfargrp.int_name_er[0], dev->name, len_devname);
+               strncpy(&priv->gfargrp.int_name_er[len_devname],
+                       "_er", sizeof("_er") + 1);
+       } else
+               priv->gfargrp.int_name_tx[len_devname] = '\0';
+
        /* Create all the sysfs files */
        gfar_init_sysfs(dev);
 
        /* Print out the device info */
-       printk(KERN_INFO DEVICE_NAME "%s\n",
-              dev->name, print_mac(mac, dev->dev_addr));
+       printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
 
        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
-#ifdef CONFIG_GFAR_NAPI
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
-#else
-       printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
-#endif
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-              dev->name, priv->rx_ring_size, priv->tx_ring_size);
+              dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size);
 
        return 0;
 
 register_fail:
-       iounmap(priv->regs);
+       iounmap(priv->gfargrp.regs);
+       kfree(priv->rx_queue);
+rx_queue_fail:
+       kfree(priv->tx_queue);
 regs_fail:
+       if (priv->phy_node)
+               of_node_put(priv->phy_node);
+       if (priv->tbi_node)
+               of_node_put(priv->tbi_node);
        free_netdev(dev);
        return err;
 }
 
-static int gfar_remove(struct platform_device *pdev)
+static int gfar_remove(struct of_device *ofdev)
 {
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
 
-       platform_set_drvdata(pdev, NULL);
+       if (priv->phy_node)
+               of_node_put(priv->phy_node);
+       if (priv->tbi_node)
+               of_node_put(priv->tbi_node);
 
-       iounmap(priv->regs);
-       free_netdev(dev);
+       dev_set_drvdata(&ofdev->dev, NULL);
+
+       unregister_netdev(priv->ndev);
+       iounmap(priv->gfargrp.regs);
+       free_netdev(priv->ndev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int gfar_suspend(struct device *dev)
+{
+       struct gfar_private *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->ndev;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       struct gfar __iomem *regs = NULL;
+       unsigned long flags;
+       u32 tempval;
+
+       int magic_packet = priv->wol_en &&
+               (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+       netif_device_detach(ndev);
+       tx_queue = priv->tx_queue;
+       rx_queue = priv->rx_queue;
+       regs = priv->gfargrp.regs;
+
+       if (netif_running(ndev)) {
+               spin_lock_irqsave(&tx_queue->txlock, flags);
+               spin_lock(&rx_queue->rxlock);
+
+               gfar_halt_nodisable(ndev);
+
+               /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+               tempval = gfar_read(&regs->maccfg1);
+
+               tempval &= ~MACCFG1_TX_EN;
+
+               if (!magic_packet)
+                       tempval &= ~MACCFG1_RX_EN;
+
+               gfar_write(&regs->maccfg1, tempval);
+
+               spin_unlock(&rx_queue->rxlock);
+               spin_unlock_irqrestore(&tx_queue->txlock, flags);
+
+               napi_disable(&rx_queue->napi);
+
+               if (magic_packet) {
+                       /* Enable interrupt on Magic Packet */
+                       gfar_write(&regs->imask, IMASK_MAG);
+
+                       /* Enable Magic Packet mode */
+                       tempval = gfar_read(&regs->maccfg2);
+                       tempval |= MACCFG2_MPEN;
+                       gfar_write(&regs->maccfg2, tempval);
+               } else {
+                       phy_stop(priv->phydev);
+               }
+       }
 
        return 0;
 }
 
+static int gfar_resume(struct device *dev)
+{
+       struct gfar_private *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->ndev;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       struct gfar __iomem *regs = NULL;
+       unsigned long flags;
+       u32 tempval;
+       int magic_packet = priv->wol_en &&
+               (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+       if (!netif_running(ndev)) {
+               netif_device_attach(ndev);
+               return 0;
+       }
+
+       if (!magic_packet && priv->phydev)
+               phy_start(priv->phydev);
+
+       /* Disable Magic Packet mode, in case something
+        * else woke us up.
+        */
+       rx_queue = priv->rx_queue;
+       tx_queue = priv->tx_queue;
+       regs = priv->gfargrp.regs;
+
+       spin_lock_irqsave(&tx_queue->txlock, flags);
+       spin_lock(&rx_queue->rxlock);
+
+       tempval = gfar_read(&regs->maccfg2);
+       tempval &= ~MACCFG2_MPEN;
+       gfar_write(&regs->maccfg2, tempval);
+
+       gfar_start(ndev);
+
+       spin_unlock(&rx_queue->rxlock);
+       spin_unlock_irqrestore(&tx_queue->txlock, flags);
+
+       netif_device_attach(ndev);
+
+       napi_enable(&rx_queue->napi);
+
+       return 0;
+}
+
+static int gfar_restore(struct device *dev)
+{
+       struct gfar_private *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->ndev;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       gfar_init_bds(ndev);
+       init_registers(ndev);
+       gfar_set_mac_address(ndev);
+       gfar_init_mac(ndev);
+       gfar_start(ndev);
+
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
+       if (priv->phydev)
+               phy_start(priv->phydev);
+
+       netif_device_attach(ndev);
+       napi_enable(&priv->rx_queue->napi);
+
+       return 0;
+}
+
+static struct dev_pm_ops gfar_pm_ops = {
+       .suspend = gfar_suspend,
+       .resume = gfar_resume,
+       .freeze = gfar_suspend,
+       .thaw = gfar_resume,
+       .restore = gfar_restore,
+};
+
+#define GFAR_PM_OPS (&gfar_pm_ops)
+
+static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
+{
+       return gfar_suspend(&ofdev->dev);
+}
+
+static int gfar_legacy_resume(struct of_device *ofdev)
+{
+       return gfar_resume(&ofdev->dev);
+}
+
+#else
+
+#define GFAR_PM_OPS NULL
+#define gfar_legacy_suspend NULL
+#define gfar_legacy_resume NULL
+
+#endif
 
 /* Reads the controller's registers to determine what interface
  * connects it to the PHY.
@@ -400,7 +900,11 @@ static int gfar_remove(struct platform_device *pdev)
 static phy_interface_t gfar_get_interface(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       u32 ecntrl = gfar_read(&priv->regs->ecntrl);
+       struct gfar __iomem *regs = NULL;
+       u32 ecntrl;
+
+       regs = priv->gfargrp.regs;
+       ecntrl = gfar_read(&regs->ecntrl);
 
        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;
@@ -416,7 +920,7 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else {
-                       phy_interface_t interface = priv->einfo->interface;
+                       phy_interface_t interface = priv->interface;
 
                        /*
                         * This isn't autodetected right now, so it must
@@ -429,7 +933,7 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
                }
        }
 
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;
 
        return PHY_INTERFACE_MODE_MII;
@@ -443,114 +947,135 @@ static int init_phy(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
-               priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+               priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
-       struct phy_device *phydev;
-       char phy_id[BUS_ID_SIZE];
        phy_interface_t interface;
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;
 
-       snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
-
        interface = gfar_get_interface(dev);
 
-       phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
+       priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+                                     interface);
+       if (!priv->phydev)
+               priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+                                                        interface);
+       if (!priv->phydev) {
+               dev_err(&dev->dev, "could not attach to PHY\n");
+               return -ENODEV;
+       }
 
        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);
 
-       if (IS_ERR(phydev)) {
-               printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
-               return PTR_ERR(phydev);
-       }
-
        /* Remove any features not supported by the controller */
-       phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
-       phydev->advertising = phydev->supported;
-
-       priv->phydev = phydev;
+       priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+       priv->phydev->advertising = priv->phydev->supported;
 
        return 0;
 }
 
+/*
+ * Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip.  We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register.  We assume
+ * that the TBIPA register is valid.  Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
 static void gfar_configure_serdes(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar_mii __iomem *regs =
-                       (void __iomem *)&priv->regs->gfar_mii_regs;
+       struct phy_device *tbiphy;
+
+       if (!priv->tbi_node) {
+               dev_warn(&dev->dev, "error: SGMII mode requires that the "
+                                   "device tree specify a tbi-handle\n");
+               return;
+       }
 
-       /* Initialise TBI i/f to communicate with serdes (lynx phy) */
+       tbiphy = of_phy_find_device(priv->tbi_node);
+       if (!tbiphy) {
+               dev_err(&dev->dev, "error: Could not get TBI device\n");
+               return;
+       }
 
-       /* Single clk mode, mii mode off(for aerdes communication) */
-       gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
+       /*
+        * If the link is already up, we must already be ok, and don't need to
+        * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
+        * everything for us?  Resetting it takes the link down and requires
+        * several seconds for it to come back.
+        */
+       if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+               return;
 
-       /* Supported pause and full-duplex, no half-duplex */
-       gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
+       /* Single clk mode, mii mode off(for serdes communication) */
+       phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+
+       phy_write(tbiphy, MII_ADVERTISE,
                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                        ADVERTISE_1000XPSE_ASYM);
 
-       /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
-       gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
+       phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
 }
 
 static void init_registers(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = NULL;
 
+       regs = priv->gfargrp.regs;
        /* Clear IEVENT */
-       gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+       gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
        /* Initialize IMASK */
-       gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+       gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
        /* Init hash registers to zero */
-       gfar_write(&priv->regs->igaddr0, 0);
-       gfar_write(&priv->regs->igaddr1, 0);
-       gfar_write(&priv->regs->igaddr2, 0);
-       gfar_write(&priv->regs->igaddr3, 0);
-       gfar_write(&priv->regs->igaddr4, 0);
-       gfar_write(&priv->regs->igaddr5, 0);
-       gfar_write(&priv->regs->igaddr6, 0);
-       gfar_write(&priv->regs->igaddr7, 0);
-
-       gfar_write(&priv->regs->gaddr0, 0);
-       gfar_write(&priv->regs->gaddr1, 0);
-       gfar_write(&priv->regs->gaddr2, 0);
-       gfar_write(&priv->regs->gaddr3, 0);
-       gfar_write(&priv->regs->gaddr4, 0);
-       gfar_write(&priv->regs->gaddr5, 0);
-       gfar_write(&priv->regs->gaddr6, 0);
-       gfar_write(&priv->regs->gaddr7, 0);
+       gfar_write(&regs->igaddr0, 0);
+       gfar_write(&regs->igaddr1, 0);
+       gfar_write(&regs->igaddr2, 0);
+       gfar_write(&regs->igaddr3, 0);
+       gfar_write(&regs->igaddr4, 0);
+       gfar_write(&regs->igaddr5, 0);
+       gfar_write(&regs->igaddr6, 0);
+       gfar_write(&regs->igaddr7, 0);
+
+       gfar_write(&regs->gaddr0, 0);
+       gfar_write(&regs->gaddr1, 0);
+       gfar_write(&regs->gaddr2, 0);
+       gfar_write(&regs->gaddr3, 0);
+       gfar_write(&regs->gaddr4, 0);
+       gfar_write(&regs->gaddr5, 0);
+       gfar_write(&regs->gaddr6, 0);
+       gfar_write(&regs->gaddr7, 0);
 
        /* Zero out the rmon mib registers if it has them */
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-               memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+               memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
 
                /* Mask off the CAM interrupts */
-               gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
-               gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+               gfar_write(&regs->rmon.cam1, 0xffffffff);
+               gfar_write(&regs->rmon.cam2, 0xffffffff);
        }
 
        /* Initialize the max receive buffer length */
-       gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+       gfar_write(&regs->mrblr, priv->rx_buffer_size);
 
        /* Initialize the Minimum Frame Length Register */
-       gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
-
-       /* Assign the TBI an address which won't conflict with the PHYs */
-       gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
+       gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
 }
 
 
 /* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+static void gfar_halt_nodisable(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 tempval;
 
        /* Mask all interrupts */
@@ -560,16 +1085,26 @@ void gfar_halt(struct net_device *dev)
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
        /* Stop the DMA, and wait for it to stop */
-       tempval = gfar_read(&priv->regs->dmactrl);
+       tempval = gfar_read(&regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-               gfar_write(&priv->regs->dmactrl, tempval);
+               gfar_write(&regs->dmactrl, tempval);
 
-               while (!(gfar_read(&priv->regs->ievent) &
+               while (!(gfar_read(&regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }
+}
+
+/* Halt the receive and transmit queues */
+void gfar_halt(struct net_device *dev)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = priv->gfargrp.regs;
+       u32 tempval;
+
+       gfar_halt_nodisable(dev);
 
        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
@@ -580,92 +1115,106 @@ void gfar_halt(struct net_device *dev)
 void stop_gfar(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
        unsigned long flags;
 
        phy_stop(priv->phydev);
 
+       tx_queue = priv->tx_queue;
+       rx_queue = priv->rx_queue;
+
        /* Lock it down */
-       spin_lock_irqsave(&priv->txlock, flags);
-       spin_lock(&priv->rxlock);
+       spin_lock_irqsave(&tx_queue->txlock, flags);
+       spin_lock(&rx_queue->rxlock);
 
        gfar_halt(dev);
 
-       spin_unlock(&priv->rxlock);
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       spin_unlock(&rx_queue->rxlock);
+       spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
        /* Free the IRQs */
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               free_irq(priv->interruptError, dev);
-               free_irq(priv->interruptTransmit, dev);
-               free_irq(priv->interruptReceive, dev);
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+               free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
+               free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
+               free_irq(priv->gfargrp.interruptReceive, &priv->gfargrp);
        } else {
-               free_irq(priv->interruptTransmit, dev);
+               free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
        }
 
        free_skb_resources(priv);
-
-       dma_free_coherent(&dev->dev,
-                       sizeof(struct txbd8)*priv->tx_ring_size
-                       + sizeof(struct rxbd8)*priv->rx_ring_size,
-                       priv->tx_bd_base,
-                       gfar_read(&regs->tbase0));
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
  * Then free tx_skbuff and rx_skbuff */
 static void free_skb_resources(struct gfar_private *priv)
 {
+       struct device *dev = &priv->ofdev->dev;
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
-       int i;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       int i, j;
 
        /* Go through all the buffer descriptors and free their data buffers */
-       txbdp = priv->tx_bd_base;
-
-       for (i = 0; i < priv->tx_ring_size; i++) {
-
-               if (priv->tx_skbuff[i]) {
-                       dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
-                                       txbdp->length,
-                                       DMA_TO_DEVICE);
-                       dev_kfree_skb_any(priv->tx_skbuff[i]);
-                       priv->tx_skbuff[i] = NULL;
+       tx_queue = priv->tx_queue;
+       txbdp = tx_queue->tx_bd_base;
+
+       if (!tx_queue->tx_skbuff)
+               goto skip_tx_skbuff;
+
+       for (i = 0; i < tx_queue->tx_ring_size; i++) {
+               if (!tx_queue->tx_skbuff[i])
+                       continue;
+
+               dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+                               txbdp->length, DMA_TO_DEVICE);
+               txbdp->lstatus = 0;
+               for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
+                       txbdp++;
+                       dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+                                       txbdp->length, DMA_TO_DEVICE);
                }
+               txbdp++;
+               dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+               tx_queue->tx_skbuff[i] = NULL;
        }
 
-       kfree(priv->tx_skbuff);
-
-       rxbdp = priv->rx_bd_base;
-
-       /* rx_skbuff is not guaranteed to be allocated, so only
-        * free it and its contents if it is allocated */
-       if(priv->rx_skbuff != NULL) {
-               for (i = 0; i < priv->rx_ring_size; i++) {
-                       if (priv->rx_skbuff[i]) {
-                               dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
-                                               priv->rx_buffer_size,
-                                               DMA_FROM_DEVICE);
+       kfree(tx_queue->tx_skbuff);
+skip_tx_skbuff:
 
-                               dev_kfree_skb_any(priv->rx_skbuff[i]);
-                               priv->rx_skbuff[i] = NULL;
-                       }
+       rx_queue = priv->rx_queue;
+       rxbdp = rx_queue->rx_bd_base;
 
-                       rxbdp->status = 0;
-                       rxbdp->length = 0;
-                       rxbdp->bufPtr = 0;
+       if (!rx_queue->rx_skbuff)
+               goto skip_rx_skbuff;
 
-                       rxbdp++;
+       for (i = 0; i < rx_queue->rx_ring_size; i++) {
+               if (rx_queue->rx_skbuff[i]) {
+                       dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
+                                        priv->rx_buffer_size,
+                                       DMA_FROM_DEVICE);
+                       dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+                       rx_queue->rx_skbuff[i] = NULL;
                }
 
-               kfree(priv->rx_skbuff);
+               rxbdp->lstatus = 0;
+               rxbdp->bufPtr = 0;
+               rxbdp++;
        }
+
+       kfree(rx_queue->rx_skbuff);
+skip_rx_skbuff:
+
+       dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
+                              sizeof(*rxbdp) * rx_queue->rx_ring_size,
+                         tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
 }
 
 void gfar_start(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 tempval;
 
        /* Enable Rx and Tx in MACCFG1 */
@@ -674,14 +1223,14 @@ void gfar_start(struct net_device *dev)
        gfar_write(&regs->maccfg1, tempval);
 
        /* Initialize DMACTRL to have WWR and WOP */
-       tempval = gfar_read(&priv->regs->dmactrl);
+       tempval = gfar_read(&regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
-       gfar_write(&priv->regs->dmactrl, tempval);
+       gfar_write(&regs->dmactrl, tempval);
 
        /* Make sure we aren't stopped */
-       tempval = gfar_read(&priv->regs->dmactrl);
+       tempval = gfar_read(&regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-       gfar_write(&priv->regs->dmactrl, tempval);
+       gfar_write(&regs->dmactrl, tempval);
 
        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
@@ -689,242 +1238,87 @@ void gfar_start(struct net_device *dev)
 
        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);
+
+       dev->trans_start = jiffies;
 }
 
 /* Bring the controller up and running */
-int startup_gfar(struct net_device *dev)
+int startup_gfar(struct net_device *ndev)
 {
-       struct txbd8 *txbdp;
-       struct rxbd8 *rxbdp;
-       dma_addr_t addr = 0;
-       unsigned long vaddr;
-       int i;
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
-       int err = 0;
-       u32 rctrl = 0;
-       u32 attrs = 0;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct gfar __iomem *regs = priv->gfargrp.regs;
+       int err;
 
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
-       /* Allocate memory for the buffer descriptors */
-       vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
-                       sizeof (struct txbd8) * priv->tx_ring_size +
-                       sizeof (struct rxbd8) * priv->rx_ring_size,
-                       &addr, GFP_KERNEL);
-
-       if (vaddr == 0) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
-                                       dev->name);
-               return -ENOMEM;
-       }
-
-       priv->tx_bd_base = (struct txbd8 *) vaddr;
-
-       /* enet DMA only understands physical addresses */
-       gfar_write(&regs->tbase0, addr);
-
-       /* Start the rx descriptor ring where the tx ring leaves off */
-       addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
-       vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
-       priv->rx_bd_base = (struct rxbd8 *) vaddr;
-       gfar_write(&regs->rbase0, addr);
-
-       /* Setup the skbuff rings */
-       priv->tx_skbuff =
-           (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
-                                       priv->tx_ring_size, GFP_KERNEL);
-
-       if (NULL == priv->tx_skbuff) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
-                                       dev->name);
-               err = -ENOMEM;
-               goto tx_skb_fail;
-       }
-
-       for (i = 0; i < priv->tx_ring_size; i++)
-               priv->tx_skbuff[i] = NULL;
-
-       priv->rx_skbuff =
-           (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
-                                       priv->rx_ring_size, GFP_KERNEL);
-
-       if (NULL == priv->rx_skbuff) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
-                                       dev->name);
-               err = -ENOMEM;
-               goto rx_skb_fail;
-       }
-
-       for (i = 0; i < priv->rx_ring_size; i++)
-               priv->rx_skbuff[i] = NULL;
-
-       /* Initialize some variables in our dev structure */
-       priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-       priv->cur_rx = priv->rx_bd_base;
-       priv->skb_curtx = priv->skb_dirtytx = 0;
-       priv->skb_currx = 0;
-
-       /* Initialize Transmit Descriptor Ring */
-       txbdp = priv->tx_bd_base;
-       for (i = 0; i < priv->tx_ring_size; i++) {
-               txbdp->status = 0;
-               txbdp->length = 0;
-               txbdp->bufPtr = 0;
-               txbdp++;
-       }
-
-       /* Set the last descriptor in the ring to indicate wrap */
-       txbdp--;
-       txbdp->status |= TXBD_WRAP;
-
-       rxbdp = priv->rx_bd_base;
-       for (i = 0; i < priv->rx_ring_size; i++) {
-               struct sk_buff *skb = NULL;
-
-               rxbdp->status = 0;
-
-               skb = gfar_new_skb(dev, rxbdp);
-
-               priv->rx_skbuff[i] = skb;
-
-               rxbdp++;
-       }
+       err = gfar_alloc_skb_resources(ndev);
+       if (err)
+               return err;
 
-       /* Set the last descriptor in the ring to wrap */
-       rxbdp--;
-       rxbdp->status |= RXBD_WRAP;
+       gfar_init_mac(ndev);
 
        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
-               if (request_irq(priv->interruptError, gfar_error,
-                               0, "enet_error", dev) < 0) {
+               err = request_irq(priv->gfargrp.interruptError, gfar_error, 0,
+                                 priv->gfargrp.int_name_er, &priv->gfargrp);
+               if (err) {
                        if (netif_msg_intr(priv))
-                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
-                                       dev->name, priv->interruptError);
-
-                       err = -1;
+                               pr_err("%s: Can't get IRQ %d\n", ndev->name,
+                                      priv->gfargrp.interruptError);
                        goto err_irq_fail;
                }
 
-               if (request_irq(priv->interruptTransmit, gfar_transmit,
-                               0, "enet_tx", dev) < 0) {
+               err = request_irq(priv->gfargrp.interruptTransmit,
+                                       gfar_transmit, 0,
+                                       priv->gfargrp.int_name_tx,
+                                       &priv->gfargrp);
+               if (err) {
                        if (netif_msg_intr(priv))
-                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
-                                       dev->name, priv->interruptTransmit);
-
-                       err = -1;
-
+                               pr_err("%s: Can't get IRQ %d\n", ndev->name,
+                                      priv->gfargrp.interruptTransmit);
                        goto tx_irq_fail;
                }
 
-               if (request_irq(priv->interruptReceive, gfar_receive,
-                               0, "enet_rx", dev) < 0) {
+               err = request_irq(priv->gfargrp.interruptReceive,
+                                       gfar_receive, 0,
+                                       priv->gfargrp.int_name_rx,
+                                       &priv->gfargrp);
+               if (err) {
                        if (netif_msg_intr(priv))
-                               printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
-                                               dev->name, priv->interruptReceive);
-
-                       err = -1;
+                               pr_err("%s: Can't get IRQ %d (receive0)\n",
+                                       ndev->name,
+                                       priv->gfargrp.interruptReceive);
                        goto rx_irq_fail;
                }
        } else {
-               if (request_irq(priv->interruptTransmit, gfar_interrupt,
-                               0, "gfar_interrupt", dev) < 0) {
+               err = request_irq(priv->gfargrp.interruptTransmit,
+                                       gfar_interrupt, 0,
+                                       priv->gfargrp.int_name_tx,
+                                       &priv->gfargrp);
+               if (err) {
                        if (netif_msg_intr(priv))
-                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
-                                       dev->name, priv->interruptError);
-
-                       err = -1;
+                               pr_err("%s: Can't get IRQ %d\n", ndev->name,
+                                      priv->gfargrp.interruptTransmit);
                        goto err_irq_fail;
                }
        }
 
-       phy_start(priv->phydev);
-
-       /* Configure the coalescing support */
-       if (priv->txcoalescing)
-               gfar_write(&regs->txic,
-                          mk_ic_value(priv->txcount, priv->txtime));
-       else
-               gfar_write(&regs->txic, 0);
-
-       if (priv->rxcoalescing)
-               gfar_write(&regs->rxic,
-                          mk_ic_value(priv->rxcount, priv->rxtime));
-       else
-               gfar_write(&regs->rxic, 0);
-
-       if (priv->rx_csum_enable)
-               rctrl |= RCTRL_CHECKSUMMING;
-
-       if (priv->extended_hash) {
-               rctrl |= RCTRL_EXTHASH;
-
-               gfar_clear_exact_match(dev);
-               rctrl |= RCTRL_EMEN;
-       }
-
-       if (priv->vlan_enable)
-               rctrl |= RCTRL_VLAN;
-
-       if (priv->padding) {
-               rctrl &= ~RCTRL_PAL_MASK;
-               rctrl |= RCTRL_PADDING(priv->padding);
-       }
-
-       /* Init rctrl based on our settings */
-       gfar_write(&priv->regs->rctrl, rctrl);
-
-       if (dev->features & NETIF_F_IP_CSUM)
-               gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
-
-       /* Set the extraction length and index */
-       attrs = ATTRELI_EL(priv->rx_stash_size) |
-               ATTRELI_EI(priv->rx_stash_index);
-
-       gfar_write(&priv->regs->attreli, attrs);
-
-       /* Start with defaults, and add stashing or locking
-        * depending on the approprate variables */
-       attrs = ATTR_INIT_SETTINGS;
-
-       if (priv->bd_stash_en)
-               attrs |= ATTR_BDSTASH;
-
-       if (priv->rx_stash_size != 0)
-               attrs |= ATTR_BUFSTASH;
-
-       gfar_write(&priv->regs->attr, attrs);
-
-       gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
-       gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
-       gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
-
        /* Start the controller */
-       gfar_start(dev);
+       gfar_start(ndev);
+
+       phy_start(priv->phydev);
 
        return 0;
 
 rx_irq_fail:
-       free_irq(priv->interruptTransmit, dev);
+       free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
 tx_irq_fail:
-       free_irq(priv->interruptError, dev);
+       free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
 err_irq_fail:
-rx_skb_fail:
        free_skb_resources(priv);
-tx_skb_fail:
-       dma_free_coherent(&dev->dev,
-                       sizeof(struct txbd8)*priv->tx_ring_size
-                       + sizeof(struct rxbd8)*priv->rx_ring_size,
-                       priv->tx_bd_base,
-                       gfar_read(&regs->tbase0));
-
        return err;
 }
 
@@ -932,14 +1326,12 @@ tx_skb_fail:
 /* Returns 0 for success. */
 static int gfar_enet_open(struct net_device *dev)
 {
-#ifdef CONFIG_GFAR_NAPI
        struct gfar_private *priv = netdev_priv(dev);
-#endif
        int err;
 
-#ifdef CONFIG_GFAR_NAPI
-       napi_enable(&priv->napi);
-#endif
+       napi_enable(&priv->rx_queue->napi);
+
+       skb_queue_head_init(&priv->rx_recycle);
 
        /* Initialize a bunch of registers */
        init_registers(dev);
@@ -948,29 +1340,27 @@ static int gfar_enet_open(struct net_device *dev)
 
        err = init_phy(dev);
 
-       if(err) {
-#ifdef CONFIG_GFAR_NAPI
-               napi_disable(&priv->napi);
-#endif
+       if (err) {
+               napi_disable(&priv->rx_queue->napi);
                return err;
        }
 
        err = startup_gfar(dev);
        if (err) {
-#ifdef CONFIG_GFAR_NAPI
-               napi_disable(&priv->napi);
-#endif
+               napi_disable(&priv->rx_queue->napi);
                return err;
        }
 
        netif_start_queue(dev);
 
+       device_set_wakeup_enable(&dev->dev, priv->wol_en);
+
        return err;
 }
 
-static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
 {
-       struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
+       struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
 
        memset(fcb, 0, GMAC_FCB_LEN);
 
@@ -1011,103 +1401,167 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
        fcb->vlctl = vlan_tx_tag_get(skb);
 }
 
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+                              struct txbd8 *base, int ring_size)
+{
+       struct txbd8 *new_bd = bdp + stride;
+
+       return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+               int ring_size)
+{
+       return skip_txbd(bdp, 1, base, ring_size);
+}
+
 /* This is called by the kernel when a frame is ready for transmission. */
 /* It is pointed to by the dev->hard_start_xmit function pointer */
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar __iomem *regs = NULL;
        struct txfcb *fcb = NULL;
-       struct txbd8 *txbdp;
-       u16 status;
+       struct txbd8 *txbdp, *txbdp_start, *base;
+       u32 lstatus;
+       int i;
+       u32 bufaddr;
        unsigned long flags;
+       unsigned int nr_frags, length;
+
+       tx_queue = priv->tx_queue;
+       base = tx_queue->tx_bd_base;
+       regs = priv->gfargrp.regs;
+
+       /* make space for additional header when fcb is needed */
+       if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
+                       (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+                       (skb_headroom(skb) < GMAC_FCB_LEN)) {
+               struct sk_buff *skb_new;
+
+               skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+               if (!skb_new) {
+                       dev->stats.tx_errors++;
+                       kfree_skb(skb);
+                       return NETDEV_TX_OK;
+               }
+               kfree_skb(skb);
+               skb = skb_new;
+       }
+
+       /* total number of fragments in the SKB */
+       nr_frags = skb_shinfo(skb)->nr_frags;
+
+       spin_lock_irqsave(&tx_queue->txlock, flags);
+
+       /* check if there is space to queue this packet */
+       if ((nr_frags+1) > tx_queue->num_txbdfree) {
+               /* no space, stop the queue */
+               netif_stop_queue(dev);
+               dev->stats.tx_fifo_errors++;
+               spin_unlock_irqrestore(&tx_queue->txlock, flags);
+               return NETDEV_TX_BUSY;
+       }
 
        /* Update transmit stats */
        dev->stats.tx_bytes += skb->len;
 
-       /* Lock priv now */
-       spin_lock_irqsave(&priv->txlock, flags);
+       txbdp = txbdp_start = tx_queue->cur_tx;
+
+       if (nr_frags == 0) {
+               lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+       } else {
+               /* Place the fragment addresses and lengths into the TxBDs */
+               for (i = 0; i < nr_frags; i++) {
+                       /* Point at the next BD, wrapping as needed */
+                       txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+                       length = skb_shinfo(skb)->frags[i].size;
+
+                       lstatus = txbdp->lstatus | length |
+                               BD_LFLAG(TXBD_READY);
+
+                       /* Handle the last BD specially */
+                       if (i == nr_frags - 1)
+                               lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+                       bufaddr = dma_map_page(&priv->ofdev->dev,
+                                       skb_shinfo(skb)->frags[i].page,
+                                       skb_shinfo(skb)->frags[i].page_offset,
+                                       length,
+                                       DMA_TO_DEVICE);
 
-       /* Point at the first free tx descriptor */
-       txbdp = priv->cur_tx;
+                       /* set the TxBD length and buffer pointer */
+                       txbdp->bufPtr = bufaddr;
+                       txbdp->lstatus = lstatus;
+               }
 
-       /* Clear all but the WRAP status flags */
-       status = txbdp->status & TXBD_WRAP;
+               lstatus = txbdp_start->lstatus;
+       }
 
        /* Set up checksumming */
-       if (likely((dev->features & NETIF_F_IP_CSUM)
-                       && (CHECKSUM_PARTIAL == skb->ip_summed))) {
-               fcb = gfar_add_fcb(skb, txbdp);
-               status |= TXBD_TOE;
+       if (CHECKSUM_PARTIAL == skb->ip_summed) {
+               fcb = gfar_add_fcb(skb);
+               lstatus |= BD_LFLAG(TXBD_TOE);
                gfar_tx_checksum(skb, fcb);
        }
 
-       if (priv->vlan_enable &&
-                       unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
+       if (priv->vlgrp && vlan_tx_tag_present(skb)) {
                if (unlikely(NULL == fcb)) {
-                       fcb = gfar_add_fcb(skb, txbdp);
-                       status |= TXBD_TOE;
+                       fcb = gfar_add_fcb(skb);
+                       lstatus |= BD_LFLAG(TXBD_TOE);
                }
 
                gfar_tx_vlan(skb, fcb);
        }
 
-       /* Set buffer length and pointer */
-       txbdp->length = skb->len;
-       txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
-                       skb->len, DMA_TO_DEVICE);
-
-       /* Save the skb pointer so we can free it later */
-       priv->tx_skbuff[priv->skb_curtx] = skb;
+       /* setup the TxBD length and buffer pointer for the first BD */
+       tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+       txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+                       skb_headlen(skb), DMA_TO_DEVICE);
 
-       /* Update the current skb pointer (wrapping if this was the last) */
-       priv->skb_curtx =
-           (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+       lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
-       /* Flag the BD as interrupt-causing */
-       status |= TXBD_INTERRUPT;
-
-       /* Flag the BD as ready to go, last in frame, and  */
-       /* in need of CRC */
-       status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
-
-       dev->trans_start = jiffies;
-
-       /* The powerpc-specific eieio() is used, as wmb() has too strong
+       /*
+        * The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
         * don't need), thus requiring a more expensive sync instruction.  At
         * some point, the set of architecture-independent barrier functions
         * should be expanded to include weaker barriers.
         */
-
        eieio();
-       txbdp->status = status;
 
-       /* If this was the last BD in the ring, the next one */
-       /* is at the beginning of the ring */
-       if (txbdp->status & TXBD_WRAP)
-               txbdp = priv->tx_bd_base;
-       else
-               txbdp++;
+       txbdp_start->lstatus = lstatus;
+
+       /* Update the current skb pointer to the next entry we will use
+        * (wrapping if necessary) */
+       tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+               TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+
+       tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+       /* reduce TxBD free count */
+       tx_queue->num_txbdfree -= (nr_frags + 1);
+
+       dev->trans_start = jiffies;
 
        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
-       if (txbdp == priv->dirty_tx) {
+       if (!tx_queue->num_txbdfree) {
                netif_stop_queue(dev);
 
                dev->stats.tx_fifo_errors++;
        }
 
-       /* Update the current txbd to the next one */
-       priv->cur_tx = txbdp;
-
        /* Tell the DMA to go go go */
-       gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+       gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
 
        /* Unlock priv */
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 /* Stops the kernel queue, and halts the controller */
@@ -1115,10 +1569,10 @@ static int gfar_close(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
 
-#ifdef CONFIG_GFAR_NAPI
-       napi_disable(&priv->napi);
-#endif
+       napi_disable(&priv->rx_queue->napi);
 
+       skb_queue_purge(&priv->rx_recycle);
+       cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);
 
        /* Disconnect from the PHY */
@@ -1131,7 +1585,7 @@ static int gfar_close(struct net_device *dev)
 }
 
 /* Changes the mac address if the controller is not running. */
-int gfar_set_mac_address(struct net_device *dev)
+static int gfar_set_mac_address(struct net_device *dev)
 {
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 
@@ -1144,54 +1598,61 @@ static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       struct gfar __iomem *regs = NULL;
        unsigned long flags;
        u32 tempval;
 
-       spin_lock_irqsave(&priv->rxlock, flags);
+       rx_queue = priv->rx_queue;
+       regs = priv->gfargrp.regs;
+       spin_lock_irqsave(&rx_queue->rxlock, flags);
 
        priv->vlgrp = grp;
 
        if (grp) {
                /* Enable VLAN tag insertion */
-               tempval = gfar_read(&priv->regs->tctrl);
+               tempval = gfar_read(&regs->tctrl);
                tempval |= TCTRL_VLINS;
 
-               gfar_write(&priv->regs->tctrl, tempval);
+               gfar_write(&regs->tctrl, tempval);
 
                /* Enable VLAN tag extraction */
-               tempval = gfar_read(&priv->regs->rctrl);
-               tempval |= RCTRL_VLEX;
-               gfar_write(&priv->regs->rctrl, tempval);
+               tempval = gfar_read(&regs->rctrl);
+               tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
+               gfar_write(&regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
-               tempval = gfar_read(&priv->regs->tctrl);
+               tempval = gfar_read(&regs->tctrl);
                tempval &= ~TCTRL_VLINS;
-               gfar_write(&priv->regs->tctrl, tempval);
+               gfar_write(&regs->tctrl, tempval);
 
                /* Disable VLAN tag extraction */
-               tempval = gfar_read(&priv->regs->rctrl);
+               tempval = gfar_read(&regs->rctrl);
                tempval &= ~RCTRL_VLEX;
-               gfar_write(&priv->regs->rctrl, tempval);
+               /* If parse is no longer required, then disable parser */
+               if (tempval & RCTRL_REQ_PARSER)
+                       tempval |= RCTRL_PRSDEP_INIT;
+               else
+                       tempval &= ~RCTRL_PRSDEP_INIT;
+               gfar_write(&regs->rctrl, tempval);
        }
 
-       spin_unlock_irqrestore(&priv->rxlock, flags);
+       gfar_change_mtu(dev, dev->mtu);
+
+       spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = priv->gfargrp.regs;
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;
 
-       if (priv->vlan_enable)
+       if (priv->vlgrp)
                frame_size += VLAN_HLEN;
 
-       if (gfar_uses_fcb(priv))
-               frame_size += GMAC_FCB_LEN;
-
-       frame_size += priv->padding;
-
        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
@@ -1199,6 +1660,11 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
                return -EINVAL;
        }
 
+       if (gfar_uses_fcb(priv))
+               frame_size += GMAC_FCB_LEN;
+
+       frame_size += priv->padding;
+
        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;
@@ -1212,20 +1678,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 
        dev->mtu = new_mtu;
 
-       gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
-       gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+       gfar_write(&regs->mrblr, priv->rx_buffer_size);
+       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
 
        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
-       tempval = gfar_read(&priv->regs->maccfg2);
+       tempval = gfar_read(&regs->maccfg2);
 
        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 
-       gfar_write(&priv->regs->maccfg2, tempval);
+       gfar_write(&regs->maccfg2, tempval);
 
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);
@@ -1233,95 +1699,176 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-/* gfar_timeout gets called when a packet has not been
+/* gfar_reset_task gets scheduled when a packet has not been
  * transmitted after a set amount of time.
  * For now, assume that clearing out all the structures, and
- * starting over will fix the problem. */
-static void gfar_timeout(struct net_device *dev)
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
 {
-       dev->stats.tx_errors++;
+       struct gfar_private *priv = container_of(work, struct gfar_private,
+                       reset_task);
+       struct net_device *dev = priv->ndev;
 
        if (dev->flags & IFF_UP) {
+               netif_stop_queue(dev);
                stop_gfar(dev);
                startup_gfar(dev);
+               netif_start_queue(dev);
        }
 
-       netif_schedule(dev);
+       netif_tx_schedule_all(dev);
+}
+
+static void gfar_timeout(struct net_device *dev)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+
+       dev->stats.tx_errors++;
+       schedule_work(&priv->reset_task);
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
-       struct net_device *dev = (struct net_device *) dev_id;
+       struct net_device *dev = tx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *bdp;
+       struct txbd8 *lbdp = NULL;
+       struct txbd8 *base = tx_queue->tx_bd_base;
+       struct sk_buff *skb;
+       int skb_dirtytx;
+       int tx_ring_size = tx_queue->tx_ring_size;
+       int frags = 0;
+       int i;
+       int howmany = 0;
+       u32 lstatus;
 
-       /* Clear IEVENT */
-       gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
-       /* Lock priv */
-       spin_lock(&priv->txlock);
-       bdp = priv->dirty_tx;
-       while ((bdp->status & TXBD_READY) == 0) {
-               /* If dirty_tx and cur_tx are the same, then either the */
-               /* ring is empty or full now (it could only be full in the beginning, */
-               /* obviously).  If it is empty, we are done. */
-               if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
+       rx_queue = priv->rx_queue;
+       bdp = tx_queue->dirty_tx;
+       skb_dirtytx = tx_queue->skb_dirtytx;
+
+       while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+               frags = skb_shinfo(skb)->nr_frags;
+               lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+               lstatus = lbdp->lstatus;
+
+               /* Only clean completed frames */
+               if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+                               (lstatus & BD_LENGTH_MASK))
                        break;
 
-               dev->stats.tx_packets++;
+               dma_unmap_single(&priv->ofdev->dev,
+                               bdp->bufPtr,
+                               bdp->length,
+                               DMA_TO_DEVICE);
 
-               /* Deferred means some collisions occurred during transmit, */
-               /* but we eventually sent the packet. */
-               if (bdp->status & TXBD_DEF)
-                       dev->stats.collisions++;
+               bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+               bdp = next_txbd(bdp, base, tx_ring_size);
 
-               /* Free the sk buffer associated with this TxBD */
-               dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
-               priv->tx_skbuff[priv->skb_dirtytx] = NULL;
-               priv->skb_dirtytx =
-                   (priv->skb_dirtytx +
-                    1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+               for (i = 0; i < frags; i++) {
+                       dma_unmap_page(&priv->ofdev->dev,
+                                       bdp->bufPtr,
+                                       bdp->length,
+                                       DMA_TO_DEVICE);
+                       bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+                       bdp = next_txbd(bdp, base, tx_ring_size);
+               }
 
-               /* update bdp to point at next bd in the ring (wrapping if necessary) */
-               if (bdp->status & TXBD_WRAP)
-                       bdp = priv->tx_bd_base;
+               /*
+                * If there's room in the queue (limit it to rx_ring_size)
+                * we add this skb back into the pool, if it's the right size
+                */
+               if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
+                               skb_recycle_check(skb, priv->rx_buffer_size +
+                                       RXBUF_ALIGNMENT))
+                       __skb_queue_head(&priv->rx_recycle, skb);
                else
-                       bdp++;
+                       dev_kfree_skb_any(skb);
 
-               /* Move dirty_tx to be the next bd */
-               priv->dirty_tx = bdp;
+               tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
-               /* We freed a buffer, so now we can restart transmission */
-               if (netif_queue_stopped(dev))
-                       netif_wake_queue(dev);
-       } /* while ((bdp->status & TXBD_READY) == 0) */
+               skb_dirtytx = (skb_dirtytx + 1) &
+                       TX_RING_MOD_MASK(tx_ring_size);
 
-       /* If we are coalescing the interrupts, reset the timer */
-       /* Otherwise, clear it */
-       if (priv->txcoalescing)
-               gfar_write(&priv->regs->txic,
-                          mk_ic_value(priv->txcount, priv->txtime));
-       else
-               gfar_write(&priv->regs->txic, 0);
+               howmany++;
+               tx_queue->num_txbdfree += frags + 1;
+       }
+
+       /* If we freed a buffer, we can restart transmission, if necessary */
+       if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
+               netif_wake_queue(dev);
+
+       /* Update dirty indicators */
+       tx_queue->skb_dirtytx = skb_dirtytx;
+       tx_queue->dirty_tx = bdp;
 
-       spin_unlock(&priv->txlock);
+       dev->stats.tx_packets += howmany;
 
+       return howmany;
+}
+
+static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
+{
+       struct gfar_private *priv = gfargrp->priv;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       unsigned long flags;
+
+       rx_queue = priv->rx_queue;
+       tx_queue = priv->tx_queue;
+       spin_lock_irqsave(&tx_queue->txlock, flags);
+       spin_lock(&rx_queue->rxlock);
+
+       if (napi_schedule_prep(&rx_queue->napi)) {
+               gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+               __napi_schedule(&rx_queue->napi);
+       } else {
+               /*
+                * Clear IEVENT, so interrupts aren't called again
+                * because of the packets that have already arrived.
+                */
+               gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
+       }
+
+       spin_unlock(&rx_queue->rxlock);
+       spin_unlock_irqrestore(&tx_queue->txlock, flags);
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+       gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
        return IRQ_HANDLED;
 }
 
-struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+               struct sk_buff *skb)
+{
+       struct net_device *dev = rx_queue->dev;
+       struct gfar_private *priv = netdev_priv(dev);
+       dma_addr_t buf;
+
+       buf = dma_map_single(&priv->ofdev->dev, skb->data,
+                            priv->rx_buffer_size, DMA_FROM_DEVICE);
+       gfar_init_rxbdp(rx_queue, bdp, buf);
+}
+
+
+struct sk_buff * gfar_new_skb(struct net_device *dev)
 {
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
-       unsigned int timeout = SKB_ALLOC_TIMEOUT;
 
-       /* We have to allocate the skb, so keep trying till we succeed */
-       while ((!skb) && timeout--)
-               skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+       skb = __skb_dequeue(&priv->rx_recycle);
+       if (!skb)
+               skb = netdev_alloc_skb(dev,
+                               priv->rx_buffer_size + RXBUF_ALIGNMENT);
 
-       if (NULL == skb)
+       if (!skb)
                return NULL;
 
        alignamount = RXBUF_ALIGNMENT -
@@ -1332,15 +1879,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
         */
        skb_reserve(skb, alignamount);
 
-       bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
-                       priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-       bdp->length = 0;
-
-       /* Mark the buffer empty */
-       eieio();
-       bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
-
        return skb;
 }
 
@@ -1382,63 +1920,12 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
        }
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id)
+irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-       struct net_device *dev = (struct net_device *) dev_id;
-       struct gfar_private *priv = netdev_priv(dev);
-#ifdef CONFIG_GFAR_NAPI
-       u32 tempval;
-#else
-       unsigned long flags;
-#endif
-
-       /* Clear IEVENT, so rx interrupt isn't called again
-        * because of this interrupt */
-       gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
-       /* support NAPI */
-#ifdef CONFIG_GFAR_NAPI
-       if (netif_rx_schedule_prep(dev, &priv->napi)) {
-               tempval = gfar_read(&priv->regs->imask);
-               tempval &= IMASK_RX_DISABLED;
-               gfar_write(&priv->regs->imask, tempval);
-
-               __netif_rx_schedule(dev, &priv->napi);
-       } else {
-               if (netif_msg_rx_err(priv))
-                       printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
-                               dev->name, gfar_read(&priv->regs->ievent),
-                               gfar_read(&priv->regs->imask));
-       }
-#else
-
-       spin_lock_irqsave(&priv->rxlock, flags);
-       gfar_clean_rx_ring(dev, priv->rx_ring_size);
-
-       /* If we are coalescing interrupts, update the timer */
-       /* Otherwise, clear it */
-       if (priv->rxcoalescing)
-               gfar_write(&priv->regs->rxic,
-                          mk_ic_value(priv->rxcount, priv->rxtime));
-       else
-               gfar_write(&priv->regs->rxic, 0);
-
-       spin_unlock_irqrestore(&priv->rxlock, flags);
-#endif
-
+       gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
        return IRQ_HANDLED;
 }
 
-static inline int gfar_rx_vlan(struct sk_buff *skb,
-               struct vlan_group *vlgrp, unsigned short vlctl)
-{
-#ifdef CONFIG_GFAR_NAPI
-       return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
-#else
-       return vlan_hwaccel_rx(skb, vlgrp, vlctl);
-#endif
-}
-
 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
        /* If valid headers were found, and valid sums
@@ -1451,58 +1938,38 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 }
 
 
-static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
-{
-       struct rxfcb *fcb = (struct rxfcb *)skb->data;
-
-       /* Remove the FCB from the skb */
-       skb_pull(skb, GMAC_FCB_LEN);
-
-       return fcb;
-}
-
 /* gfar_process_frame() -- handle one incoming packet if skb
  * isn't NULL.  */
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-               int length)
+                             int amount_pull)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;
 
-       if (NULL == skb) {
-               if (netif_msg_rx_err(priv))
-                       printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
-               dev->stats.rx_dropped++;
-               priv->extra_stats.rx_skbmissing++;
-       } else {
-               int ret;
-
-               /* Prep the skb for the packet */
-               skb_put(skb, length);
+       int ret;
 
-               /* Grab the FCB if there is one */
-               if (gfar_uses_fcb(priv))
-                       fcb = gfar_get_fcb(skb);
+       /* fcb is at the beginning if it exists */
+       fcb = (struct rxfcb *)skb->data;
 
-               /* Remove the padded bytes, if there are any */
-               if (priv->padding)
-                       skb_pull(skb, priv->padding);
+       /* Remove the FCB from the skb */
+       /* Remove the padded bytes, if there are any */
+       if (amount_pull)
+               skb_pull(skb, amount_pull);
 
-               if (priv->rx_csum_enable)
-                       gfar_rx_checksum(skb, fcb);
+       if (priv->rx_csum_enable)
+               gfar_rx_checksum(skb, fcb);
 
-               /* Tell the skb what kind of packet this is */
-               skb->protocol = eth_type_trans(skb, dev);
+       /* Tell the skb what kind of packet this is */
+       skb->protocol = eth_type_trans(skb, dev);
 
-               /* Send the packet up the stack */
-               if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
-                       ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
-               else
-                       ret = RECEIVE(skb);
+       /* Send the packet up the stack */
+       if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
+               ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
+       else
+               ret = netif_receive_skb(skb);
 
-               if (NET_RX_DROP == ret)
-                       priv->extra_stats.kernel_dropped++;
-       }
+       if (NET_RX_DROP == ret)
+               priv->extra_stats.kernel_dropped++;
 
        return 0;
 }
@@ -1511,100 +1978,148 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  *   until the budget/quota has been reached. Returns the number
  *   of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-       struct rxbd8 *bdp;
+       struct net_device *dev = rx_queue->dev;
+       struct rxbd8 *bdp, *base;
        struct sk_buff *skb;
-       u16 pkt_len;
+       int pkt_len;
+       int amount_pull;
        int howmany = 0;
        struct gfar_private *priv = netdev_priv(dev);
 
        /* Get the first full descriptor */
-       bdp = priv->cur_rx;
+       bdp = rx_queue->cur_rx;
+       base = rx_queue->rx_bd_base;
+
+       amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
+               priv->padding;
 
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+               struct sk_buff *newskb;
                rmb();
-               skb = priv->rx_skbuff[priv->skb_currx];
 
-               if (!(bdp->status &
-                     (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
-                      | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
+               /* Add another skb for the future */
+               newskb = gfar_new_skb(dev);
+
+               skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+
+               dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+                               priv->rx_buffer_size, DMA_FROM_DEVICE);
+
+               /* We drop the frame if we failed to allocate a new buffer */
+               if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
+                                bdp->status & RXBD_ERR)) {
+                       count_errors(bdp->status, dev);
+
+                       if (unlikely(!newskb))
+                               newskb = skb;
+                       else if (skb) {
+                               /*
+                                * We need to reset ->data to what it
+                                * was before gfar_new_skb() re-aligned
+                                * it to an RXBUF_ALIGNMENT boundary
+                                * before we put the skb back on the
+                                * recycle list.
+                                */
+                               skb->data = skb->head + NET_SKB_PAD;
+                               __skb_queue_head(&priv->rx_recycle, skb);
+                       }
+               } else {
                        /* Increment the number of packets */
                        dev->stats.rx_packets++;
                        howmany++;
 
-                       /* Remove the FCS from the packet length */
-                       pkt_len = bdp->length - 4;
-
-                       gfar_process_frame(dev, skb, pkt_len);
+                       if (likely(skb)) {
+                               /* Remove the FCS from the packet length */
+                               pkt_len = bdp->length - ETH_FCS_LEN;
+                               skb_put(skb, pkt_len);
+                               dev->stats.rx_bytes += pkt_len;
 
-                       dev->stats.rx_bytes += pkt_len;
-               } else {
-                       count_errors(bdp->status, dev);
+                               if (in_irq() || irqs_disabled())
+                                       printk("Interrupt problem!\n");
+                               gfar_process_frame(dev, skb, amount_pull);
 
-                       if (skb)
-                               dev_kfree_skb_any(skb);
+                       } else {
+                               if (netif_msg_rx_err(priv))
+                                       printk(KERN_WARNING
+                                              "%s: Missing skb!\n", dev->name);
+                               dev->stats.rx_dropped++;
+                               priv->extra_stats.rx_skbmissing++;
+                       }
 
-                       priv->rx_skbuff[priv->skb_currx] = NULL;
                }
 
-               dev->last_rx = jiffies;
+               rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
-               /* Clear the status flags for this buffer */
-               bdp->status &= ~RXBD_STATS;
-
-               /* Add another skb for the future */
-               skb = gfar_new_skb(dev, bdp);
-               priv->rx_skbuff[priv->skb_currx] = skb;
+               /* Setup the new bdp */
+               gfar_new_rxbdp(rx_queue, bdp, newskb);
 
                /* Update to the next pointer */
-               if (bdp->status & RXBD_WRAP)
-                       bdp = priv->rx_bd_base;
-               else
-                       bdp++;
+               bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
                /* update to point at the next skb */
-               priv->skb_currx =
-                   (priv->skb_currx +
-                    1) & RX_RING_MOD_MASK(priv->rx_ring_size);
-
+               rx_queue->skb_currx =
+                   (rx_queue->skb_currx + 1) &
+                   RX_RING_MOD_MASK(rx_queue->rx_ring_size);
        }
 
        /* Update the current rxbd pointer to be the next one */
-       priv->cur_rx = bdp;
+       rx_queue->cur_rx = bdp;
 
        return howmany;
 }
 
-#ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-       struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-       struct net_device *dev = priv->dev;
-       int howmany;
+       struct gfar_priv_rx_q *rx_queue = container_of(napi,
+                       struct gfar_priv_rx_q, napi);
+       struct net_device *dev = rx_queue->dev;
+       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       int tx_cleaned = 0;
+       int rx_cleaned = 0;
+       unsigned long flags;
+
+       /* Clear IEVENT, so interrupts aren't called again
+        * because of the packets that have already arrived */
+       gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+       tx_queue = priv->tx_queue;
+
+       /* If we fail to get the lock, don't bother with the TX BDs */
+       if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+               tx_cleaned = gfar_clean_tx_ring(tx_queue);
+               spin_unlock_irqrestore(&tx_queue->txlock, flags);
+       }
+
+       rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);
 
-       howmany = gfar_clean_rx_ring(dev, budget);
+       if (tx_cleaned)
+               return budget;
 
-       if (howmany < budget) {
-               netif_rx_complete(dev, napi);
+       if (rx_cleaned < budget) {
+               napi_complete(napi);
 
                /* Clear the halt bit in RSTAT */
-               gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+               gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
 
-               gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+               gfar_write(&regs->imask, IMASK_DEFAULT);
 
                /* If we are coalescing interrupts, update the timer */
                /* Otherwise, clear it */
-               if (priv->rxcoalescing)
-                       gfar_write(&priv->regs->rxic,
-                                  mk_ic_value(priv->rxcount, priv->rxtime));
-               else
-                       gfar_write(&priv->regs->rxic, 0);
+               if (likely(rx_queue->rxcoalescing)) {
+                       gfar_write(&regs->rxic, 0);
+                       gfar_write(&regs->rxic, rx_queue->rxic);
+               }
+               if (likely(tx_queue->txcoalescing)) {
+                       gfar_write(&regs->txic, 0);
+                       gfar_write(&regs->txic, tx_queue->txic);
+               }
        }
 
-       return howmany;
+       return rx_cleaned;
 }
-#endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
@@ -1617,42 +2132,41 @@ static void gfar_netpoll(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
 
        /* If the device has multiple interrupts, run tx/rx */
-       if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               disable_irq(priv->interruptTransmit);
-               disable_irq(priv->interruptReceive);
-               disable_irq(priv->interruptError);
-               gfar_interrupt(priv->interruptTransmit, dev);
-               enable_irq(priv->interruptError);
-               enable_irq(priv->interruptReceive);
-               enable_irq(priv->interruptTransmit);
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+               disable_irq(priv->gfargrp.interruptTransmit);
+               disable_irq(priv->gfargrp.interruptReceive);
+               disable_irq(priv->gfargrp.interruptError);
+               gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
+               enable_irq(priv->gfargrp.interruptError);
+               enable_irq(priv->gfargrp.interruptReceive);
+               enable_irq(priv->gfargrp.interruptTransmit);
        } else {
-               disable_irq(priv->interruptTransmit);
-               gfar_interrupt(priv->interruptTransmit, dev);
-               enable_irq(priv->interruptTransmit);
+               disable_irq(priv->gfargrp.interruptTransmit);
+               gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
+               enable_irq(priv->gfargrp.interruptTransmit);
        }
 }
 #endif
 
 /* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *dev_id)
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 {
-       struct net_device *dev = dev_id;
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_grp *gfargrp = grp_id;
 
        /* Save ievent for future reference */
-       u32 events = gfar_read(&priv->regs->ievent);
+       u32 events = gfar_read(&gfargrp->regs->ievent);
 
        /* Check for reception */
        if (events & IEVENT_RX_MASK)
-               gfar_receive(irq, dev_id);
+               gfar_receive(irq, grp_id);
 
        /* Check for transmit completion */
        if (events & IEVENT_TX_MASK)
-               gfar_transmit(irq, dev_id);
+               gfar_transmit(irq, grp_id);
 
        /* Check for errors */
        if (events & IEVENT_ERR_MASK)
-               gfar_error(irq, dev_id);
+               gfar_error(irq, grp_id);
 
        return IRQ_HANDLED;
 }
@@ -1666,12 +2180,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar __iomem *regs = priv->gfargrp.regs;
        unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;
 
-       spin_lock_irqsave(&priv->txlock, flags);
+       tx_queue = priv->tx_queue;
+       spin_lock_irqsave(&tx_queue->txlock, flags);
        if (phydev->link) {
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -1694,6 +2210,8 @@ static void adjust_link(struct net_device *dev)
                        case 1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+                               ecntrl &= ~(ECNTRL_R100);
                                break;
                        case 100:
                        case 10:
@@ -1724,7 +2242,6 @@ static void adjust_link(struct net_device *dev)
                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
-                       netif_schedule(dev);
                }
        } else if (priv->oldlink) {
                new_state = 1;
@@ -1736,7 +2253,7 @@ static void adjust_link(struct net_device *dev)
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -1747,10 +2264,10 @@ static void gfar_set_multi(struct net_device *dev)
 {
        struct dev_mc_list *mc_ptr;
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp.regs;
        u32 tempval;
 
-       if(dev->flags & IFF_PROMISC) {
+       if (dev->flags & IFF_PROMISC) {
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
@@ -1762,7 +2279,7 @@ static void gfar_set_multi(struct net_device *dev)
                gfar_write(&regs->rctrl, tempval);
        }
 
-       if(dev->flags & IFF_ALLMULTI) {
+       if (dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
@@ -1814,7 +2331,7 @@ static void gfar_set_multi(struct net_device *dev)
                        em_num = 0;
                }
 
-               if(dev->mc_count == 0)
+               if (dev->mc_count == 0)
                        return;
 
                /* Parse the list, and set the appropriate bits */
@@ -1880,10 +2397,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = priv->gfargrp.regs;
        int idx;
        char tmpbuf[MAC_ADDR_LEN];
        u32 tempval;
-       u32 __iomem *macptr = &priv->regs->macstnaddr1;
+       u32 __iomem *macptr = &regs->macstnaddr1;
 
        macptr += num*2;
 
@@ -1900,21 +2418,28 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 }
 
 /* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *dev_id)
+static irqreturn_t gfar_error(int irq, void *grp_id)
 {
-       struct net_device *dev = dev_id;
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_grp *gfargrp = grp_id;
+       struct gfar __iomem *regs = gfargrp->regs;
+       struct gfar_private *priv = gfargrp->priv;
+       struct net_device *dev = priv->ndev;
 
        /* Save ievent for future reference */
-       u32 events = gfar_read(&priv->regs->ievent);
+       u32 events = gfar_read(&regs->ievent);
 
        /* Clear IEVENT */
-       gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
+       gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
+
+       /* Magic Packet is not an error. */
+       if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+           (events & IEVENT_MAG))
+               events &= ~IEVENT_MAG;
 
        /* Hmm... */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-                      dev->name, events, gfar_read(&priv->regs->imask));
+                      dev->name, events, gfar_read(&regs->imask));
 
        /* Update the error counters */
        if (events & IEVENT_TXE) {
@@ -1932,7 +2457,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
                        priv->extra_stats.tx_underrun++;
 
                        /* Reactivate the Tx Queues */
-                       gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+                       gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -1941,16 +2466,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
                dev->stats.rx_errors++;
                priv->extra_stats.rx_bsy++;
 
-               gfar_receive(irq, dev_id);
-
-#ifndef CONFIG_GFAR_NAPI
-               /* Clear the halt bit in RSTAT */
-               gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
-#endif
+               gfar_receive(irq, grp_id);
 
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
-                              dev->name, gfar_read(&priv->regs->rstat));
+                              dev->name, gfar_read(&regs->rstat));
        }
        if (events & IEVENT_BABR) {
                dev->stats.rx_errors++;
@@ -1975,34 +2495,36 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static struct of_device_id gfar_match[] =
+{
+       {
+               .type = "network",
+               .compatible = "gianfar",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, gfar_match);
+
 /* Structure for a device driver */
-static struct platform_driver gfar_driver = {
+static struct of_platform_driver gfar_driver = {
+       .name = "fsl-gianfar",
+       .match_table = gfar_match,
+
        .probe = gfar_probe,
        .remove = gfar_remove,
-       .driver = {
-               .name = "fsl-gianfar",
-       },
+       .suspend = gfar_legacy_suspend,
+       .resume = gfar_legacy_resume,
+       .driver.pm = GFAR_PM_OPS,
 };
 
 static int __init gfar_init(void)
 {
-       int err = gfar_mdio_init();
-
-       if (err)
-               return err;
-
-       err = platform_driver_register(&gfar_driver);
-
-       if (err)
-               gfar_mdio_exit();
-
-       return err;
+       return of_register_platform_driver(&gfar_driver);
 }
 
 static void __exit gfar_exit(void)
 {
-       platform_driver_unregister(&gfar_driver);
-       gfar_mdio_exit();
+       of_unregister_platform_driver(&gfar_driver);
 }
 
 module_init(gfar_init);