of: Always use 'struct device.of_node' to get device node pointer.
[safe/jmp/linux-2.6] / drivers / net / gianfar.c
index aa258e8..b71bba9 100644 (file)
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -326,8 +325,8 @@ cleanup:
 
 static void gfar_init_tx_rx_base(struct gfar_private *priv)
 {
-       struct gfar __iomem *regs = priv->gfargrp.regs;
-       u32 *baddr;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 __iomem *baddr;
        int i;
 
        baddr = &regs->tbase0;
@@ -346,7 +345,7 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
 static void gfar_init_mac(struct net_device *ndev)
 {
        struct gfar_private *priv = netdev_priv(ndev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 rctrl = 0;
        u32 tctrl = 0;
        u32 attrs = 0;
@@ -355,16 +354,13 @@ static void gfar_init_mac(struct net_device *ndev)
        gfar_init_tx_rx_base(priv);
 
        /* Configure the coalescing support */
-       gfar_write(&regs->txic, 0);
-       if (priv->tx_queue[0]->txcoalescing)
-               gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+       gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
-       gfar_write(&regs->rxic, 0);
-       if (priv->rx_queue[0]->rxcoalescing)
-               gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
-
-       if (priv->rx_filer_enable)
+       if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
+               /* Program the RIR0 reg with the required distribution */
+               gfar_write(&regs->rir0, DEFAULT_RIR0);
+       }
 
        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;
@@ -420,6 +416,36 @@ static void gfar_init_mac(struct net_device *ndev)
        gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
 }
 
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       struct netdev_queue *txq;
+       unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+       unsigned long tx_packets = 0, tx_bytes = 0;
+       int i = 0;
+
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_packets += priv->rx_queue[i]->stats.rx_packets;
+               rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+               rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+       }
+
+       dev->stats.rx_packets = rx_packets;
+       dev->stats.rx_bytes = rx_bytes;
+       dev->stats.rx_dropped = rx_dropped;
+
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               txq = netdev_get_tx_queue(dev, i);
+               tx_bytes += txq->tx_bytes;
+               tx_packets += txq->tx_packets;
+       }
+
+       dev->stats.tx_bytes = tx_bytes;
+       dev->stats.tx_packets = tx_packets;
+
+       return &dev->stats;
+}
+
 static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
@@ -428,7 +454,7 @@ static const struct net_device_ops gfar_netdev_ops = {
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
-       .ndo_select_queue = gfar_select_queue,
+       .ndo_get_stats = gfar_get_stats,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
@@ -437,6 +463,9 @@ static const struct net_device_ops gfar_netdev_ops = {
 #endif
 };
 
+unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
 void lock_rx_qs(struct gfar_private *priv)
 {
        int i = 0x0;
@@ -475,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
        return priv->vlgrp || priv->rx_csum_enable;
 }
 
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       return skb_get_queue_mapping(skb);
-}
 static void free_tx_pointers(struct gfar_private *priv)
 {
        int i = 0;
@@ -495,16 +520,91 @@ static void free_rx_pointers(struct gfar_private *priv)
                kfree(priv->rx_queue[i]);
 }
 
+static void unmap_group_regs(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < MAXGROUPS; i++)
+               if (priv->gfargrp[i].regs)
+                       iounmap(priv->gfargrp[i].regs);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < priv->num_grps; i++)
+               napi_disable(&priv->gfargrp[i].napi);
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < priv->num_grps; i++)
+               napi_enable(&priv->gfargrp[i].napi);
+}
+
+static int gfar_parse_group(struct device_node *np,
+               struct gfar_private *priv, const char *model)
+{
+       u32 *queue_mask;
+       u64 addr, size;
+
+       addr = of_translate_address(np,
+                       of_get_address(np, 0, &size, NULL));
+       priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+
+       if (!priv->gfargrp[priv->num_grps].regs)
+               return -ENOMEM;
+
+       priv->gfargrp[priv->num_grps].interruptTransmit =
+                       irq_of_parse_and_map(np, 0);
+
+       /* If we aren't the FEC we have multiple interrupts */
+       if (model && strcasecmp(model, "FEC")) {
+               priv->gfargrp[priv->num_grps].interruptReceive =
+                       irq_of_parse_and_map(np, 1);
+               priv->gfargrp[priv->num_grps].interruptError =
+                       irq_of_parse_and_map(np,2);
+               if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
+                       priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
+                       priv->gfargrp[priv->num_grps].interruptError < 0) {
+                       return -EINVAL;
+               }
+       }
+
+       priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
+       priv->gfargrp[priv->num_grps].priv = priv;
+       spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+       if(priv->mode == MQ_MG_MODE) {
+               queue_mask = (u32 *)of_get_property(np,
+                                       "fsl,rx-bit-map", NULL);
+               priv->gfargrp[priv->num_grps].rx_bit_map =
+                       queue_mask ?  *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
+               queue_mask = (u32 *)of_get_property(np,
+                                       "fsl,tx-bit-map", NULL);
+               priv->gfargrp[priv->num_grps].tx_bit_map =
+                       queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+       } else {
+               priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
+               priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+       }
+       priv->num_grps++;
+
+       return 0;
+}
+
 static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 {
        const char *model;
        const char *ctype;
        const void *mac_addr;
-       u64 addr, size;
        int err = 0, i;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
-       struct device_node *np = ofdev->node;
+       struct device_node *np = ofdev->dev.of_node;
+       struct device_node *child = NULL;
        const u32 *stash;
        const u32 *stash_len;
        const u32 *stash_idx;
@@ -541,43 +641,33 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
                return -ENOMEM;
 
        priv = netdev_priv(dev);
-       priv->node = ofdev->node;
+       priv->node = ofdev->dev.of_node;
        priv->ndev = dev;
 
        dev->num_tx_queues = num_tx_qs;
        dev->real_num_tx_queues = num_tx_qs;
        priv->num_tx_queues = num_tx_qs;
        priv->num_rx_queues = num_rx_qs;
-
-       /* get a pointer to the register memory */
-       addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
-       priv->gfargrp.regs = ioremap(addr, size);
-
-       if (priv->gfargrp.regs == NULL) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
-       priv->gfargrp.priv = priv; /* back pointer from group to priv */
-       priv->gfargrp.rx_bit_map = DEFAULT_MAPPING;
-       priv->gfargrp.tx_bit_map = DEFAULT_MAPPING;
-
-       priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);
+       priv->num_grps = 0x0;
 
        model = of_get_property(np, "model", NULL);
 
-       /* If we aren't the FEC we have multiple interrupts */
-       if (model && strcasecmp(model, "FEC")) {
-               priv->gfargrp.interruptReceive = irq_of_parse_and_map(np, 1);
-
-               priv->gfargrp.interruptError = irq_of_parse_and_map(np, 2);
+       for (i = 0; i < MAXGROUPS; i++)
+               priv->gfargrp[i].regs = NULL;
 
-               if (priv->gfargrp.interruptTransmit < 0 ||
-                               priv->gfargrp.interruptReceive < 0 ||
-                               priv->gfargrp.interruptError < 0) {
-                       err = -EINVAL;
-                       goto err_out;
+       /* Parse and initialize group specific information */
+       if (of_device_is_compatible(np, "fsl,etsec2")) {
+               priv->mode = MQ_MG_MODE;
+               for_each_child_of_node(np, child) {
+                       err = gfar_parse_group(child, priv, model);
+                       if (err)
+                               goto err_grp_init;
                }
+       } else {
+               priv->mode = SQ_SG_MODE;
+               err = gfar_parse_group(np, priv, model);
+               if(err)
+                       goto err_grp_init;
        }
 
        for (i = 0; i < priv->num_tx_queues; i++)
@@ -586,7 +676,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
                priv->rx_queue[i] = NULL;
 
        for (i = 0; i < priv->num_tx_queues; i++) {
-               priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kmalloc(
+               priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
                                sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
@@ -599,7 +689,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
-               priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+               priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
                                        sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
@@ -676,8 +766,8 @@ rx_alloc_failed:
        free_rx_pointers(priv);
 tx_alloc_failed:
        free_tx_pointers(priv);
-err_out:
-       iounmap(priv->gfargrp.regs);
+err_grp_init:
+       unmap_group_regs(priv);
        free_netdev(dev);
        return err;
 }
@@ -707,6 +797,74 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
        }
        return new_bit_map;
 }
+
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+                                  u32 class)
+{
+       u32 rqfpr = FPR_FILER_MASK;
+       u32 rqfcr = 0x0;
+
+       rqfar--;
+       rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+       ftp_rqfpr[rqfar] = rqfpr;
+       ftp_rqfcr[rqfar] = rqfcr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar--;
+       rqfcr = RQFCR_CMP_NOMATCH;
+       ftp_rqfpr[rqfar] = rqfpr;
+       ftp_rqfcr[rqfar] = rqfcr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar--;
+       rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+       rqfpr = class;
+       ftp_rqfcr[rqfar] = rqfcr;
+       ftp_rqfpr[rqfar] = rqfpr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar--;
+       rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+       rqfpr = class;
+       ftp_rqfcr[rqfar] = rqfcr;
+       ftp_rqfpr[rqfar] = rqfpr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+       int i = 0x0;
+       u32 rqfar = MAX_FILER_IDX;
+       u32 rqfcr = 0x0;
+       u32 rqfpr = FPR_FILER_MASK;
+
+       /* Default rule */
+       rqfcr = RQFCR_CMP_MATCH;
+       ftp_rqfcr[rqfar] = rqfcr;
+       ftp_rqfpr[rqfar] = rqfpr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+       /* cur_filer_idx indicates the first non-masked rule */
+       priv->cur_filer_idx = rqfar;
+
+       /* Rest are masked rules */
+       rqfcr = RQFCR_CMP_NOMATCH;
+       for (i = 0; i < rqfar; i++) {
+               ftp_rqfcr[i] = rqfcr;
+               ftp_rqfpr[i] = rqfpr;
+               gfar_write_filer(priv, i, rqfcr, rqfpr);
+       }
+}
+
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
 static int gfar_probe(struct of_device *ofdev,
@@ -716,9 +874,11 @@ static int gfar_probe(struct of_device *ofdev,
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gfar __iomem *regs = NULL;
-       int err = 0, i;
+       int err = 0, i, grp_idx = 0;
        int len_devname;
        u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+       u32 isrg = 0;
+       u32 __iomem *baddr;
 
        err = gfar_of_init(ofdev, &dev);
 
@@ -728,15 +888,14 @@ static int gfar_probe(struct of_device *ofdev,
        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
-       priv->node = ofdev->node;
+       priv->node = ofdev->dev.of_node;
        SET_NETDEV_DEV(dev, &ofdev->dev);
 
-       spin_lock_init(&priv->gfargrp.grplock);
        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);
 
        dev_set_drvdata(&ofdev->dev, priv);
-       regs = priv->gfargrp.regs;
+       regs = priv->gfargrp[0].regs;
 
        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
@@ -769,7 +928,8 @@ static int gfar_probe(struct of_device *ofdev,
        dev->ethtool_ops = &gfar_ethtool_ops;
 
        /* Register for napi ...We are registering NAPI for each grp */
-       netif_napi_add(dev, &priv->gfargrp.napi, gfar_poll, GFAR_DEV_WEIGHT);
+       for (i = 0; i < priv->num_grps; i++)
+               netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
@@ -825,25 +985,51 @@ static int gfar_probe(struct of_device *ofdev,
        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;
 
+       /* Program the isrg regs only if number of grps > 1 */
+       if (priv->num_grps > 1) {
+               baddr = &regs->isrg0;
+               for (i = 0; i < priv->num_grps; i++) {
+                       isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+                       isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+                       gfar_write(baddr, isrg);
+                       baddr++;
+                       isrg = 0x0;
+               }
+       }
+
        /* Need to reverse the bit maps as  bit_map's MSB is q0
-        * but, for_each_bit parses from right to left, which
+        * but, for_each_set_bit parses from right to left, which
         * basically reverses the queue numbers */
-       priv->gfargrp.tx_bit_map = reverse_bitmap(priv->gfargrp.tx_bit_map, MAX_TX_QS);
-       priv->gfargrp.rx_bit_map = reverse_bitmap(priv->gfargrp.rx_bit_map, MAX_RX_QS);
-
-       /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values */
-       for_each_bit(i, &priv->gfargrp.rx_bit_map, priv->num_rx_queues) {
-               priv->gfargrp.num_rx_queues++;
-               rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
-               rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
-       }
-       for_each_bit (i, &priv->gfargrp.tx_bit_map, priv->num_tx_queues) {
-               priv->gfargrp.num_tx_queues++;
-               tstat = tstat | (TSTAT_CLEAR_THALT >> i);
-               tqueue = tqueue | (TQUEUE_EN0 >> i);
+       for (i = 0; i< priv->num_grps; i++) {
+               priv->gfargrp[i].tx_bit_map = reverse_bitmap(
+                               priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+               priv->gfargrp[i].rx_bit_map = reverse_bitmap(
+                               priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+       }
+
+       /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+        * also assign queues to groups */
+       for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+               priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+                               priv->num_rx_queues) {
+                       priv->gfargrp[grp_idx].num_rx_queues++;
+                       priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+                       rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+                       rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+               }
+               priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+                               priv->num_tx_queues) {
+                       priv->gfargrp[grp_idx].num_tx_queues++;
+                       priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+                       tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+                       tqueue = tqueue | (TQUEUE_EN0 >> i);
+               }
+               priv->gfargrp[grp_idx].rstat = rstat;
+               priv->gfargrp[grp_idx].tstat = tstat;
+               rstat = tstat =0;
        }
-       priv->gfargrp.rstat = rstat;
-       priv->gfargrp.tstat = tstat;
 
        gfar_write(&regs->rqueue, rqueue);
        gfar_write(&regs->tqueue, tqueue);
@@ -864,6 +1050,9 @@ static int gfar_probe(struct of_device *ofdev,
                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
        }
 
+       /* enable filer if using multiple RX queues*/
+       if(priv->num_rx_queues > 1)
+               priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 
@@ -883,20 +1072,43 @@ static int gfar_probe(struct of_device *ofdev,
 
        /* fill out IRQ number and name fields */
        len_devname = strlen(dev->name);
-       strncpy(&priv->gfargrp.int_name_tx[0], dev->name, len_devname);
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               strncpy(&priv->gfargrp.int_name_tx[len_devname],
-                       "_tx", sizeof("_tx") + 1);
-
-               strncpy(&priv->gfargrp.int_name_rx[0], dev->name, len_devname);
-               strncpy(&priv->gfargrp.int_name_rx[len_devname],
-                       "_rx", sizeof("_rx") + 1);
-
-               strncpy(&priv->gfargrp.int_name_er[0], dev->name, len_devname);
-               strncpy(&priv->gfargrp.int_name_er[len_devname],
-                       "_er", sizeof("_er") + 1);
-       } else
-               priv->gfargrp.int_name_tx[len_devname] = '\0';
+       for (i = 0; i < priv->num_grps; i++) {
+               strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
+                               len_devname);
+               if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+                       strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
+                               "_g", sizeof("_g"));
+                       priv->gfargrp[i].int_name_tx[
+                               strlen(priv->gfargrp[i].int_name_tx)] = i+48;
+                       strncpy(&priv->gfargrp[i].int_name_tx[strlen(
+                               priv->gfargrp[i].int_name_tx)],
+                               "_tx", sizeof("_tx") + 1);
+
+                       strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
+                                       len_devname);
+                       strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
+                                       "_g", sizeof("_g"));
+                       priv->gfargrp[i].int_name_rx[
+                               strlen(priv->gfargrp[i].int_name_rx)] = i+48;
+                       strncpy(&priv->gfargrp[i].int_name_rx[strlen(
+                               priv->gfargrp[i].int_name_rx)],
+                               "_rx", sizeof("_rx") + 1);
+
+                       strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
+                                       len_devname);
+                       strncpy(&priv->gfargrp[i].int_name_er[len_devname],
+                               "_g", sizeof("_g"));
+                       priv->gfargrp[i].int_name_er[strlen(
+                                       priv->gfargrp[i].int_name_er)] = i+48;
+                       strncpy(&priv->gfargrp[i].int_name_er[strlen(\
+                               priv->gfargrp[i].int_name_er)],
+                               "_er", sizeof("_er") + 1);
+               } else
+                       priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+       }
+
+       /* Initialize the filer table */
+       gfar_init_filer_table(priv);
 
        /* Create all the sysfs files */
        gfar_init_sysfs(dev);
@@ -908,16 +1120,16 @@ static int gfar_probe(struct of_device *ofdev,
        /* provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
        for (i = 0; i < priv->num_rx_queues; i++)
-               printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+               printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->rx_queue[i]->rx_ring_size);
        for(i = 0; i < priv->num_tx_queues; i++)
-                printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+                printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
        return 0;
 
 register_fail:
-       iounmap(priv->gfargrp.regs);
+       unmap_group_regs(priv);
        free_tx_pointers(priv);
        free_rx_pointers(priv);
        if (priv->phy_node)
@@ -940,7 +1152,7 @@ static int gfar_remove(struct of_device *ofdev)
        dev_set_drvdata(&ofdev->dev, NULL);
 
        unregister_netdev(priv->ndev);
-       iounmap(priv->gfargrp.regs);
+       unmap_group_regs(priv);
        free_netdev(priv->ndev);
 
        return 0;
@@ -952,7 +1164,7 @@ static int gfar_suspend(struct device *dev)
 {
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
-       struct gfar __iomem *regs = NULL;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;
 
@@ -960,7 +1172,6 @@ static int gfar_suspend(struct device *dev)
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
        netif_device_detach(ndev);
-       regs = priv->gfargrp.regs;
 
        if (netif_running(ndev)) {
 
@@ -984,7 +1195,7 @@ static int gfar_suspend(struct device *dev)
                unlock_tx_qs(priv);
                local_irq_restore(flags);
 
-               napi_disable(&priv->gfargrp.napi);
+               disable_napi(priv);
 
                if (magic_packet) {
                        /* Enable interrupt on Magic Packet */
@@ -1006,7 +1217,7 @@ static int gfar_resume(struct device *dev)
 {
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
-       struct gfar __iomem *regs = NULL;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
@@ -1023,8 +1234,6 @@ static int gfar_resume(struct device *dev)
        /* Disable Magic Packet mode, in case something
         * else woke us up.
         */
-       regs = priv->gfargrp.regs;
-
        local_irq_save(flags);
        lock_tx_qs(priv);
        lock_rx_qs(priv);
@@ -1041,7 +1250,7 @@ static int gfar_resume(struct device *dev)
 
        netif_device_attach(ndev);
 
-       napi_enable(&priv->gfargrp.napi);
+       enable_napi(priv);
 
        return 0;
 }
@@ -1068,7 +1277,7 @@ static int gfar_restore(struct device *dev)
                phy_start(priv->phydev);
 
        netif_device_attach(ndev);
-       napi_enable(&priv->gfargrp.napi);
+       enable_napi(priv);
 
        return 0;
 }
@@ -1107,10 +1316,9 @@ static int gfar_legacy_resume(struct of_device *ofdev)
 static phy_interface_t gfar_get_interface(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = NULL;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 ecntrl;
 
-       regs = priv->gfargrp.regs;
        ecntrl = gfar_read(&regs->ecntrl);
 
        if (ecntrl & ECNTRL_SGMII_MODE)
@@ -1234,14 +1442,18 @@ static void init_registers(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
+       int i = 0;
 
-       regs = priv->gfargrp.regs;
-       /* Clear IEVENT */
-       gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               /* Clear IEVENT */
+               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
-       /* Initialize IMASK */
-       gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+               /* Initialize IMASK */
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       }
 
+       regs = priv->gfargrp[0].regs;
        /* Init hash registers to zero */
        gfar_write(&regs->igaddr0, 0);
        gfar_write(&regs->igaddr1, 0);
@@ -1282,15 +1494,20 @@ static void init_registers(struct net_device *dev)
 static void gfar_halt_nodisable(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = NULL;
        u32 tempval;
+       int i = 0;
 
-       /* Mask all interrupts */
-       gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               /* Mask all interrupts */
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
-       /* Clear all interrupts */
-       gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+               /* Clear all interrupts */
+               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+       }
 
+       regs = priv->gfargrp[0].regs;
        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
@@ -1308,7 +1525,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
 void gfar_halt(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
 
        gfar_halt_nodisable(dev);
@@ -1319,10 +1536,18 @@ void gfar_halt(struct net_device *dev)
        gfar_write(&regs->maccfg1, tempval);
 }
 
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+       free_irq(grp->interruptError, grp);
+       free_irq(grp->interruptTransmit, grp);
+       free_irq(grp->interruptReceive, grp);
+}
+
 void stop_gfar(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
+       int i;
 
        phy_stop(priv->phydev);
 
@@ -1340,11 +1565,12 @@ void stop_gfar(struct net_device *dev)
 
        /* Free the IRQs */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
-               free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
-               free_irq(priv->gfargrp.interruptReceive, &priv->gfargrp);
+               for (i = 0; i < priv->num_grps; i++)
+                       free_grp_irqs(&priv->gfargrp[i]);
        } else {
-               free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
+               for (i = 0; i < priv->num_grps; i++)
+                       free_irq(priv->gfargrp[i].interruptTransmit,
+                                       &priv->gfargrp[i]);
        }
 
        free_skb_resources(priv);
@@ -1412,13 +1638,13 @@ static void free_skb_resources(struct gfar_private *priv)
        /* Go through all the buffer descriptors and free their data buffers */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
-               if(!tx_queue->tx_skbuff)
+               if(tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               if(!rx_queue->rx_skbuff)
+               if(rx_queue->rx_skbuff)
                        free_skb_rx_queue(rx_queue);
        }
 
@@ -1432,8 +1658,9 @@ static void free_skb_resources(struct gfar_private *priv)
 void gfar_start(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
+       int i = 0;
 
        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
@@ -1450,92 +1677,149 @@ void gfar_start(struct net_device *dev)
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&regs->dmactrl, tempval);
 
-       /* Clear THLT/RHLT, so that the DMA starts polling now */
-       gfar_write(&regs->tstat, priv->gfargrp.tstat);
-       gfar_write(&regs->rstat, priv->gfargrp.rstat);
-
-       /* Unmask the interrupts we look for */
-       gfar_write(&regs->imask, IMASK_DEFAULT);
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               /* Clear THLT/RHLT, so that the DMA starts polling now */
+               gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+               gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+               /* Unmask the interrupts we look for */
+               gfar_write(&regs->imask, IMASK_DEFAULT);
+       }
 
        dev->trans_start = jiffies;
 }
 
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
+void gfar_configure_coalescing(struct gfar_private *priv,
+       unsigned long tx_mask, unsigned long rx_mask)
 {
-       struct gfar_private *priv = netdev_priv(ndev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
-       int err;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 __iomem *baddr;
+       int i = 0;
 
-       gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       /* Backward compatible case ---- even if we enable
+        * multiple queues, there's only single reg to program
+        */
+       gfar_write(&regs->txic, 0);
+       if(likely(priv->tx_queue[0]->txcoalescing))
+               gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 
-       err = gfar_alloc_skb_resources(ndev);
-       if (err)
-               return err;
+       gfar_write(&regs->rxic, 0);
+       if(unlikely(priv->rx_queue[0]->rxcoalescing))
+               gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
 
-       gfar_init_mac(ndev);
+       if (priv->mode == MQ_MG_MODE) {
+               baddr = &regs->txic0;
+               for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+                       if (likely(priv->tx_queue[i]->txcoalescing)) {
+                               gfar_write(baddr + i, 0);
+                               gfar_write(baddr + i, priv->tx_queue[i]->txic);
+                       }
+               }
+
+               baddr = &regs->rxic0;
+               for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
+                               gfar_write(baddr + i, 0);
+                               gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+                       }
+               }
+       }
+}
+
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+       struct gfar_private *priv = grp->priv;
+       struct net_device *dev = priv->ndev;
+       int err;
 
        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
-               err = request_irq(priv->gfargrp.interruptError, gfar_error, 0,
-                                 priv->gfargrp.int_name_er, &priv->gfargrp);
-               if (err) {
+               if ((err = request_irq(grp->interruptError, gfar_error, 0,
+                               grp->int_name_er, grp)) < 0) {
                        if (netif_msg_intr(priv))
-                               pr_err("%s: Can't get IRQ %d\n", ndev->name,
-                                      priv->gfargrp.interruptError);
-                       goto err_irq_fail;
+                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
+                                       dev->name, grp->interruptError);
+
+                       goto err_irq_fail;
                }
 
-               err = request_irq(priv->gfargrp.interruptTransmit,
-                                       gfar_transmit, 0,
-                                       priv->gfargrp.int_name_tx,
-                                       &priv->gfargrp);
-               if (err) {
+               if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
+                               0, grp->int_name_tx, grp)) < 0) {
                        if (netif_msg_intr(priv))
-                               pr_err("%s: Can't get IRQ %d\n", ndev->name,
-                                      priv->gfargrp.interruptTransmit);
+                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
+                                       dev->name, grp->interruptTransmit);
                        goto tx_irq_fail;
                }
 
-               err = request_irq(priv->gfargrp.interruptReceive,
-                                       gfar_receive, 0,
-                                       priv->gfargrp.int_name_rx,
-                                       &priv->gfargrp);
-               if (err) {
+               if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
+                               grp->int_name_rx, grp)) < 0) {
                        if (netif_msg_intr(priv))
-                               pr_err("%s: Can't get IRQ %d (receive0)\n",
-                                       ndev->name,
-                                       priv->gfargrp.interruptReceive);
+                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
+                                       dev->name, grp->interruptReceive);
                        goto rx_irq_fail;
                }
        } else {
-               err = request_irq(priv->gfargrp.interruptTransmit,
-                                       gfar_interrupt, 0,
-                                       priv->gfargrp.int_name_tx,
-                                       &priv->gfargrp);
-               if (err) {
+               if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
+                               grp->int_name_tx, grp)) < 0) {
                        if (netif_msg_intr(priv))
-                               pr_err("%s: Can't get IRQ %d\n", ndev->name,
-                                      priv->gfargrp.interruptTransmit);
+                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
+                                       dev->name, grp->interruptTransmit);
                        goto err_irq_fail;
                }
        }
 
+       return 0;
+
+rx_irq_fail:
+       free_irq(grp->interruptTransmit, grp);
+tx_irq_fail:
+       free_irq(grp->interruptError, grp);
+err_irq_fail:
+       return err;
+
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct gfar __iomem *regs = NULL;
+       int err, i, j;
+
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       }
+
+       regs = priv->gfargrp[0].regs;
+       err = gfar_alloc_skb_resources(ndev);
+       if (err)
+               return err;
+
+       gfar_init_mac(ndev);
+
+       for (i = 0; i < priv->num_grps; i++) {
+               err = register_grp_irqs(&priv->gfargrp[i]);
+               if (err) {
+                       for (j = 0; j < i; j++)
+                               free_grp_irqs(&priv->gfargrp[j]);
+                       goto irq_fail;
+               }
+       }
+
        /* Start the controller */
        gfar_start(ndev);
 
        phy_start(priv->phydev);
 
+       gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
        return 0;
 
-rx_irq_fail:
-       free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
-tx_irq_fail:
-       free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
-err_irq_fail:
+irq_fail:
        free_skb_resources(priv);
        return err;
 }
@@ -1547,7 +1831,7 @@ static int gfar_enet_open(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        int err;
 
-       napi_enable(&priv->gfargrp.napi);
+       enable_napi(priv);
 
        skb_queue_head_init(&priv->rx_recycle);
 
@@ -1559,13 +1843,13 @@ static int gfar_enet_open(struct net_device *dev)
        err = init_phy(dev);
 
        if (err) {
-               napi_disable(&priv->gfargrp.napi);
+               disable_napi(priv);
                return err;
        }
 
        err = startup_gfar(dev);
        if (err) {
-               napi_disable(&priv->gfargrp.napi);
+               disable_napi(priv);
                return err;
        }
 
@@ -1654,7 +1938,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_queue = priv->tx_queue[rq];
        txq = netdev_get_tx_queue(dev, rq);
        base = tx_queue->tx_bd_base;
-       regs = priv->gfargrp.regs;
+       regs = tx_queue->grp->regs;
 
        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1675,19 +1959,17 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;
 
-       spin_lock_irqsave(&tx_queue->txlock, flags);
-
        /* check if there is space to queue this packet */
        if ((nr_frags+1) > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
                netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
-               spin_unlock_irqrestore(&tx_queue->txlock, flags);
                return NETDEV_TX_BUSY;
        }
 
        /* Update transmit stats */
-       dev->stats.tx_bytes += skb->len;
+       txq->tx_bytes += skb->len;
+       txq->tx_packets++;
 
        txbdp = txbdp_start = tx_queue->cur_tx;
 
@@ -1739,13 +2021,26 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* setup the TxBD length and buffer pointer for the first BD */
-       tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
 
        lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
        /*
+        * We can work in parallel with gfar_clean_tx_ring(), except
+        * when modifying num_txbdfree. Note that we didn't grab the lock
+        * when we were reading the num_txbdfree and checking for available
+        * space, that's because outside of this function it can only grow,
+        * and once we've got needed space, it cannot suddenly disappear.
+        *
+        * The lock also protects us from gfar_error(), which can modify
+        * regs->tstat and thus retrigger the transfers, which is why we
+        * also must grab the lock before setting ready bit for the first
+        * to be transmitted BD.
+        */
+       spin_lock_irqsave(&tx_queue->txlock, flags);
+
+       /*
         * The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
@@ -1757,6 +2052,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txbdp_start->lstatus = lstatus;
 
+       eieio(); /* force lstatus write before tx_skbuff */
+
+       tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
        /* Update the current skb pointer to the next entry we will use
         * (wrapping if necessary) */
        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
@@ -1791,7 +2090,7 @@ static int gfar_close(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
 
-       napi_disable(&priv->gfargrp.napi);
+       disable_napi(priv);
 
        skb_queue_purge(&priv->rx_recycle);
        cancel_work_sync(&priv->reset_task);
@@ -1824,7 +2123,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
        unsigned long flags;
        u32 tempval;
 
-       regs = priv->gfargrp.regs;
+       regs = priv->gfargrp[0].regs;
        local_irq_save(flags);
        lock_rx_qs(priv);
 
@@ -1868,7 +2167,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;
 
@@ -1972,6 +2271,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        skb_dirtytx = tx_queue->skb_dirtytx;
 
        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+               unsigned long flags;
+
                frags = skb_shinfo(skb)->nr_frags;
                lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -2016,7 +2317,9 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                        TX_RING_MOD_MASK(tx_ring_size);
 
                howmany++;
+               spin_lock_irqsave(&tx_queue->txlock, flags);
                tx_queue->num_txbdfree += frags + 1;
+               spin_unlock_irqrestore(&tx_queue->txlock, flags);
        }
 
        /* If we freed a buffer, we can restart transmission, if necessary */
@@ -2027,8 +2330,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        tx_queue->skb_dirtytx = skb_dirtytx;
        tx_queue->dirty_tx = bdp;
 
-       dev->stats.tx_packets += howmany;
-
        return howmany;
 }
 
@@ -2092,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);
+       GFAR_CB(skb)->alignamount = alignamount;
 
        return skb;
 }
@@ -2166,10 +2468,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        fcb = (struct rxfcb *)skb->data;
 
        /* Remove the FCB from the skb */
-       skb_set_queue_mapping(skb, fcb->rq);
        /* Remove the padded bytes, if there are any */
-       if (amount_pull)
+       if (amount_pull) {
+               skb_record_rx_queue(skb, fcb->rq);
                skb_pull(skb, amount_pull);
+       }
 
        if (priv->rx_csum_enable)
                gfar_rx_checksum(skb, fcb);
@@ -2231,35 +2534,33 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                                newskb = skb;
                        else if (skb) {
                                /*
-                                * We need to reset ->data to what it
+                                * We need to un-reserve() the skb to what it
                                 * was before gfar_new_skb() re-aligned
                                 * it to an RXBUF_ALIGNMENT boundary
                                 * before we put the skb back on the
                                 * recycle list.
                                 */
-                               skb->data = skb->head + NET_SKB_PAD;
+                               skb_reserve(skb, -GFAR_CB(skb)->alignamount);
                                __skb_queue_head(&priv->rx_recycle, skb);
                        }
                } else {
                        /* Increment the number of packets */
-                       dev->stats.rx_packets++;
+                       rx_queue->stats.rx_packets++;
                        howmany++;
 
                        if (likely(skb)) {
                                pkt_len = bdp->length - ETH_FCS_LEN;
                                /* Remove the FCS from the packet length */
                                skb_put(skb, pkt_len);
-                               dev->stats.rx_bytes += pkt_len;
-
-                               if (in_irq() || irqs_disabled())
-                                       printk("Interrupt problem!\n");
+                               rx_queue->stats.rx_bytes += pkt_len;
+                               skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull);
 
                        } else {
                                if (netif_msg_rx_err(priv))
                                        printk(KERN_WARNING
                                               "%s: Missing skb!\n", dev->name);
-                               dev->stats.rx_dropped++;
+                               rx_queue->stats.rx_dropped++;
                                priv->extra_stats.rx_skbmissing++;
                        }
 
@@ -2290,13 +2591,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        struct gfar_priv_grp *gfargrp = container_of(napi,
                        struct gfar_priv_grp, napi);
        struct gfar_private *priv = gfargrp->priv;
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
-       int tx_cleaned = 0, i, left_over_budget = budget, serviced_queues = 0;
+       int tx_cleaned = 0, i, left_over_budget = budget;
+       unsigned long serviced_queues = 0;
        int num_queues = 0;
-       unsigned long flags;
 
        num_queues = gfargrp->num_rx_queues;
        budget_per_queue = budget/num_queues;
@@ -2310,20 +2611,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                budget_per_queue = left_over_budget/num_queues;
                left_over_budget = 0;
 
-               for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+               for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                        if (test_bit(i, &serviced_queues))
                                continue;
                        rx_queue = priv->rx_queue[i];
                        tx_queue = priv->tx_queue[rx_queue->qindex];
 
-                       /* If we fail to get the lock,
-                        * don't bother with the TX BDs */
-                       if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
-                               tx_cleaned += gfar_clean_tx_ring(tx_queue);
-                               spin_unlock_irqrestore(&tx_queue->txlock,
-                                                       flags);
-                       }
-
+                       tx_cleaned += gfar_clean_tx_ring(tx_queue);
                        rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
                                                        budget_per_queue);
                        rx_cleaned += rx_cleaned_per_queue;
@@ -2349,14 +2643,8 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
                /* If we are coalescing interrupts, update the timer */
                /* Otherwise, clear it */
-               if (likely(rx_queue->rxcoalescing)) {
-                       gfar_write(&regs->rxic, 0);
-                       gfar_write(&regs->rxic, rx_queue->rxic);
-               }
-               if (likely(tx_queue->txcoalescing)) {
-                       gfar_write(&regs->txic, 0);
-                       gfar_write(&regs->txic, tx_queue->txic);
-               }
+               gfar_configure_coalescing(priv,
+                               gfargrp->rx_bit_map, gfargrp->tx_bit_map);
        }
 
        return rx_cleaned;
@@ -2371,20 +2659,27 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 static void gfar_netpoll(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       int i = 0;
 
        /* If the device has multiple interrupts, run tx/rx */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               disable_irq(priv->gfargrp.interruptTransmit);
-               disable_irq(priv->gfargrp.interruptReceive);
-               disable_irq(priv->gfargrp.interruptError);
-               gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
-               enable_irq(priv->gfargrp.interruptError);
-               enable_irq(priv->gfargrp.interruptReceive);
-               enable_irq(priv->gfargrp.interruptTransmit);
+               for (i = 0; i < priv->num_grps; i++) {
+                       disable_irq(priv->gfargrp[i].interruptTransmit);
+                       disable_irq(priv->gfargrp[i].interruptReceive);
+                       disable_irq(priv->gfargrp[i].interruptError);
+                       gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+                                               &priv->gfargrp[i]);
+                       enable_irq(priv->gfargrp[i].interruptError);
+                       enable_irq(priv->gfargrp[i].interruptReceive);
+                       enable_irq(priv->gfargrp[i].interruptTransmit);
+               }
        } else {
-               disable_irq(priv->gfargrp.interruptTransmit);
-               gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
-               enable_irq(priv->gfargrp.interruptTransmit);
+               for (i = 0; i < priv->num_grps; i++) {
+                       disable_irq(priv->gfargrp[i].interruptTransmit);
+                       gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+                                               &priv->gfargrp[i]);
+                       enable_irq(priv->gfargrp[i].interruptTransmit);
+               }
        }
 }
 #endif
@@ -2421,7 +2716,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;
@@ -2505,7 +2800,7 @@ static void gfar_set_multi(struct net_device *dev)
 {
        struct dev_mc_list *mc_ptr;
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
 
        if (dev->flags & IFF_PROMISC) {
@@ -2572,11 +2867,11 @@ static void gfar_set_multi(struct net_device *dev)
                        em_num = 0;
                }
 
-               if (dev->mc_count == 0)
+               if (netdev_mc_empty(dev))
                        return;
 
                /* Parse the list, and set the appropriate bits */
-               for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+               netdev_for_each_mc_addr(mc_ptr, dev) {
                        if (idx < em_num) {
                                gfar_set_mac_for_addr(dev, idx,
                                                mc_ptr->dmi_addr);
@@ -2638,7 +2933,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp.regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int idx;
        char tmpbuf[MAC_ADDR_LEN];
        u32 tempval;
@@ -2691,14 +2986,22 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
+                       unsigned long flags;
+
                        if (netif_msg_tx_err(priv))
                                printk(KERN_DEBUG "%s: TX FIFO underrun, "
                                       "packet dropped.\n", dev->name);
                        dev->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;
 
+                       local_irq_save(flags);
+                       lock_tx_qs(priv);
+
                        /* Reactivate the Tx Queues */
                        gfar_write(&regs->tstat, gfargrp->tstat);
+
+                       unlock_tx_qs(priv);
+                       local_irq_restore(flags);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2742,6 +3045,9 @@ static struct of_device_id gfar_match[] =
                .type = "network",
                .compatible = "gianfar",
        },
+       {
+               .compatible = "fsl,etsec2",
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, gfar_match);