Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c9be090..ea7d5dd 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/in.h>
+#include <linux/net_tstamp.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
                rctrl |= RCTRL_PADDING(priv->padding);
        }
 
+       /* Insert receive time stamps into padding alignment bytes */
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+               rctrl &= ~RCTRL_PAL_MASK;
+               rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+               priv->padding = 8;
+       }
+
        /* keep vlan related bits if it's enabled */
        if (priv->vlgrp) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
 /* Returns 1 if incoming frames use an FCB */
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
-       return priv->vlgrp || priv->rx_csum_enable;
+       return priv->vlgrp || priv->rx_csum_enable ||
+               (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
 }
 
 static void free_tx_pointers(struct gfar_private *priv)
@@ -549,12 +558,8 @@ static int gfar_parse_group(struct device_node *np,
                struct gfar_private *priv, const char *model)
 {
        u32 *queue_mask;
-       u64 addr, size;
-
-       addr = of_translate_address(np,
-                       of_get_address(np, 0, &size, NULL));
-       priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
 
+       priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
        if (!priv->gfargrp[priv->num_grps].regs)
                return -ENOMEM;
 
@@ -676,7 +681,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
                priv->rx_queue[i] = NULL;
 
        for (i = 0; i < priv->num_tx_queues; i++) {
-               priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kmalloc(
+               priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
                                sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
@@ -689,7 +694,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
-               priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+               priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
                                        sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
@@ -742,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+                       FSL_GIANFAR_DEV_HAS_TIMER;
 
        ctype = of_get_property(np, "phy-connection-type", NULL);
 
@@ -772,6 +778,48 @@ err_grp_init:
        return err;
 }
 
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+                       struct ifreq *ifr, int cmd)
+{
+       struct hwtstamp_config config;
+       struct gfar_private *priv = netdev_priv(netdev);
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               priv->hwts_tx_en = 0;
+               break;
+       case HWTSTAMP_TX_ON:
+               if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+                       return -ERANGE;
+               priv->hwts_tx_en = 1;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               priv->hwts_rx_en = 0;
+               break;
+       default:
+               if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+                       return -ERANGE;
+               priv->hwts_rx_en = 1;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       }
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
+}
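
For context, userspace reaches the handler above through the standard SIOCSHWTSTAMP request. Below is a minimal sketch of such a caller; it uses only the generic <linux/net_tstamp.h>/<linux/sockios.h> interface, and the interface name "eth0" is an illustrative assumption rather than anything mandated by this patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Sketch: enable TX and RX hardware time stamping on "eth0". */
int enable_hwtstamp(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;           /* handled by gfar_hwtstamp_ioctl() */
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;    /* the driver reports FILTER_ALL back */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
                perror("SIOCSHWTSTAMP");
                close(fd);
                return -1;
        }
        return fd;
}

Because the driver copies the effective configuration back into ifr_data, the caller can inspect cfg after the ioctl to see which filter was actually applied.
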
+
 /* Ioctl MII Interface */
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -780,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!netif_running(dev))
                return -EINVAL;
 
+       if (cmd == SIOCSHWTSTAMP)
+               return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
        if (!priv->phydev)
                return -ENODEV;
 
@@ -982,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
        else
                priv->padding = 0;
 
-       if (dev->features & NETIF_F_IP_CSUM)
+       if (dev->features & NETIF_F_IP_CSUM ||
+                       priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->hard_header_len += GMAC_FCB_LEN;
 
        /* Program the isrg regs only if number of grps > 1 */
@@ -998,7 +1050,7 @@ static int gfar_probe(struct of_device *ofdev,
        }
 
        /* Need to reverse the bit maps as  bit_map's MSB is q0
-        * but, for_each_bit parses from right to left, which
+        * but, for_each_set_bit parses from right to left, which
         * basically reverses the queue numbers */
        for (i = 0; i< priv->num_grps; i++) {
                priv->gfargrp[i].tx_bit_map = reverse_bitmap(
@@ -1011,7 +1063,7 @@ static int gfar_probe(struct of_device *ofdev,
         * also assign queues to groups */
        for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-               for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
                                priv->num_rx_queues) {
                        priv->gfargrp[grp_idx].num_rx_queues++;
                        priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1019,7 +1071,7 @@ static int gfar_probe(struct of_device *ofdev,
                        rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                }
                priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-               for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
                                priv->num_tx_queues) {
                        priv->gfargrp[grp_idx].num_tx_queues++;
                        priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1120,10 +1172,10 @@ static int gfar_probe(struct of_device *ofdev,
        /* provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
        for (i = 0; i < priv->num_rx_queues; i++)
-               printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+               printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->rx_queue[i]->rx_ring_size);
        for(i = 0; i < priv->num_tx_queues; i++)
-                printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+                printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
        return 0;
@@ -1515,9 +1567,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&regs->dmactrl, tempval);
 
-               while (!(gfar_read(&regs->ievent) &
-                        (IEVENT_GRSC | IEVENT_GTSC)))
-                       cpu_relax();
+               spin_event_timeout(((gfar_read(&regs->ievent) &
+                        (IEVENT_GRSC | IEVENT_GTSC)) ==
+                        (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
        }
 }
 
@@ -1638,13 +1690,13 @@ static void free_skb_resources(struct gfar_private *priv)
        /* Go through all the buffer descriptors and free their data buffers */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
-               if(!tx_queue->tx_skbuff)
+               if(tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               if(!rx_queue->rx_skbuff)
+               if(rx_queue->rx_skbuff)
                        free_skb_rx_queue(rx_queue);
        }
 
@@ -1653,6 +1705,7 @@ static void free_skb_resources(struct gfar_private *priv)
                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
                        priv->tx_queue[0]->tx_bd_base,
                        priv->tx_queue[0]->tx_bd_dma_base);
+       skb_queue_purge(&priv->rx_recycle);
 }
 
 void gfar_start(struct net_device *dev)
@@ -1686,7 +1739,7 @@ void gfar_start(struct net_device *dev)
                gfar_write(&regs->imask, IMASK_DEFAULT);
        }
 
-       dev->trans_start = jiffies;
+       dev->trans_start = jiffies; /* prevent tx timeout */
 }
 
 void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1709,7 +1762,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
-               for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+               for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                        if (likely(priv->tx_queue[i]->txcoalescing)) {
                                gfar_write(baddr + i, 0);
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1717,7 +1770,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
                }
 
                baddr = &regs->rxic0;
-               for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+               for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                        if (likely(priv->rx_queue[i]->rxcoalescing)) {
                                gfar_write(baddr + i, 0);
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -1926,23 +1979,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct netdev_queue *txq;
        struct gfar __iomem *regs = NULL;
        struct txfcb *fcb = NULL;
-       struct txbd8 *txbdp, *txbdp_start, *base;
+       struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
        u32 lstatus;
-       int i, rq = 0;
+       int i, rq = 0, do_tstamp = 0;
        u32 bufaddr;
        unsigned long flags;
-       unsigned int nr_frags, length;
-
+       unsigned int nr_frags, nr_txbds, length;
+       union skb_shared_tx *shtx;
 
        rq = skb->queue_mapping;
        tx_queue = priv->tx_queue[rq];
        txq = netdev_get_tx_queue(dev, rq);
        base = tx_queue->tx_bd_base;
        regs = tx_queue->grp->regs;
+       shtx = skb_tx(skb);
+
+       /* check if time stamp should be generated */
+       if (unlikely(shtx->hardware && priv->hwts_tx_en))
+               do_tstamp = 1;
 
        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-                       (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+                       (priv->vlgrp && vlan_tx_tag_present(skb)) ||
+                       unlikely(do_tstamp)) &&
                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
                struct sk_buff *skb_new;
 
@@ -1959,8 +2018,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;
 
+       /* calculate the required number of TxBDs for this skb */
+       if (unlikely(do_tstamp))
+               nr_txbds = nr_frags + 2;
+       else
+               nr_txbds = nr_frags + 1;
+
        /* check if there is space to queue this packet */
-       if ((nr_frags+1) > tx_queue->num_txbdfree) {
+       if (nr_txbds > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
                netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
@@ -1972,9 +2037,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        txq->tx_packets ++;
 
        txbdp = txbdp_start = tx_queue->cur_tx;
+       lstatus = txbdp->lstatus;
+
+       /* Time stamp insertion requires one additional TxBD */
+       if (unlikely(do_tstamp))
+               txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+                               tx_queue->tx_ring_size);
 
        if (nr_frags == 0) {
-               lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+               if (unlikely(do_tstamp))
+                       txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+                                       TXBD_INTERRUPT);
+               else
+                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
@@ -2020,12 +2095,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                gfar_tx_vlan(skb, fcb);
        }
 
-       /* setup the TxBD length and buffer pointer for the first BD */
-       tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+       /* Setup tx hardware time stamping if requested */
+       if (unlikely(do_tstamp)) {
+               shtx->in_progress = 1;
+               if (fcb == NULL)
+                       fcb = gfar_add_fcb(skb);
+               fcb->ptp = 1;
+               lstatus |= BD_LFLAG(TXBD_TOE);
+       }
+
        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
 
-       lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+       /*
+        * If time stamping is requested one additional TxBD must be set up. The
+        * first TxBD points to the FCB and must have a data length of
+        * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+        * the full frame length.
+        */
+       if (unlikely(do_tstamp)) {
+               txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+               txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+                               (skb_headlen(skb) - GMAC_FCB_LEN);
+               lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+       } else {
+               lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+       }
 
        /*
         * We can work in parallel with gfar_clean_tx_ring(), except
@@ -2053,6 +2148,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txbdp_start->lstatus = lstatus;
 
+       eieio(); /* force lstatus write before tx_skbuff */
+
+       tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
        /* Update the current skb pointer to the next entry we will use
         * (wrapping if necessary) */
        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
@@ -2061,9 +2160,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
        /* reduce TxBD free count */
-       tx_queue->num_txbdfree -= (nr_frags + 1);
-
-       dev->trans_start = jiffies;
+       tx_queue->num_txbdfree -= (nr_txbds);
 
        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
@@ -2089,7 +2186,6 @@ static int gfar_close(struct net_device *dev)
 
        disable_napi(priv);
 
-       skb_queue_purge(&priv->rx_recycle);
        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);
 
@@ -2252,16 +2348,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        struct net_device *dev = tx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_rx_q *rx_queue = NULL;
-       struct txbd8 *bdp;
+       struct txbd8 *bdp, *next = NULL;
        struct txbd8 *lbdp = NULL;
        struct txbd8 *base = tx_queue->tx_bd_base;
        struct sk_buff *skb;
        int skb_dirtytx;
        int tx_ring_size = tx_queue->tx_ring_size;
-       int frags = 0;
+       int frags = 0, nr_txbds = 0;
        int i;
        int howmany = 0;
        u32 lstatus;
+       size_t buflen;
+       union skb_shared_tx *shtx;
 
        rx_queue = priv->rx_queue[tx_queue->qindex];
        bdp = tx_queue->dirty_tx;
@@ -2271,7 +2369,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                unsigned long flags;
 
                frags = skb_shinfo(skb)->nr_frags;
-               lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+               /*
+                * When time stamping, one additional TxBD must be freed.
+                * Also, we need to dma_unmap_single() the TxPAL.
+                */
+               shtx = skb_tx(skb);
+               if (unlikely(shtx->in_progress))
+                       nr_txbds = frags + 2;
+               else
+                       nr_txbds = frags + 1;
+
+               lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
                lstatus = lbdp->lstatus;
 
@@ -2280,10 +2389,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                                (lstatus & BD_LENGTH_MASK))
                        break;
 
-               dma_unmap_single(&priv->ofdev->dev,
-                               bdp->bufPtr,
-                               bdp->length,
-                               DMA_TO_DEVICE);
+               if (unlikely(shtx->in_progress)) {
+                       next = next_txbd(bdp, base, tx_ring_size);
+                       buflen = next->length + GMAC_FCB_LEN;
+               } else
+                       buflen = bdp->length;
+
+               dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+                               buflen, DMA_TO_DEVICE);
+
+               if (unlikely(shtx->in_progress)) {
+                       struct skb_shared_hwtstamps shhwtstamps;
+                       u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+                       shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+                       skb_tstamp_tx(skb, &shhwtstamps);
+                       bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+                       bdp = next;
+               }
 
                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2315,7 +2438,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                howmany++;
                spin_lock_irqsave(&tx_queue->txlock, flags);
-               tx_queue->num_txbdfree += frags + 1;
+               tx_queue->num_txbdfree += nr_txbds;
                spin_unlock_irqrestore(&tx_queue->txlock, flags);
        }
 
@@ -2390,6 +2513,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);
+       GFAR_CB(skb)->alignamount = alignamount;
 
        return skb;
 }
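
GFAR_CB() itself is not visible in this diff; it is introduced in gianfar.h by the same series. As a rough sketch of the conventional skb->cb pattern it follows (the exact struct name and layout in gianfar.h may differ):

/* Sketch only; see gianfar.h for the real definition. */
struct gianfar_skb_cb {
        int alignamount;        /* bytes reserved by gfar_new_skb() */
};

#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))

Recording the alignment amount per skb is what lets the recycle path further down undo the reservation with skb_reserve(skb, -GFAR_CB(skb)->alignamount) instead of poking skb->data directly.
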
@@ -2470,6 +2594,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                skb_pull(skb, amount_pull);
        }
 
+       /* Get receive timestamp from the skb */
+       if (priv->hwts_rx_en) {
+               struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+               u64 *ns = (u64 *) skb->data;
+               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+               shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+       }
+
+       if (priv->padding)
+               skb_pull(skb, priv->padding);
+
        if (priv->rx_csum_enable)
                gfar_rx_checksum(skb, fcb);
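
The timestamp stored through skb_hwtstamps() above reaches applications via the standard SO_TIMESTAMPING socket interface (Documentation/networking/timestamping.txt). A rough userspace sketch follows, assuming hardware time stamping was already enabled with SIOCSHWTSTAMP as in the earlier sketch; error handling is trimmed and the raw hardware timestamp is taken from the third timespec of the control message.

#include <sys/socket.h>
#include <sys/uio.h>
#include <time.h>
#include <linux/net_tstamp.h>

/* Sketch: request raw hardware RX timestamps, then read one from a cmsg. */
static int rx_with_hwtstamp(int fd, void *buf, size_t len, struct timespec *hwts)
{
        int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
        char control[256];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = control, .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg;

        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

        if (recvmsg(fd, &msg, 0) < 0)
                return -1;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SO_TIMESTAMPING) {
                        /* Payload is three timespecs: software, legacy, raw hw */
                        struct timespec *ts = (struct timespec *)CMSG_DATA(cmsg);
                        *hwts = ts[2];  /* raw hardware timestamp */
                        return 0;
                }
        }
        return -1;
}
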
 
@@ -2506,8 +2641,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
        bdp = rx_queue->cur_rx;
        base = rx_queue->rx_bd_base;
 
-       amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
-               priv->padding;
+       amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
 
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                struct sk_buff *newskb;
@@ -2530,13 +2664,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                                newskb = skb;
                        else if (skb) {
                                /*
-                                * We need to reset ->data to what it
+                                * We need to un-reserve() the skb to what it
                                 * was before gfar_new_skb() re-aligned
                                 * it to an RXBUF_ALIGNMENT boundary
                                 * before we put the skb back on the
                                 * recycle list.
                                 */
-                               skb->data = skb->head + NET_SKB_PAD;
+                               skb_reserve(skb, -GFAR_CB(skb)->alignamount);
                                __skb_queue_head(&priv->rx_recycle, skb);
                        }
                } else {
@@ -2607,7 +2741,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                budget_per_queue = left_over_budget/num_queues;
                left_over_budget = 0;
 
-               for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+               for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                        if (test_bit(i, &serviced_queues))
                                continue;
                        rx_queue = priv->rx_queue[i];
@@ -2794,7 +2928,7 @@ static void adjust_link(struct net_device *dev)
  * whenever dev->flags is changed */
 static void gfar_set_multi(struct net_device *dev)
 {
-       struct dev_mc_list *mc_ptr;
+       struct netdev_hw_addr *ha;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
@@ -2867,13 +3001,12 @@ static void gfar_set_multi(struct net_device *dev)
                        return;
 
                /* Parse the list, and set the appropriate bits */
-               for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+               netdev_for_each_mc_addr(ha, dev) {
                        if (idx < em_num) {
-                               gfar_set_mac_for_addr(dev, idx,
-                                               mc_ptr->dmi_addr);
+                               gfar_set_mac_for_addr(dev, idx, ha->addr);
                                idx++;
                        } else
-                               gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+                               gfar_set_hash_for_addr(dev, ha->addr);
                }
        }