/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
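/*
 * To make the ring layout concrete, here is a minimal illustrative sketch
 * (not part of the driver) of how a consumer advances through such a ring:
 * walk forward one descriptor at a time, and return to the base after the
 * descriptor that carries the wrap bit. The driver's own helpers below
 * (e.g. skip_txbd()/next_txbd()) express the same idea with pointer
 * arithmetic on the ring size instead of testing the wrap flag.
 *
 *	struct bd { u16 status; };		// hypothetical, trimmed BD
 *	#define BD_WRAP 0x2000			// assumed wrap flag
 *
 *	static struct bd *ring_next(struct bd *bdp, struct bd *base)
 *	{
 *		return (bdp->status & BD_WRAP) ? base : bdp + 1;
 *	}
 */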
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* Initialize some variables in our dev structure */
	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
	tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
	rx_queue->cur_rx = rx_queue->rx_bd_base;
	tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
	rx_queue->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = tx_queue->tx_bd_base;
	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		txbdp->lstatus = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = rx_queue->rx_bd_base;
	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct sk_buff *skb = rx_queue->rx_skbuff[i];

		if (skb) {
			gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
		} else {
			skb = gfar_new_skb(ndev);
			if (!skb) {
				pr_err("%s: Can't allocate RX buffers\n",
						ndev->name);
				return -ENOMEM;
			}
			rx_queue->rx_skbuff[i] = skb;

			gfar_new_rxbdp(rx_queue, rxbdp, skb);
		}

		rxbdp++;
	}

	return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
			sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
			&tx_queue->tx_bd_dma_base, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
					ndev->name);
		return -ENOMEM;
	}

	tx_queue->tx_bd_base = vaddr;
	tx_queue->dev = ndev;

	/* Start the rx descriptor ring where the tx ring leaves off */
	vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
	rx_queue->rx_bd_base = vaddr;
	rx_queue->dev = ndev;

	/* Setup the skbuff rings */
	tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
			tx_queue->tx_ring_size, GFP_KERNEL);
	if (!tx_queue->tx_skbuff) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate tx_skbuff\n",
					ndev->name);
		goto cleanup;
	}

	for (i = 0; i < tx_queue->tx_ring_size; i++)
		tx_queue->tx_skbuff[i] = NULL;

	rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
			rx_queue->rx_ring_size, GFP_KERNEL);
	if (!rx_queue->rx_skbuff) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate rx_skbuff\n",
					ndev->name);
		goto cleanup;
	}

	for (i = 0; i < rx_queue->rx_ring_size; i++)
		rx_queue->rx_skbuff[i] = NULL;

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
			sizeof(*tx_queue->tx_bd_base) *
			tx_queue->tx_ring_size);

	/* Configure the coalescing support */
	gfar_write(&regs->txic, 0);
	if (tx_queue->txcoalescing)
		gfar_write(&regs->txic, tx_queue->txic);

	gfar_write(&regs->rxic, 0);
	if (rx_queue->rxcoalescing)
		gfar_write(&regs->rxic, rx_queue->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}
static int gfar_of_init(struct net_device *dev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	u64 addr, size;
	int err = 0;
	struct gfar_private *priv = netdev_priv(dev);
	struct device_node *np = priv->node;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* get a pointer to the register memory */
	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
	priv->gfargrp.regs = ioremap(addr, size);

	if (priv->gfargrp.regs == NULL)
		return -ENOMEM;

	priv->gfargrp.priv = priv; /* back pointer from group to priv */
	priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);

	model = of_get_property(np, "model", NULL);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp.interruptReceive = irq_of_parse_and_map(np, 1);

		priv->gfargrp.interruptError = irq_of_parse_and_map(np, 2);

		if (priv->gfargrp.interruptTransmit < 0 ||
				priv->gfargrp.interruptReceive < 0 ||
				priv->gfargrp.interruptError < 0) {
			err = -EINVAL;
			goto err_out;
		}
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_out:
	iounmap(priv->gfargrp.regs);
	return err;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0;
	int len_devname;

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	err = gfar_of_init(dev);

	if (err)
		goto regs_fail;

	priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
			sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
	if (!priv->tx_queue)
		goto regs_fail;

	priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
			sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
	if (!priv->rx_queue)
		goto rx_queue_fail;

	spin_lock_init(&priv->tx_queue->txlock);
	spin_lock_init(&priv->rx_queue->rxlock);
	spin_lock_init(&priv->gfargrp.grplock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp.regs;

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...NAPI is for each rx_queue */
	netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
	priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
	priv->tx_queue->txic = DEFAULT_TXIC;

	priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
	priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rx_queue->rxic = DEFAULT_RXIC;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	strncpy(&priv->gfargrp.int_name_tx[0], dev->name, len_devname);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		strncpy(&priv->gfargrp.int_name_tx[len_devname],
			"_tx", sizeof("_tx") + 1);

		strncpy(&priv->gfargrp.int_name_rx[0], dev->name, len_devname);
		strncpy(&priv->gfargrp.int_name_rx[len_devname],
			"_rx", sizeof("_rx") + 1);

		strncpy(&priv->gfargrp.int_name_er[0], dev->name, len_devname);
		strncpy(&priv->gfargrp.int_name_er[len_devname],
			"_er", sizeof("_er") + 1);
	} else
		priv->gfargrp.int_name_tx[len_devname] = '\0';

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_queue->rx_ring_size,
	       priv->tx_queue->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->gfargrp.regs);
	kfree(priv->rx_queue);
rx_queue_fail:
	kfree(priv->tx_queue);
regs_fail:
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}
static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	iounmap(priv->gfargrp.regs);
	free_netdev(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);
	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;
	regs = priv->gfargrp.regs;

	if (netif_running(ndev)) {
		spin_lock_irqsave(&tx_queue->txlock, flags);
		spin_lock(&rx_queue->rxlock);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		spin_unlock(&rx_queue->rxlock);
		spin_unlock_irqrestore(&tx_queue->txlock, flags);

		napi_disable(&rx_queue->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	rx_queue = priv->rx_queue;
	tx_queue = priv->tx_queue;
	regs = priv->gfargrp.regs;

	spin_lock_irqsave(&tx_queue->txlock, flags);
	spin_lock(&rx_queue->rxlock);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	spin_unlock(&rx_queue->rxlock);
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	netif_device_attach(ndev);

	napi_enable(&rx_queue->napi);

	return 0;
}
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	napi_enable(&priv->rx_queue->napi);

	return 0;
}
static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 ecntrl;

	regs = priv->gfargrp.regs;
	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
			interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
				interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;

	regs = priv->gfargrp.regs;
	/* Clear IEVENT */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		while (!(gfar_read(&regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	unsigned long flags;

	phy_stop(priv->phydev);

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* Lock it down */
	spin_lock_irqsave(&tx_queue->txlock, flags);
	spin_lock(&rx_queue->rxlock);

	gfar_halt(dev);

	spin_unlock(&rx_queue->rxlock);
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
		free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
		free_irq(priv->gfargrp.interruptReceive, &priv->gfargrp);
	} else {
		free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
	}

	free_skb_resources(priv);
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i, j;

	/* Go through all the buffer descriptors and free their data buffers */
	tx_queue = priv->tx_queue;
	txbdp = tx_queue->tx_bd_base;

	if (!tx_queue->tx_skbuff)
		goto skip_tx_skbuff;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}

	kfree(tx_queue->tx_skbuff);
skip_tx_skbuff:

	rx_queue = priv->rx_queue;
	rxbdp = rx_queue->rx_bd_base;

	if (!rx_queue->rx_skbuff)
		goto skip_rx_skbuff;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
					priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}

	kfree(rx_queue->rx_skbuff);
skip_rx_skbuff:

	dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
			sizeof(*rxbdp) * rx_queue->rx_ring_size,
			tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);

	dev->trans_start = jiffies;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	int err;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		err = request_irq(priv->gfargrp.interruptError, gfar_error, 0,
				priv->gfargrp.int_name_er, &priv->gfargrp);
		if (err) {
			if (netif_msg_intr(priv))
				pr_err("%s: Can't get IRQ %d\n", ndev->name,
						priv->gfargrp.interruptError);
			goto err_irq_fail;
		}

		err = request_irq(priv->gfargrp.interruptTransmit,
				gfar_transmit, 0,
				priv->gfargrp.int_name_tx,
				&priv->gfargrp);
		if (err) {
			if (netif_msg_intr(priv))
				pr_err("%s: Can't get IRQ %d\n", ndev->name,
						priv->gfargrp.interruptTransmit);
			goto tx_irq_fail;
		}

		err = request_irq(priv->gfargrp.interruptReceive,
				gfar_receive, 0,
				priv->gfargrp.int_name_rx,
				&priv->gfargrp);
		if (err) {
			if (netif_msg_intr(priv))
				pr_err("%s: Can't get IRQ %d (receive0)\n",
						ndev->name,
						priv->gfargrp.interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(priv->gfargrp.interruptTransmit,
				gfar_interrupt, 0,
				priv->gfargrp.int_name_tx,
				&priv->gfargrp);
		if (err) {
			if (netif_msg_intr(priv))
				pr_err("%s: Can't get IRQ %d\n", ndev->name,
						priv->gfargrp.interruptTransmit);
			goto err_irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	return 0;

rx_irq_fail:
	free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
tx_irq_fail:
	free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
err_irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->rx_queue->napi);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		napi_disable(&priv->rx_queue->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->rx_queue->napi);
		return err;
	}

	netif_start_queue(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
		struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
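/*
 * A quick worked example of the ring arithmetic above (illustrative only):
 * with ring_size = 8, a frame whose first BD sits at index 6 and which
 * carries 3 fragments has its last BD at skip_txbd(bdp, 3, base, 8),
 * i.e. (6 + 3) - 8 = index 1, wrapping past the end of the ring.
 */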
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base;
	u32 lstatus;
	int i;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, length;

	tx_queue = priv->tx_queue;
	base = tx_queue->tx_bd_base;
	regs = priv->gfargrp.regs;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
			(skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* check if there is space to queue this packet */
	if ((nr_frags+1) > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_stop_queue(dev);
		dev->stats.tx_fifo_errors++;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	txbdp = txbdp_start = tx_queue->cur_tx;

	if (nr_frags == 0) {
		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* setup the TxBD length and buffer pointer for the first BD */
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_frags + 1);

	dev->trans_start = jiffies;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full. We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->rx_queue->napi);

	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	rx_queue = priv->rx_queue;
	regs = priv->gfargrp.regs;
	spin_lock_irqsave(&rx_queue->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_stop_queue(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_start_queue(dev);
	}

	netif_tx_schedule_all(dev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0;
	int i;
	int howmany = 0;
	u32 lstatus;

	rx_queue = priv->rx_queue;
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		frags = skb_shinfo(skb)->nr_frags;
		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		dma_unmap_single(&priv->ofdev->dev,
				bdp->bufPtr,
				bdp->length,
				DMA_TO_DEVICE);

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		tx_queue->num_txbdfree += frags + 1;
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
		netif_wake_queue(dev);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	dev->stats.tx_packets += howmany;

	return howmany;
}
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	struct gfar_private *priv = gfargrp->priv;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	unsigned long flags;

	rx_queue = priv->rx_queue;
	tx_queue = priv->tx_queue;
	spin_lock_irqsave(&tx_queue->txlock, flags);
	spin_lock(&rx_queue->rxlock);

	if (napi_schedule_prep(&rx_queue->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&rx_queue->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}

	spin_unlock(&rx_queue->rxlock);
	spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);

	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);

	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				if (in_irq() || irqs_disabled())
					printk("Interrupt problem!\n");
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
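/*
 * Illustrative arithmetic for amount_pull above (constants assumed from
 * the gianfar.h defaults, so treat this as a sketch): with an FCB in use
 * (GMAC_FCB_LEN == 8) and a padding of DEFAULT_PADDING == 2, amount_pull
 * is 8 + 2 = 10, and gfar_process_frame() strips those 10 bytes from the
 * head of each frame after reading the FCB that sits there.
 */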
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_rx_q *rx_queue = container_of(napi,
			struct gfar_priv_rx_q, napi);
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int tx_cleaned = 0;
	int rx_cleaned = 0;
	unsigned long flags;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
	tx_queue = priv->tx_queue;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
		tx_cleaned = gfar_clean_tx_ring(tx_queue);
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(rx_queue->rxcoalescing)) {
			gfar_write(&regs->rxic, 0);
			gfar_write(&regs->rxic, rx_queue->rxic);
		}
		if (likely(tx_queue->txcoalescing)) {
			gfar_write(&regs->txic, 0);
			gfar_write(&regs->txic, tx_queue->txic);
		}
	}

	return rx_cleaned;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->gfargrp.interruptTransmit);
		disable_irq(priv->gfargrp.interruptReceive);
		disable_irq(priv->gfargrp.interruptError);
		gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
		enable_irq(priv->gfargrp.interruptError);
		enable_irq(priv->gfargrp.interruptReceive);
		enable_irq(priv->gfargrp.interruptTransmit);
	} else {
		disable_irq(priv->gfargrp.interruptTransmit);
		gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
		enable_irq(priv->gfargrp.interruptTransmit);
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
2174 /* Called every time the controller might need to be made
2175 * aware of new link state. The PHY code conveys this
2176 * information through variables in the phydev structure, and this
2177 * function converts those variables into the appropriate
2178 * register values, and can bring down the device if needed.
2180 static void adjust_link(struct net_device *dev)
2182 struct gfar_private *priv = netdev_priv(dev);
2183 struct gfar_priv_tx_q *tx_queue = NULL;
2184 struct gfar __iomem *regs = priv->gfargrp.regs;
2185 unsigned long flags;
2186 struct phy_device *phydev = priv->phydev;
2189 tx_queue = priv->tx_queue;
2190 spin_lock_irqsave(&tx_queue->txlock, flags);
2192 u32 tempval = gfar_read(®s->maccfg2);
2193 u32 ecntrl = gfar_read(®s->ecntrl);
2195 /* Now we make sure that we can be in full duplex mode.
2196 * If not, we operate in half-duplex mode. */
2197 if (phydev->duplex != priv->oldduplex) {
2199 if (!(phydev->duplex))
2200 tempval &= ~(MACCFG2_FULL_DUPLEX);
2202 tempval |= MACCFG2_FULL_DUPLEX;
2204 priv->oldduplex = phydev->duplex;
2207 if (phydev->speed != priv->oldspeed) {
2208 new_state = 1;
2209 switch (phydev->speed) {
2210 case 1000:
2211 tempval =
2212 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2214 ecntrl &= ~(ECNTRL_R100);
2215 break;
2216 case 100:
2217 case 10:
2218 tempval =
2219 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2221 /* Reduced mode distinguishes
2222 * between 10 and 100 */
2223 if (phydev->speed == SPEED_100)
2224 ecntrl |= ECNTRL_R100;
2225 else
2226 ecntrl &= ~(ECNTRL_R100);
2227 break;
2228 default:
2229 if (netif_msg_link(priv))
2230 printk(KERN_WARNING
2231 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
2232 dev->name, phydev->speed);
2233 break;
2234 }
2236 priv->oldspeed = phydev->speed;
2237 }
2239 gfar_write(&regs->maccfg2, tempval);
2240 gfar_write(&regs->ecntrl, ecntrl);
2242 if (!priv->oldlink) {
2243 new_state = 1;
2244 priv->oldlink = 1;
2245 }
2246 } else if (priv->oldlink) {
2247 new_state = 1;
2248 priv->oldlink = 0;
2249 priv->oldspeed = 0;
2250 priv->oldduplex = -1;
2251 }
2253 if (new_state && netif_msg_link(priv))
2254 phy_print_status(phydev);
2256 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2257 }
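/* A sketch of the speed/interface selection above, per the eTSEC
 * reference manual: MACCFG2's I/F mode field picks the datapath width,
 * so a PHY reporting 1000 Mbit resolves to
 *
 *	tempval = (tempval & ~MACCFG2_IF) | MACCFG2_GMII;  /\* byte mode *\/
 *
 * while 10 and 100 Mbit select MACCFG2_MII (nibble mode), with
 * ECNTRL_R100 distinguishing the two only in the reduced-pin
 * (RGMII/RMII) modes. */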
2259 /* Update the hash table based on the current list of multicast
2260 * addresses we subscribe to. Also, change the promiscuity of
2261 * the device based on the flags (this function is called
2262 * whenever dev->flags is changed */
2263 static void gfar_set_multi(struct net_device *dev)
2264 {
2265 struct dev_mc_list *mc_ptr;
2266 struct gfar_private *priv = netdev_priv(dev);
2267 struct gfar __iomem *regs = priv->gfargrp.regs;
2268 u32 tempval;
2270 if (dev->flags & IFF_PROMISC) {
2271 /* Set RCTRL to PROM */
2272 tempval = gfar_read(&regs->rctrl);
2273 tempval |= RCTRL_PROM;
2274 gfar_write(&regs->rctrl, tempval);
2275 } else {
2276 /* Set RCTRL to not PROM */
2277 tempval = gfar_read(&regs->rctrl);
2278 tempval &= ~(RCTRL_PROM);
2279 gfar_write(&regs->rctrl, tempval);
2280 }
2282 if (dev->flags & IFF_ALLMULTI) {
2283 /* Set the hash to rx all multicast frames */
2284 gfar_write(&regs->igaddr0, 0xffffffff);
2285 gfar_write(&regs->igaddr1, 0xffffffff);
2286 gfar_write(&regs->igaddr2, 0xffffffff);
2287 gfar_write(&regs->igaddr3, 0xffffffff);
2288 gfar_write(&regs->igaddr4, 0xffffffff);
2289 gfar_write(&regs->igaddr5, 0xffffffff);
2290 gfar_write(&regs->igaddr6, 0xffffffff);
2291 gfar_write(&regs->igaddr7, 0xffffffff);
2292 gfar_write(&regs->gaddr0, 0xffffffff);
2293 gfar_write(&regs->gaddr1, 0xffffffff);
2294 gfar_write(&regs->gaddr2, 0xffffffff);
2295 gfar_write(&regs->gaddr3, 0xffffffff);
2296 gfar_write(&regs->gaddr4, 0xffffffff);
2297 gfar_write(&regs->gaddr5, 0xffffffff);
2298 gfar_write(&regs->gaddr6, 0xffffffff);
2299 gfar_write(&regs->gaddr7, 0xffffffff);
2300 } else {
2301 int em_num;
2302 int idx;
2304 /* zero out the hash */
2305 gfar_write(&regs->igaddr0, 0x0);
2306 gfar_write(&regs->igaddr1, 0x0);
2307 gfar_write(&regs->igaddr2, 0x0);
2308 gfar_write(&regs->igaddr3, 0x0);
2309 gfar_write(&regs->igaddr4, 0x0);
2310 gfar_write(&regs->igaddr5, 0x0);
2311 gfar_write(&regs->igaddr6, 0x0);
2312 gfar_write(&regs->igaddr7, 0x0);
2313 gfar_write(&regs->gaddr0, 0x0);
2314 gfar_write(&regs->gaddr1, 0x0);
2315 gfar_write(&regs->gaddr2, 0x0);
2316 gfar_write(&regs->gaddr3, 0x0);
2317 gfar_write(&regs->gaddr4, 0x0);
2318 gfar_write(&regs->gaddr5, 0x0);
2319 gfar_write(&regs->gaddr6, 0x0);
2320 gfar_write(&regs->gaddr7, 0x0);
2322 /* If we have extended hash tables, we need to
2323 * clear the exact match registers to prepare for
2324 * setting addresses */
2325 if (priv->extended_hash) {
2326 em_num = GFAR_EM_NUM + 1;
2327 gfar_clear_exact_match(dev);
2328 idx = 1;
2329 } else {
2330 idx = 0;
2331 em_num = 0;
2332 }
2334 if (dev->mc_count == 0)
2335 return;
2337 /* Parse the list, and set the appropriate bits */
2338 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
2339 if (idx < em_num) {
2340 gfar_set_mac_for_addr(dev, idx,
2341 mc_ptr->dmi_addr);
2342 idx++;
2343 } else
2344 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
2345 }
2346 }
2348 return;
2349 }
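/* Ordering note for the loop above: when extended hash is available,
 * the first GFAR_EM_NUM multicast addresses land in exact-match
 * registers (slots 1 through GFAR_EM_NUM; slot 0 is the station
 * address) and only the overflow falls through to the imperfect group
 * hash, which can admit unwanted frames for software to filter. */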
2352 /* Clears each of the exact match registers to zero, so they
2353 * don't interfere with normal reception */
2354 static void gfar_clear_exact_match(struct net_device *dev)
2355 {
2356 int idx;
2357 u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
2359 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2360 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2361 }
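/* The loop above starts at idx 1 because address pair 0
 * (MACSTNADDR1/2) holds the interface's own station address and must
 * not be zeroed when the multicast list is rewritten. */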
2363 /* Set the appropriate hash bit for the given addr */
2364 /* The algorithm works like so:
2365 * 1) Take the Destination Address (ie the multicast address), and
2366 * do a CRC on it (little endian), and reverse the bits of the
2367 * result.
2368 * 2) Use the 8 most significant bits as a hash into a 256-entry
2369 * table. The table is controlled through 8 32-bit registers:
2370 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
2371 * entry 255. This means that the 3 most significant bits in the
2372 * hash index which gaddr register to use, and the 5 other bits
2373 * indicate which bit (assuming an IBM numbering scheme, which
2374 * for PowerPC (tm) is usually the case) in the register holds
2375 * the entry. */
2376 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2377 {
2378 u32 tempval;
2379 struct gfar_private *priv = netdev_priv(dev);
2380 u32 result = ether_crc(MAC_ADDR_LEN, addr);
2381 int width = priv->hash_width;
2382 u8 whichbit = (result >> (32 - width)) & 0x1f;
2383 u8 whichreg = result >> (32 - width + 5);
2384 u32 value = (1 << (31-whichbit));
2386 tempval = gfar_read(priv->hash_regs[whichreg]);
2387 tempval |= value;
2388 gfar_write(priv->hash_regs[whichreg], tempval);
2390 return;
2391 }
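/* A worked example with a made-up CRC value: suppose ether_crc()
 * returns 0xd2000000 and hash_width is 8. Then
 *
 *	result >> (32 - 8)       = 0xd2 = 0b11010010  (hash entry 210)
 *	whichreg = result >> 29  = 0b110 = 6          (gaddr6)
 *	whichbit = 0b10010       = 18                 (IBM bit 18)
 *	value    = 1 << (31 - 18)
 *
 * which matches entry 210 = register 210 / 32 = 6, bit 210 % 32 = 18
 * under the numbering described in the comment above. */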
2394 /* There are multiple MAC Address register pairs on some controllers
2395 * This function sets the numth pair to a given address
2396 */
2397 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2398 {
2399 struct gfar_private *priv = netdev_priv(dev);
2400 struct gfar __iomem *regs = priv->gfargrp.regs;
2401 int idx;
2402 char tmpbuf[MAC_ADDR_LEN];
2403 u32 tempval;
2404 u32 __iomem *macptr = &regs->macstnaddr1;
2406 macptr += num*2;
2408 /* Now copy it into the mac registers backwards, because */
2409 /* the hardware expects the address in reversed byte order */
2410 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
2411 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
2413 gfar_write(macptr, *((u32 *) (tmpbuf)));
2415 tempval = *((u32 *) (tmpbuf + 4));
2417 gfar_write(macptr+1, tempval);
2418 }
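/* A worked example with a made-up address, 00:04:9f:01:02:03: tmpbuf
 * becomes 03 02 01 9f 04 00, so (on this big-endian part) macstnaddr1
 * is written as 0x0302019f and macstnaddr2 carries 0x0400 in its upper
 * half. Note that the tmpbuf + 4 read pulls in two bytes beyond the
 * six-byte buffer; that is only harmless because the low half of the
 * second register is documented as reserved. */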
2420 /* GFAR error interrupt handler */
2421 static irqreturn_t gfar_error(int irq, void *grp_id)
2422 {
2423 struct gfar_priv_grp *gfargrp = grp_id;
2424 struct gfar __iomem *regs = gfargrp->regs;
2425 struct gfar_private *priv = gfargrp->priv;
2426 struct net_device *dev = priv->ndev;
2428 /* Save ievent for future reference */
2429 u32 events = gfar_read(&regs->ievent);
2431 /* Clear IEVENT */
2432 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2434 /* Magic Packet is not an error. */
2435 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2436 (events & IEVENT_MAG))
2437 events &= ~IEVENT_MAG;
2440 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2441 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2442 dev->name, events, gfar_read(&regs->imask));
2444 /* Update the error counters */
2445 if (events & IEVENT_TXE) {
2446 dev->stats.tx_errors++;
2448 if (events & IEVENT_LC)
2449 dev->stats.tx_window_errors++;
2450 if (events & IEVENT_CRL)
2451 dev->stats.tx_aborted_errors++;
2452 if (events & IEVENT_XFUN) {
2453 if (netif_msg_tx_err(priv))
2454 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2455 "packet dropped.\n", dev->name);
2456 dev->stats.tx_dropped++;
2457 priv->extra_stats.tx_underrun++;
2459 /* Reactivate the Tx Queues */
2460 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
2461 }
2462 if (netif_msg_tx_err(priv))
2463 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
2464 }
2465 if (events & IEVENT_BSY) {
2466 dev->stats.rx_errors++;
2467 priv->extra_stats.rx_bsy++;
2469 gfar_receive(irq, grp_id);
2471 if (netif_msg_rx_err(priv))
2472 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2473 dev->name, gfar_read(&regs->rstat));
2474 }
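/* IEVENT_BSY indicates a frame was discarded because no receive
 * buffers were free; calling gfar_receive() here kicks the receive
 * path so the ring is cleaned and reception can resume. */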
2475 if (events & IEVENT_BABR) {
2476 dev->stats.rx_errors++;
2477 priv->extra_stats.rx_babr++;
2479 if (netif_msg_rx_err(priv))
2480 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
2481 }
2482 if (events & IEVENT_EBERR) {
2483 priv->extra_stats.eberr++;
2484 if (netif_msg_rx_err(priv))
2485 printk(KERN_DEBUG "%s: bus error\n", dev->name);
2486 }
2487 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
2488 printk(KERN_DEBUG "%s: control frame\n", dev->name);
2490 if (events & IEVENT_BABT) {
2491 priv->extra_stats.tx_babt++;
2492 if (netif_msg_tx_err(priv))
2493 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
2494 }
2495 return IRQ_HANDLED;
2496 }
2498 static struct of_device_id gfar_match[] =
2499 {
2500 {
2501 .type = "network",
2502 .compatible = "gianfar",
2503 },
2504 {},
2505 };
2506 MODULE_DEVICE_TABLE(of, gfar_match);
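/* A minimal, purely illustrative device tree node that this match
 * table would bind against; reg, interrupt and PHY values are board
 * specific and made up here:
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *	};
 */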
2508 /* Structure for a device driver */
2509 static struct of_platform_driver gfar_driver = {
2510 .name = "fsl-gianfar",
2511 .match_table = gfar_match,
2513 .probe = gfar_probe,
2514 .remove = gfar_remove,
2515 .suspend = gfar_legacy_suspend,
2516 .resume = gfar_legacy_resume,
2517 .driver.pm = GFAR_PM_OPS,
2518 };
2520 static int __init gfar_init(void)
2521 {
2522 return of_register_platform_driver(&gfar_driver);
2523 }
2525 static void __exit gfar_exit(void)
2526 {
2527 of_unregister_platform_driver(&gfar_driver);
2528 }
2530 module_init(gfar_init);
2531 module_exit(gfar_exit);