/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device.  Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
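/*
 * Illustrative sketch only, not part of the driver: one way a consumer
 * can walk such a wrap-bit-terminated descriptor ring.  The struct and
 * flag below are hypothetical stand-ins for the real txbd8/rxbd8 layout
 * and the TXBD_WRAP/RXBD_WRAP bits used throughout this file:
 *
 *	#define BD_SKETCH_WRAP	0x2000	// hypothetical wrap-bit value
 *
 *	struct bd_sketch {
 *		u16 status;	// READY/EMPTY, WRAP, and error bits
 *		u16 length;	// length of the attached buffer
 *		u32 bufPtr;	// physical address of the data buffer
 *	};
 *
 *	static struct bd_sketch *bd_next(struct bd_sketch *bdp,
 *					 struct bd_sketch *base)
 *	{
 *		// The last descriptor carries the wrap bit; after it,
 *		// both the hardware and the driver return to the base.
 *		if (bdp->status & BD_SKETCH_WRAP)
 *			return base;
 *		return bdp + 1;
 *	}
 */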
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include "gianfar.h"
#include "gianfar_mii.h"
#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}
static int gfar_of_init(struct net_device *dev)
	struct device_node *phy, *mdio;
	const unsigned int *id;
	const void *mac_addr;
	struct gfar_private *priv = netdev_priv(dev);
	struct device_node *np = priv->node;
	char bus_name[MII_BUS_ID_SIZE];

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* get a pointer to the register memory */
	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
	priv->regs = ioremap(addr, size);

	if (priv->regs == NULL)
		return -ENOMEM;

	priv->interruptTransmit = irq_of_parse_and_map(np, 0);

	model = of_get_property(np, "model", NULL);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->interruptReceive = irq_of_parse_and_map(np, 1);

		priv->interruptError = irq_of_parse_and_map(np, 2);

		if (priv->interruptTransmit < 0 ||
		    priv->interruptReceive < 0 ||
		    priv->interruptError < 0) {
			err = -EINVAL;
			goto err_out;
		}
	}
	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
	ph = of_get_property(np, "phy-handle", NULL);
	if (ph == NULL) {
		u32 *fixed_link;

		fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);

		snprintf(priv->phy_bus_id, BUS_ID_SIZE, PHY_ID_FMT, "0",
				fixed_link[0]);
	} else {
		phy = of_find_node_by_phandle(*ph);

		mdio = of_get_parent(phy);

		id = of_get_property(phy, "reg", NULL);

		gfar_mdio_bus_name(bus_name, mdio);
		snprintf(priv->phy_bus_id, BUS_ID_SIZE, "%s:%02x",
				bus_name, *id);
	}
	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	ph = of_get_property(np, "tbi-handle", NULL);
	if (ph) {
		struct device_node *tbi = of_find_node_by_phandle(*ph);
		struct of_device *ofdev;
		struct mii_bus *bus;

		mdio = of_get_parent(tbi);

		ofdev = of_find_device_by_node(mdio);

		id = of_get_property(tbi, "reg", NULL);

		bus = dev_get_drvdata(&ofdev->dev);

		priv->tbiphy = bus->phy_map[*id];
	}

	return 0;
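/*
 * For reference, a hypothetical device tree node of the shape this probe
 * parses (the property names match the of_get_property() calls above; the
 * addresses, interrupt numbers, and phandles here are made up):
 *
 *	ethernet@24000 {
 *		model = "eTSEC";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 04 9f 00 00 01 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *		tbi-handle = <&tbi0>;
 *	};
 */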
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	DECLARE_MAC_BUF(mac);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	priv = netdev_priv(dev);
	priv->node = ofdev->node;

	err = gfar_of_init(dev);

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;
	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txic = DEFAULT_TXIC;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxic = DEFAULT_RXIC;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);
	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%s\n",
	       dev->name, print_mac(mac, dev->dev_addr));

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
static int gfar_remove(struct of_device *ofdev)
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	free_netdev(priv->dev);
static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
	struct net_device *dev = priv->dev;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		spin_lock_irqsave(&priv->txlock, flags);
		spin_lock(&priv->rxlock);

		gfar_halt_nodisable(dev);
		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&priv->regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&priv->regs->maccfg1, tempval);
		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);
		napi_disable(&priv->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&priv->regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&priv->regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&priv->regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}
static int gfar_resume(struct of_device *ofdev)
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
	struct net_device *dev = priv->dev;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);
	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	tempval = gfar_read(&priv->regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&priv->regs->maccfg2, tempval);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

	napi_enable(&priv->napi);

#define gfar_suspend NULL
#define gfar_resume NULL
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}
	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	phy_interface_t interface;

	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);

	if (!priv->tbiphy) {
		printk(KERN_WARNING "SGMII mode requires that the device "
				"tree specify a tbi-handle\n");
		return;
	}
	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(priv->tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
static void init_registers(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);

	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

void stop_gfar(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;

	phy_stop(priv->phydev);
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);
				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}
		}

		kfree(priv->rx_skbuff);
	}
void gfar_start(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);

	dev->trans_start = jiffies;

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;
	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);
	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);

		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);

		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;
	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;
		skb = gfar_new_skb(dev);

		if (!skb) {
			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
			       dev->name);

			goto err_rxalloc_fail;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(dev, rxbdp, skb);

		rxbdp++;
	}
	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;
	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			goto tx_irq_fail;
		}
		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			goto err_irq_fail;
		}
	}
	phy_start(priv->phydev);

	/* Configure the coalescing support */
	gfar_write(&regs->txic, 0);
	if (priv->txcoalescing)
		gfar_write(&regs->txic, priv->txic);

	gfar_write(&regs->rxic, 0);
	if (priv->rxcoalescing)
		gfar_write(&regs->rxic, priv->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);
	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);

	napi_enable(&priv->napi);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);
	err = init_phy(dev);

	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	return err;
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header. */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;
	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}
	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;
	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;
	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;
	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;
	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->dev;
	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_tx_schedule_all(dev);
static void gfar_timeout(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		howmany++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;
		/* Unmap the DMA memory */
		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				bdp->length, DMA_TO_DEVICE);

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* Clean BD length for empty detection */
		bdp->length = 0;

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;
		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	dev->stats.tx_packets += howmany;

	return howmany;
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	spin_lock(&priv->txlock);

	gfar_clean_tx_ring(dev);

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);
		gfar_write(&priv->regs->txic, priv->txic);
	}

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
	struct gfar_private *priv = netdev_priv(dev);
	u32 * status_len = (u32 *)bdp;
	u16 flags;

	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	flags = RXBD_EMPTY | RXBD_INTERRUPT;

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		flags |= RXBD_WRAP;

	eieio();

	*status_len = (u32)flags << 16;
struct sk_buff * gfar_new_skb(struct net_device *dev)
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	/* We have to allocate the skb, so keep trying till we succeed */
	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
static inline void count_errors(unsigned short status, struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;
	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}

	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
irqreturn_t gfar_receive(int irq, void *dev_id)
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RTX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}

	return IRQ_HANDLED;
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);
		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);
		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
			ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						       fcb->vlctl);
		} else
			ret = netif_receive_skb(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);
	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);
		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb_any(skb);
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			dev->stats.rx_bytes += pkt_len;
		}

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx + 1) &
		    RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
static int gfar_poll(struct napi_struct *napi, int budget)
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int howmany;
	unsigned long flags;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	if (howmany < budget) {
		netif_rx_complete(dev, napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic, priv->rxic);
		}
	}

	return howmany;
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;
	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;
			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}
	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}
	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));
	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
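/* Worked example with a hypothetical CRC value: in the non-extended case
 * (hash_width = 8), if the CRC result's top byte is 0xd3 (binary 1101 0011),
 * then whichreg = 0b110 = 6 and whichbit = 0b10011 = 19, so bit
 * (31 - 19) = 12 of gaddr6 (hash_regs[6]) gets set.
 */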
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
	struct gfar_private *priv = netdev_priv(dev);
	char tmpbuf[MAC_ADDR_LEN];
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
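/* Example with a hypothetical address: for 00:04:9f:00:00:01 the loop
 * above builds tmpbuf = 01 00 00 9f 04 00, so the first register is
 * written with 0x0100009f and the second with a value whose top bytes
 * are 04 00 (the byte-reversed remainder of the address).
 */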
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{},
};

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
};
static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = of_register_platform_driver(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
	gfar_mdio_exit();
}
module_init(gfar_init);
module_exit(gfar_exit);