/* drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or an amount of time has passed). In NAPI, the
 * interrupt handler will signal that there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
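
/*
 * Editor's note -- an illustrative sketch of the ring-walking scheme
 * described above, not part of the driver. The helper handle_frame() is
 * hypothetical; the real RX consumer is gfar_clean_rx_ring() below.
 * Guarded by "#if 0" so it is never built.
 */
#if 0
static void bd_ring_walk_sketch(struct rxbd8 *base, int ring_size)
{
	struct rxbd8 *bdp = base;

	/* Process descriptors until we hit one the hardware still owns */
	while (!(bdp->lstatus & BD_LFLAG(RXBD_EMPTY))) {
		u32 wrap = bdp->lstatus & BD_LFLAG(RXBD_WRAP);

		handle_frame(bdp);		/* hypothetical */

		/* Hand the descriptor back to the controller, keeping WRAP */
		bdp->lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT) | wrap;

		/* The WRAP bit in the last descriptor closes the ring */
		bdp = wrap ? base : bdp + 1;
	}
}
#endif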

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT	(1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
					ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable)
		rctrl |= RCTRL_FILREN;

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_select_queue = gfar_select_queue,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_get_queue_mapping(skb);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;
	u64 addr, size;

	addr = of_translate_address(np,
			of_get_address(np, 0, &size, NULL));
	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);

	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
				sizeof(struct gfar_priv_tx_q), GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
				sizeof(struct gfar_priv_rx_q), GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
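
/*
 * Worked example (editor's note): with max_qs = 8, a device-tree mask of
 * 0x80 (queue 0 carried in the MSB) comes back as 0x01, so for_each_bit()
 * -- which scans from bit 0 upward -- visits queue 0 first.
 */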

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);
	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
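
	/*
	 * Editor's note: hash_width is the number of hash bits used to pick
	 * a multicast filter bin. The extended set of 16 registers gives
	 * 16 * 32 = 512 bins (width 9); the plain set of 8 registers gives
	 * 8 * 32 = 256 bins (width 8).
	 */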

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = '0' + i;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)],
					"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
			interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
				interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		while (!(gfar_read(&regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies;
}

void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned int tx_mask, unsigned int rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
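
/*
 * Worked example (editor's note): for an untagged IPv4/TCP frame, after
 * gfar_add_fcb() has pushed the 8-byte FCB, skb_network_offset() is
 * GMAC_FCB_LEN + ETH_HLEN = 8 + 14 = 22, so l3os = 22 - 8 = 14 (the
 * Ethernet header), and l4os = 20 for an IP header without options.
 */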

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
		struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
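
/*
 * Worked example (editor's note): with ring_size = 8 and bdp = base + 6,
 * skip_txbd(bdp, 3, base, 8) computes base + 9, which is past the end, so
 * it returns base + 1 -- the same modular step the hardware takes via the
 * WRAP bit in the last descriptor.
 */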

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base;
	u32 lstatus;
	int i, rq = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, length;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
			(skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* check if there is space to queue this packet */
	if ((nr_frags+1) > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	txbdp = txbdp_start = tx_queue->cur_tx;

	if (nr_frags == 0) {
		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* setup the TxBD length and buffer pointer for the first BD */
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_frags + 1);

	dev->trans_start = jiffies;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full. We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;
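
	/*
	 * Worked example (editor's note): for new_mtu = 1500 on a
	 * VLAN-enabled interface using an FCB and no extra padding,
	 * frame_size is 1500 + 14 + 4 + 8 = 1526; with the 512-byte
	 * INCREMENTAL_BUFFER_SIZE defined in gianfar.h this rounds up to
	 * (1526 & ~511) + 512 = 1536 bytes.
	 */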

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0;
	int i;
	int howmany = 0;
	u32 lstatus;

	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		frags = skb_shinfo(skb)->nr_frags;
		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		dma_unmap_single(&priv->ofdev->dev,
				bdp->bufPtr,
				bdp->length,
				DMA_TO_DEVICE);

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		tx_queue->num_txbdfree += frags + 1;
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	dev->stats.tx_packets += howmany;

	return howmany;
}

static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly.
	 */
	skb_reserve(skb, alignamount);

	return skb;
}
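
/*
 * Alignment example, assuming RXBUF_ALIGNMENT is a power of two such
 * as 64: if skb->data starts at an address ending in 0x10, then
 * alignamount = 64 - (0x10 & 63) = 48, and skb_reserve(skb, 48)
 * advances ->data to the next 64-byte boundary for the DMA engine.
 * (The buffer is over-allocated by RXBUF_ALIGNMENT above precisely so
 * this reserve always fits.)
 */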
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is. */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
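
/*
 * Note the equality test above rather than a simple bit test:
 * RXFCB_CSUM_MASK presumably also covers the hardware's checksum
 * error flags, so the comparison only succeeds when both the IP
 * (RXFCB_CIP) and TCP/UDP (RXFCB_CTU) sums were checked and no
 * error bit is set.
 */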
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_set_queue_mapping(skb, fcb->rq);
	/* Remove the padded bytes, if there are any */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled.
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				 bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				if (in_irq() || irqs_disabled())
					printk("Interrupt problem!\n");
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
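
/*
 * Note on amount_pull: when the controller prepends a frame control
 * block (gfar_uses_fcb()), the FCB plus any configured padding sits
 * at the head of the DMA buffer, so gfar_process_frame() pulls
 * GMAC_FCB_LEN + priv->padding bytes to expose the Ethernet header.
 */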
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0; /* unsigned long for set_bit()/test_bit() */
	int num_queues = 0;
	unsigned long flags;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			/* If we fail to get the lock,
			 * don't bother with the TX BDs */
			if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
				tx_cleaned += gfar_clean_tx_ring(tx_queue);
				spin_unlock_irqrestore(&tx_queue->txlock,
							flags);
			}

			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
					 rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer;
		 * otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}
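
/*
 * Budget bookkeeping example for the loop above: with budget = 64 and
 * two RX queues in the group, each queue initially gets 32.  If queue
 * A has only 10 frames pending, it returns 10, donates the unused 22
 * to left_over_budget, and is marked in serviced_queues; the next
 * iteration of the while loop hands the surplus to the still-busy
 * queue.
 */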
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;
			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index determine which gaddr register to use, and the other
 * 5 bits indicate which bit (assuming an IBM numbering scheme,
 * which for PowerPC (tm) is usually the case) in the register
 * holds the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
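
/*
 * Worked example of the hash math, assuming hash_width = 8 (the
 * 256-entry table described above): if ether_crc() returns
 * 0xA7xxxxxx, the top 8 bits are 0xA7 = 0b10100111, so
 * whichreg = 0b101 = 5 selects hash_regs[5], and
 * whichbit = 0b00111 = 7 selects bit 7 counted from the MSB,
 * i.e. value = 1 << (31 - 7).
 */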
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[8] = { 0 };	/* padded past MAC_ADDR_LEN so the second
				 * 32-bit read below stays in bounds */
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, since
	 * the registers expect the address bytes in little endian order */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
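
/*
 * Byte-order example for the copy above, using the made-up address
 * 00:04:9f:01:02:03: tmpbuf becomes 03 02 01 9f 04 00 (00 00), so on
 * big-endian PowerPC the first gfar_write() stores 0x0302019f and the
 * second stores 0x04000000; the hardware takes the remaining two
 * address octets from the upper half of the second register.
 */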
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
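
/*
 * For reference, a device-tree node this table would match might look
 * like the following (a hypothetical fragment; the authoritative
 * binding is documented in the kernel source tree):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *	};
 */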
/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,
	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_legacy_suspend,
	.resume = gfar_legacy_resume,
	.driver.pm = GFAR_PM_OPS,
};
static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);