/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx families of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through platform_device.  Structures which
 *  define the configuration needed by the board are defined in a
 *  board structure in arch/ppc/platforms (though I do not
 *  discount the possibility that other architectures could one
 *  day be supported).
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  Without NAPI, the packet(s) will be handled
 *  immediately.  Both methods will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
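
/*
 * Illustrative sketch of the descriptor-ring walk described above; it is
 * not part of the driver and is compiled out with #if 0.  It assumes only
 * the rxbd8 layout and RXBD_* flags declared in gianfar.h, and the
 * example_walk_rx_ring() name is hypothetical.
 */
#if 0
static void example_walk_rx_ring(struct rxbd8 *base)
{
        struct rxbd8 *bdp = base;

        /* Process every descriptor the hardware has filled */
        while (!(bdp->status & RXBD_EMPTY)) {
                /* ... handle bdp->bufPtr / bdp->length here, then hand
                 * the descriptor back by clearing everything but the
                 * WRAP bit and marking it empty again */
                bdp->status &= RXBD_WRAP;
                bdp->status |= RXBD_EMPTY;

                /* Advance, wrapping at the descriptor that carries the
                 * WRAP bit, exactly as the theory of operation says */
                if (bdp->status & RXBD_WRAP)
                        bdp = base;
                else
                        bdp++;
        }
}
#endif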

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gianfar_platform_data *einfo;
        struct resource *r;
        int idx;
        int err = 0;

        einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

        if (NULL == einfo) {
                printk(KERN_ERR "gfar %d: Missing additional data!\n",
                       pdev->id);

                return -ENODEV;
        }

        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));

        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);

        /* Set the info in the priv to the current info */
        priv->einfo = einfo;

        /* fill out IRQ fields */
        if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
                priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
                priv->interruptError = platform_get_irq_byname(pdev, "error");
                if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
                        goto regs_fail;
        } else {
                priv->interruptTransmit = platform_get_irq(pdev, 0);
                if (priv->interruptTransmit < 0)
                        goto regs_fail;
        }

        /* get a pointer to the register memory */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = ioremap(r->start, sizeof (struct gfar));

        if (NULL == priv->regs) {
                err = -ENOMEM;
                goto regs_fail;
        }

        spin_lock_init(&priv->txlock);
        spin_lock_init(&priv->rxlock);

        platform_set_drvdata(pdev, dev);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        /* To do this, we write Graceful Receive Stop and Graceful */
        /* Transmit Stop, and then wait until the corresponding bits */
        /* in IEVENT indicate the stops have completed. */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
                cpu_relax();

        /* Reset MAC layer */
        gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&priv->regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Copy the station address into the dev structure, */
        memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) (priv->regs);

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* Fill in the dev structure */
        dev->open = gfar_enet_open;
        dev->hard_start_xmit = gfar_start_xmit;
        dev->tx_timeout = gfar_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
        dev->poll = gfar_poll;
        dev->weight = GFAR_DEV_WEIGHT;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = gfar_netpoll;
#endif
        dev->stop = gfar_close;
        dev->get_stats = gfar_get_stats;
        dev->change_mtu = gfar_change_mtu;
        dev->mtu = 1500;
        dev->set_multicast_list = gfar_set_multi;

        dev->ethtool_ops = &gfar_ethtool_ops;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->vlan_rx_register = gfar_vlan_rx_register;
                dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;

                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

                priv->vlan_enable = 1;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &priv->regs->igaddr0;
                priv->hash_regs[1] = &priv->regs->igaddr1;
                priv->hash_regs[2] = &priv->regs->igaddr2;
                priv->hash_regs[3] = &priv->regs->igaddr3;
                priv->hash_regs[4] = &priv->regs->igaddr4;
                priv->hash_regs[5] = &priv->regs->igaddr5;
                priv->hash_regs[6] = &priv->regs->igaddr6;
                priv->hash_regs[7] = &priv->regs->igaddr7;
                priv->hash_regs[8] = &priv->regs->gaddr0;
                priv->hash_regs[9] = &priv->regs->gaddr1;
                priv->hash_regs[10] = &priv->regs->gaddr2;
                priv->hash_regs[11] = &priv->regs->gaddr3;
                priv->hash_regs[12] = &priv->regs->gaddr4;
                priv->hash_regs[13] = &priv->regs->gaddr5;
                priv->hash_regs[14] = &priv->regs->gaddr6;
                priv->hash_regs[15] = &priv->regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &priv->regs->gaddr0;
                priv->hash_regs[1] = &priv->regs->gaddr1;
                priv->hash_regs[2] = &priv->regs->gaddr2;
                priv->hash_regs[3] = &priv->regs->gaddr3;
                priv->hash_regs[4] = &priv->regs->gaddr4;
                priv->hash_regs[5] = &priv->regs->gaddr5;
                priv->hash_regs[6] = &priv->regs->gaddr6;
                priv->hash_regs[7] = &priv->regs->gaddr7;
        }
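        /*
         * Illustrative note (assumption, not taken from this file):
         * gfar_set_hash_for_addr() is expected to CRC the address and
         * use the top hash_width bits of the result to pick one of the
         * registers above (upper bits) and a bit within it (low 5 bits).
         */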

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
        priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
        priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

        priv->txcoalescing = DEFAULT_TX_COALESCE;
        priv->txcount = DEFAULT_TXCOUNT;
        priv->txtime = DEFAULT_TXTIME;
        priv->rxcoalescing = DEFAULT_RX_COALESCE;
        priv->rxcount = DEFAULT_RXCOUNT;
        priv->rxtime = DEFAULT_RXTIME;

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME, dev->name);
        for (idx = 0; idx < 6; idx++)
                printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
        printk("\n");

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
        printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
               dev->name, priv->rx_ring_size, priv->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->regs);
regs_fail:
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct gfar_private *priv = netdev_priv(dev);

        platform_set_drvdata(pdev, NULL);

        iounmap(priv->regs);
        free_netdev(dev);

        return 0;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        struct phy_device *phydev;
        char phy_id[BUS_ID_SIZE];

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

        phydev = phy_connect(dev, phy_id, &adjust_link, 0);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        /* Remove any features not supported by the controller */
        phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        phydev->advertising = phydev->supported;

        priv->phydev = phydev;

        return 0;
}

static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&priv->regs->igaddr0, 0);
        gfar_write(&priv->regs->igaddr1, 0);
        gfar_write(&priv->regs->igaddr2, 0);
        gfar_write(&priv->regs->igaddr3, 0);
        gfar_write(&priv->regs->igaddr4, 0);
        gfar_write(&priv->regs->igaddr5, 0);
        gfar_write(&priv->regs->igaddr6, 0);
        gfar_write(&priv->regs->igaddr7, 0);

        gfar_write(&priv->regs->gaddr0, 0);
        gfar_write(&priv->regs->gaddr1, 0);
        gfar_write(&priv->regs->gaddr2, 0);
        gfar_write(&priv->regs->gaddr3, 0);
        gfar_write(&priv->regs->gaddr4, 0);
        gfar_write(&priv->regs->gaddr5, 0);
        gfar_write(&priv->regs->gaddr6, 0);
        gfar_write(&priv->regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
                gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

        /* Assign the TBI an address which won't conflict with the PHYs */
        gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&priv->regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&priv->regs->dmactrl, tempval);

                while (!(gfar_read(&priv->regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        /* Free the IRQs */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->interruptError, dev);
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
                free_irq(priv->interruptTransmit, dev);
        }

        free_skb_resources(priv);

        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        txbdp = priv->tx_bd_base;

        for (i = 0; i < priv->tx_ring_size; i++) {

                if (priv->tx_skbuff[i]) {
                        dma_unmap_single(NULL, txbdp->bufPtr,
                                        txbdp->length,
                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                }
        }

        kfree(priv->tx_skbuff);

        rxbdp = priv->rx_bd_base;

        /* rx_skbuff is not guaranteed to be allocated, so only
         * free it and its contents if it is allocated */
        if (priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
                                dma_unmap_single(NULL, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);

                                dev_kfree_skb_any(priv->rx_skbuff[i]);
                                priv->rx_skbuff[i] = NULL;
                        }

                        rxbdp->status = 0;
                        rxbdp->length = 0;
                        rxbdp->bufPtr = 0;

                        rxbdp++;
                }

                kfree(priv->rx_skbuff);
        }
}

void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
        gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        dma_addr_t addr;
        unsigned long vaddr;
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        int err = 0;
        u32 rctrl = 0;
        u32 attrs = 0;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Allocate memory for the buffer descriptors */
        vaddr = (unsigned long) dma_alloc_coherent(NULL,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);

        if (vaddr == 0) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
                                        dev->name);
                return -ENOMEM;
        }

        priv->tx_bd_base = (struct txbd8 *) vaddr;

        /* enet DMA only understands physical addresses */
        gfar_write(&regs->tbase0, addr);

        /* Start the rx descriptor ring where the tx ring leaves off */
        addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
        vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
        priv->rx_bd_base = (struct rxbd8 *) vaddr;
        gfar_write(&regs->rbase0, addr);

        /* Setup the skbuff rings */
        priv->tx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);

        if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto tx_skb_fail;
        }

        for (i = 0; i < priv->tx_ring_size; i++)
                priv->tx_skbuff[i] = NULL;

        priv->rx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);

        if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto rx_skb_fail;
        }

        for (i = 0; i < priv->rx_ring_size; i++)
                priv->rx_skbuff[i] = NULL;

        /* Initialize some variables in our dev structure */
        priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
        priv->cur_rx = priv->rx_bd_base;
        priv->skb_curtx = priv->skb_dirtytx = 0;
        priv->skb_currx = 0;

        /* Initialize Transmit Descriptor Ring */
        txbdp = priv->tx_bd_base;
        for (i = 0; i < priv->tx_ring_size; i++) {
                txbdp->status = 0;
                txbdp->length = 0;
                txbdp->bufPtr = 0;
                txbdp++;
        }

        /* Set the last descriptor in the ring to indicate wrap */
        txbdp--;
        txbdp->status |= TXBD_WRAP;

        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct sk_buff *skb = NULL;

                rxbdp->status = 0;

                skb = gfar_new_skb(dev, rxbdp);

                priv->rx_skbuff[i] = skb;

                rxbdp++;
        }

        /* Set the last descriptor in the ring to wrap */
        rxbdp--;
        rxbdp->status |= RXBD_WRAP;

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if (request_irq(priv->interruptError, gfar_error,
                                0, "enet_error", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }

                if (request_irq(priv->interruptTransmit, gfar_transmit,
                                0, "enet_tx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;

                        goto tx_irq_fail;
                }

                if (request_irq(priv->interruptReceive, gfar_receive,
                                0, "enet_rx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
                                                dev->name, priv->interruptReceive);

                        err = -1;
                        goto rx_irq_fail;
                }
        } else {
                if (request_irq(priv->interruptTransmit, gfar_interrupt,
                                0, "gfar_interrupt", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }
        }

        phy_start(priv->phydev);

        /* Configure the coalescing support */
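        /* (Illustrative gloss, inferred from the theory-of-operation
         * comment: mk_ic_value(count, time) is taken to pack the
         * frame-count and timer thresholds into one coalescing register,
         * so the interrupt fires only after 'count' frames or after
         * 'time' has elapsed.) */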
        if (priv->txcoalescing)
                gfar_write(&regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&regs->txic, 0);

        if (priv->rxcoalescing)
                gfar_write(&regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&regs->rxic, 0);

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(dev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->vlan_enable)
                rctrl |= RCTRL_VLAN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Init rctrl based on our settings */
        gfar_write(&priv->regs->rctrl, rctrl);

        if (dev->features & NETIF_F_IP_CSUM)
                gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&priv->regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&priv->regs->attr, attrs);

        gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

        /* Start the controller */
        gfar_start(dev);

        return 0;

rx_irq_fail:
        free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
        free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
        free_skb_resources(priv);
tx_skb_fail:
        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));

        return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
        int err;

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if (err)
                return err;

        err = startup_gfar(dev);

        netif_start_queue(dev);

        return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload.  We set it to checksum, using a pseudo-header
         * we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
        if (skb->nh.iph->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = skb->h.uh->check;
        } else
                fcb->phcs = skb->h.th->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
        fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);

        fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp;
        u16 status;
        unsigned long flags;

        /* Update transmit stats */
        priv->stats.tx_bytes += skb->len;

        /* Lock priv now */
        spin_lock_irqsave(&priv->txlock, flags);

        /* Point at the first free tx descriptor */
        txbdp = priv->cur_tx;

        /* Clear all but the WRAP status flags */
        status = txbdp->status & TXBD_WRAP;

        /* Set up checksumming */
        if (likely((dev->features & NETIF_F_IP_CSUM)
                        && (CHECKSUM_PARTIAL == skb->ip_summed))) {
                fcb = gfar_add_fcb(skb, txbdp);
                status |= TXBD_TOE;
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlan_enable &&
                        unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb, txbdp);
                        status |= TXBD_TOE;
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* Set buffer length and pointer */
        txbdp->length = skb->len;
        txbdp->bufPtr = dma_map_single(NULL, skb->data,
                        skb->len, DMA_TO_DEVICE);

        /* Save the skb pointer so we can free it later */
        priv->tx_skbuff[priv->skb_curtx] = skb;

        /* Update the current skb pointer (wrapping if this was the last) */
        priv->skb_curtx =
            (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

        /* Flag the BD as interrupt-causing */
        status |= TXBD_INTERRUPT;

        /* Flag the BD as ready to go, last in frame, and  */
        /* in need of CRC */
        status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

        dev->trans_start = jiffies;

        txbdp->status = status;

        /* If this was the last BD in the ring, the next one */
        /* is at the beginning of the ring */
        if (txbdp->status & TXBD_WRAP)
                txbdp = priv->tx_bd_base;
        else
                txbdp++;

        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
        if (txbdp == priv->dirty_tx) {
                netif_stop_queue(dev);

                priv->stats.tx_fifo_errors++;
        }

        /* Update the current txbd to the next one */
        priv->cur_tx = txbdp;

        /* Tell the DMA to go go go */
        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

        /* Unlock priv */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_stop_queue(dev);

        return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        u32 tempval;

        spin_lock_irqsave(&priv->rxlock, flags);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&priv->regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval |= RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&priv->regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        }

        spin_unlock_irqrestore(&priv->rxlock, flags);
}


static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&priv->rxlock, flags);

        if (priv->vlgrp)
                priv->vlgrp->vlan_devices[vid] = NULL;

        spin_unlock_irqrestore(&priv->rxlock, flags);
}


static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlan_enable)
                frame_size += VLAN_ETH_HLEN;

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                                        dev->name);
                return -EINVAL;
        }

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;

        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something */
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                stop_gfar(dev);

        priv->rx_buffer_size = tempsize;

        dev->mtu = new_mtu;

        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
        gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

        /* If the mtu is larger than the max size for standard
         * ethernet frames (i.e., a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
        tempval = gfar_read(&priv->regs->maccfg2);

        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

        gfar_write(&priv->regs->maccfg2, tempval);

        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);

        return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        priv->stats.tx_errors++;

        if (dev->flags & IFF_UP) {
                stop_gfar(dev);
                startup_gfar(dev);
        }

        netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
        struct txbd8 *bdp;

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

        /* Lock priv */
        spin_lock(&priv->txlock);
        bdp = priv->dirty_tx;
        while ((bdp->status & TXBD_READY) == 0) {
                /* If dirty_tx and cur_tx are the same, then either the */
                /* ring is empty or full now (it could only be full in the beginning, */
                /* obviously).  If it is empty, we are done. */
                if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
                        break;

                priv->stats.tx_packets++;

                /* Deferred means some collisions occurred during transmit, */
                /* but we eventually sent the packet. */
                if (bdp->status & TXBD_DEF)
                        priv->stats.collisions++;

                /* Free the sk buffer associated with this TxBD */
                dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
                    (priv->skb_dirtytx +
                     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

                /* update bdp to point at next bd in the ring (wrapping if necessary) */
                if (bdp->status & TXBD_WRAP)
                        bdp = priv->tx_bd_base;
                else
                        bdp++;

                /* Move dirty_tx to be the next bd */
                priv->dirty_tx = bdp;

                /* We freed a buffer, so now we can restart transmission */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } /* while ((bdp->status & TXBD_READY) == 0) */

        /* If we are coalescing the interrupts, reset the timer */
        /* Otherwise, clear it */
        if (priv->txcoalescing)
                gfar_write(&priv->regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&priv->regs->txic, 0);

        spin_unlock(&priv->txlock);

        return IRQ_HANDLED;
}

struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
        unsigned int timeout = SKB_ALLOC_TIMEOUT;

        /* We have to allocate the skb, so keep trying till we succeed */
        while ((!skb) && timeout--)
                skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

        if (NULL == skb)
                return NULL;

        alignamount = RXBUF_ALIGNMENT -
                (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

        /* We need the data buffer to be aligned properly.  We will reserve
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);

        skb->dev = dev;

        bdp->bufPtr = dma_map_single(NULL, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);

        bdp->length = 0;

        /* Mark the buffer empty */
        bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

        return skb;
}

static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
        struct net_device_stats *stats = &priv->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* If the packet was truncated, none of the other errors
         * matter */
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;

                estats->rx_trunc++;

                return;
        }
        /* Count the errors, if there were any */
        if (status & (RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;

                if (status & RXBD_LARGE)
                        estats->rx_large++;
                else
                        estats->rx_short++;
        }
        if (status & RXBD_NONOCTET) {
                stats->rx_frame_errors++;
                estats->rx_nonoctet++;
        }
        if (status & RXBD_CRCERR) {
                estats->rx_crcerr++;
                stats->rx_crc_errors++;
        }
        if (status & RXBD_OVERRUN) {
                estats->rx_overrun++;
                stats->rx_crc_errors++;
        }
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
        u32 tempval;
#else
        unsigned long flags;
#endif

        /* Clear IEVENT, so rx interrupt isn't called again
         * because of this interrupt */
        gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

        /* support NAPI */
#ifdef CONFIG_GFAR_NAPI
        if (netif_rx_schedule_prep(dev)) {
                tempval = gfar_read(&priv->regs->imask);
                tempval &= IMASK_RX_DISABLED;
                gfar_write(&priv->regs->imask, tempval);

                __netif_rx_schedule(dev);
        } else {
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
                                dev->name, gfar_read(&priv->regs->ievent),
                                gfar_read(&priv->regs->imask));
        }
#else

        spin_lock_irqsave(&priv->rxlock, flags);
        gfar_clean_rx_ring(dev, priv->rx_ring_size);

        /* If we are coalescing interrupts, update the timer */
        /* Otherwise, clear it */
        if (priv->rxcoalescing)
                gfar_write(&priv->regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&priv->regs->rxic, 0);

        spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

        return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
                struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
        return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
        return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
        /* If valid headers were found, and valid sums
         * were verified, then we tell the kernel that no
         * checksumming is necessary.  Otherwise, it is */
        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
        struct rxfcb *fcb = (struct rxfcb *)skb->data;

        /* Remove the FCB from the skb */
        skb_pull(skb, GMAC_FCB_LEN);

        return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                int length)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;

        if (NULL == skb) {
                if (netif_msg_rx_err(priv))
                        printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
                priv->stats.rx_dropped++;
                priv->extra_stats.rx_skbmissing++;
        } else {
                int ret;

                /* Prep the skb for the packet */
                skb_put(skb, length);

                /* Grab the FCB if there is one */
                if (gfar_uses_fcb(priv))
                        fcb = gfar_get_fcb(skb);

                /* Remove the padded bytes, if there are any */
                if (priv->padding)
                        skb_pull(skb, priv->padding);

                if (priv->rx_csum_enable)
                        gfar_rx_checksum(skb, fcb);

                /* Tell the skb what kind of packet this is */
                skb->protocol = eth_type_trans(skb, dev);

                /* Send the packet up the stack */
                if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
                        ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
                else
                        ret = RECEIVE(skb);

                if (NET_RX_DROP == ret)
                        priv->extra_stats.kernel_dropped++;
        }

        return 0;
}
1438
1439 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1440  *   until the budget/quota has been reached. Returns the number
1441  *   of frames handled
1442  */
1443 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1444 {
1445         struct rxbd8 *bdp;
1446         struct sk_buff *skb;
1447         u16 pkt_len;
1448         int howmany = 0;
1449         struct gfar_private *priv = netdev_priv(dev);
1450
1451         /* Get the first full descriptor */
1452         bdp = priv->cur_rx;
1453
1454         while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1455                 skb = priv->rx_skbuff[priv->skb_currx];
1456
1457                 if (!(bdp->status &
1458                       (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
1459                        | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
1460                         /* Increment the number of packets */
1461                         priv->stats.rx_packets++;
1462                         howmany++;
1463
1464                         /* Remove the FCS from the packet length */
1465                         pkt_len = bdp->length - 4;
1466
1467                         gfar_process_frame(dev, skb, pkt_len);
1468
1469                         priv->stats.rx_bytes += pkt_len;
1470                 } else {
1471                         count_errors(bdp->status, priv);
1472
1473                         if (skb)
1474                                 dev_kfree_skb_any(skb);
1475
1476                         priv->rx_skbuff[priv->skb_currx] = NULL;
1477                 }
1478
1479                 dev->last_rx = jiffies;
1480
1481                 /* Clear the status flags for this buffer */
1482                 bdp->status &= ~RXBD_STATS;
1483
1484                 /* Add another skb for the future */
1485                 skb = gfar_new_skb(dev, bdp);
1486                 priv->rx_skbuff[priv->skb_currx] = skb;
1487
1488                 /* Update to the next pointer */
1489                 if (bdp->status & RXBD_WRAP)
1490                         bdp = priv->rx_bd_base;
1491                 else
1492                         bdp++;
1493
1494                 /* update to point at the next skb */
1495                 priv->skb_currx =
1496                     (priv->skb_currx +
1497                      1) & RX_RING_MOD_MASK(priv->rx_ring_size);
1498
1499         }
1500
1501         /* Update the current rxbd pointer to be the next one */
1502         priv->cur_rx = bdp;
1503
1504         return howmany;
1505 }
1506
1507 #ifdef CONFIG_GFAR_NAPI
1508 static int gfar_poll(struct net_device *dev, int *budget)
1509 {
1510         int howmany;
1511         struct gfar_private *priv = netdev_priv(dev);
1512         int rx_work_limit = *budget;
1513
1514         if (rx_work_limit > dev->quota)
1515                 rx_work_limit = dev->quota;
1516
1517         howmany = gfar_clean_rx_ring(dev, rx_work_limit);
1518
1519         dev->quota -= howmany;
1520         rx_work_limit -= howmany;
1521         *budget -= howmany;
1522
1523         if (rx_work_limit > 0) {
1524                 netif_rx_complete(dev);
1525
1526                 /* Clear the halt bit in RSTAT */
1527                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1528
1529                 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1530
1531                 /* If we are coalescing interrupts, update the timer */
1532                 /* Otherwise, clear it */
1533                 if (priv->rxcoalescing)
1534                         gfar_write(&priv->regs->rxic,
1535                                    mk_ic_value(priv->rxcount, priv->rxtime));
1536                 else
1537                         gfar_write(&priv->regs->rxic, 0);
1538         }
1539
1540         /* Return 1 if there's more work to do */
1541         return (rx_work_limit > 0) ? 0 : 1;
1542 }
1543 #endif
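
/* Context sketch: under the pre-2.6.24 NAPI API used here, gfar_poll()
 * is wired up through net_device members at probe time, roughly (the
 * weight value is illustrative, not quoted from this file):
 *
 *	dev->poll = gfar_poll;
 *	dev->weight = 64;
 *
 * and the receive interrupt path arms it with netif_rx_schedule(dev)
 * after masking further RX interrupts; netif_rx_complete() above is the
 * matching call once the ring has been drained.
 */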
1544
1545 #ifdef CONFIG_NET_POLL_CONTROLLER
1546 /*
1547  * Polling 'interrupt' - used by things like netconsole to send skbs
1548  * without having to re-enable interrupts. It's not called while
1549  * the interrupt routine is executing.
1550  */
1551 static void gfar_netpoll(struct net_device *dev)
1552 {
1553         struct gfar_private *priv = netdev_priv(dev);
1554
1555         /* If the device has multiple interrupts, run tx/rx */
1556         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1557                 disable_irq(priv->interruptTransmit);
1558                 disable_irq(priv->interruptReceive);
1559                 disable_irq(priv->interruptError);
1560                 gfar_interrupt(priv->interruptTransmit, dev);
1561                 enable_irq(priv->interruptError);
1562                 enable_irq(priv->interruptReceive);
1563                 enable_irq(priv->interruptTransmit);
1564         } else {
1565                 disable_irq(priv->interruptTransmit);
1566                 gfar_interrupt(priv->interruptTransmit, dev);
1567                 enable_irq(priv->interruptTransmit);
1568         }
1569 }
1570 #endif
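
/* Context sketch: this callback is attached at probe time through the
 * poll_controller member of struct net_device:
 *
 *	dev->poll_controller = gfar_netpoll;
 *
 * letting netconsole drive the interrupt handler without depending on
 * interrupt delivery.
 */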
1571
1572 /* The interrupt handler for devices with one interrupt */
1573 static irqreturn_t gfar_interrupt(int irq, void *dev_id)
1574 {
1575         struct net_device *dev = dev_id;
1576         struct gfar_private *priv = netdev_priv(dev);
1577
1578         /* Save ievent for future reference */
1579         u32 events = gfar_read(&priv->regs->ievent);
1580
1581         /* Clear IEVENT */
1582         gfar_write(&priv->regs->ievent, events);
1583
1584         /* Check for reception */
1585         if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
1586                 gfar_receive(irq, dev_id);
1587
1588         /* Check for transmit completion */
1589         if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
1590                 gfar_transmit(irq, dev_id);
1591
1592         /* Update error statistics */
1593         if (events & IEVENT_TXE) {
1594                 priv->stats.tx_errors++;
1595
1596                 if (events & IEVENT_LC)
1597                         priv->stats.tx_window_errors++;
1598                 if (events & IEVENT_CRL)
1599                         priv->stats.tx_aborted_errors++;
1600                 if (events & IEVENT_XFUN) {
1601                         if (netif_msg_tx_err(priv))
1602                                 printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
1603                         priv->stats.tx_dropped++;
1604                         priv->extra_stats.tx_underrun++;
1605
1606                         /* Reactivate the Tx Queues */
1607                         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1608                 }
1609         }
1610         if (events & IEVENT_BSY) {
1611                 priv->stats.rx_errors++;
1612                 priv->extra_stats.rx_bsy++;
1613
1614                 gfar_receive(irq, dev_id);
1615
1616 #ifndef CONFIG_GFAR_NAPI
1617                 /* Clear the halt bit in RSTAT */
1618                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1619 #endif
1620
1621                 if (netif_msg_rx_err(priv))
1622                         printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
1623                                         dev->name,
1624                                         gfar_read(&priv->regs->rstat));
1625         }
1626         if (events & IEVENT_BABR) {
1627                 priv->stats.rx_errors++;
1628                 priv->extra_stats.rx_babr++;
1629
1630                 if (netif_msg_rx_err(priv))
1631                         printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1632         }
1633         if (events & IEVENT_EBERR) {
1634                 priv->extra_stats.eberr++;
1635                 if (netif_msg_rx_err(priv))
1636                         printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1637         }
1638         if ((events & IEVENT_RXC) && netif_msg_rx_err(priv))
1639                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1640
1641         if (events & IEVENT_BABT) {
1642                 priv->extra_stats.tx_babt++;
1643                 if (netif_msg_tx_err(priv))
1644                         printk(KERN_DEBUG "%s: babt error\n", dev->name);
1645         }
1646
1647         return IRQ_HANDLED;
1648 }
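
/* Context sketch: for single-interrupt controllers this handler is
 * registered when the interface is brought up, in the style of (the
 * name string is illustrative, not quoted from this file):
 *
 *	err = request_irq(priv->interruptTransmit, gfar_interrupt,
 *			0, "gfar_interrupt", dev);
 *
 * dev is passed as dev_id, which is how the handler above recovers the
 * net_device on entry.
 */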
1649
1650 /* Called every time the controller might need to be made
1651  * aware of new link state.  The PHY code conveys this
1652  * information through variables in the phydev structure, and this
1653  * function converts those variables into the appropriate
1654  * register values, and can bring down the device if needed.
1655  */
1656 static void adjust_link(struct net_device *dev)
1657 {
1658         struct gfar_private *priv = netdev_priv(dev);
1659         struct gfar __iomem *regs = priv->regs;
1660         unsigned long flags;
1661         struct phy_device *phydev = priv->phydev;
1662         int new_state = 0;
1663
1664         spin_lock_irqsave(&priv->txlock, flags);
1665         if (phydev->link) {
1666                 u32 tempval = gfar_read(&regs->maccfg2);
1667                 u32 ecntrl = gfar_read(&regs->ecntrl);
1668
1669                 /* If the duplex has changed, set MACCFG2 for full
1670                  * duplex when the PHY reports it, else half duplex. */
1671                 if (phydev->duplex != priv->oldduplex) {
1672                         new_state = 1;
1673                         if (!(phydev->duplex))
1674                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
1675                         else
1676                                 tempval |= MACCFG2_FULL_DUPLEX;
1677
1678                         priv->oldduplex = phydev->duplex;
1679                 }
1680
1681                 if (phydev->speed != priv->oldspeed) {
1682                         new_state = 1;
1683                         switch (phydev->speed) {
1684                         case 1000:
1685                                 tempval =
1686                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1687                                 break;
1688                         case 100:
1689                         case 10:
1690                                 tempval =
1691                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1692
1693                                 /* Reduced mode distinguishes
1694                                  * between 10 and 100 */
1695                                 if (phydev->speed == SPEED_100)
1696                                         ecntrl |= ECNTRL_R100;
1697                                 else
1698                                         ecntrl &= ~(ECNTRL_R100);
1699                                 break;
1700                         default:
1701                                 if (netif_msg_link(priv))
1702                                         printk(KERN_WARNING
1703                                                 "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
1704                                                 dev->name, phydev->speed);
1705                                 break;
1706                         }
1707
1708                         priv->oldspeed = phydev->speed;
1709                 }
1710
1711                 gfar_write(&regs->maccfg2, tempval);
1712                 gfar_write(&regs->ecntrl, ecntrl);
1713
1714                 if (!priv->oldlink) {
1715                         new_state = 1;
1716                         priv->oldlink = 1;
1717                         netif_schedule(dev);
1718                 }
1719         } else if (priv->oldlink) {
1720                 new_state = 1;
1721                 priv->oldlink = 0;
1722                 priv->oldspeed = 0;
1723                 priv->oldduplex = -1;
1724         }
1725
1726         if (new_state && netif_msg_link(priv))
1727                 phy_print_status(phydev);
1728
1729         spin_unlock_irqrestore(&priv->txlock, flags);
1730 }
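
/* Context sketch: adjust_link() is not called directly by this driver;
 * it is registered with the PHY layer as the link-change callback when
 * the PHY is attached, roughly (phy_id and interface come from the board
 * info and are illustrative here):
 *
 *	priv->phydev = phy_connect(dev, phy_id, &adjust_link,
 *			0, interface);
 *
 * The PHY state machine then invokes it on any speed, duplex, or link
 * change.
 */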
1731
1732 /* Update the hash table based on the current list of multicast
1733  * addresses we subscribe to.  Also, change the promiscuity of
1734  * the device based on the flags (this function is called
1735  * whenever dev->flags is changed). */
1736 static void gfar_set_multi(struct net_device *dev)
1737 {
1738         struct dev_mc_list *mc_ptr;
1739         struct gfar_private *priv = netdev_priv(dev);
1740         struct gfar __iomem *regs = priv->regs;
1741         u32 tempval;
1742
1743         if (dev->flags & IFF_PROMISC) {
1744                 /* Set RCTRL to PROM */
1745                 tempval = gfar_read(&regs->rctrl);
1746                 tempval |= RCTRL_PROM;
1747                 gfar_write(&regs->rctrl, tempval);
1748         } else {
1749                 /* Set RCTRL to not PROM */
1750                 tempval = gfar_read(&regs->rctrl);
1751                 tempval &= ~(RCTRL_PROM);
1752                 gfar_write(&regs->rctrl, tempval);
1753         }
1754
1755         if (dev->flags & IFF_ALLMULTI) {
1756                 /* Set the hash to rx all multicast frames */
1757                 gfar_write(&regs->igaddr0, 0xffffffff);
1758                 gfar_write(&regs->igaddr1, 0xffffffff);
1759                 gfar_write(&regs->igaddr2, 0xffffffff);
1760                 gfar_write(&regs->igaddr3, 0xffffffff);
1761                 gfar_write(&regs->igaddr4, 0xffffffff);
1762                 gfar_write(&regs->igaddr5, 0xffffffff);
1763                 gfar_write(&regs->igaddr6, 0xffffffff);
1764                 gfar_write(&regs->igaddr7, 0xffffffff);
1765                 gfar_write(&regs->gaddr0, 0xffffffff);
1766                 gfar_write(&regs->gaddr1, 0xffffffff);
1767                 gfar_write(&regs->gaddr2, 0xffffffff);
1768                 gfar_write(&regs->gaddr3, 0xffffffff);
1769                 gfar_write(&regs->gaddr4, 0xffffffff);
1770                 gfar_write(&regs->gaddr5, 0xffffffff);
1771                 gfar_write(&regs->gaddr6, 0xffffffff);
1772                 gfar_write(&regs->gaddr7, 0xffffffff);
1773         } else {
1774                 int em_num;
1775                 int idx;
1776
1777                 /* zero out the hash */
1778                 gfar_write(&regs->igaddr0, 0x0);
1779                 gfar_write(&regs->igaddr1, 0x0);
1780                 gfar_write(&regs->igaddr2, 0x0);
1781                 gfar_write(&regs->igaddr3, 0x0);
1782                 gfar_write(&regs->igaddr4, 0x0);
1783                 gfar_write(&regs->igaddr5, 0x0);
1784                 gfar_write(&regs->igaddr6, 0x0);
1785                 gfar_write(&regs->igaddr7, 0x0);
1786                 gfar_write(&regs->gaddr0, 0x0);
1787                 gfar_write(&regs->gaddr1, 0x0);
1788                 gfar_write(&regs->gaddr2, 0x0);
1789                 gfar_write(&regs->gaddr3, 0x0);
1790                 gfar_write(&regs->gaddr4, 0x0);
1791                 gfar_write(&regs->gaddr5, 0x0);
1792                 gfar_write(&regs->gaddr6, 0x0);
1793                 gfar_write(&regs->gaddr7, 0x0);
1794
1795                 /* If we have extended hash tables, we need to
1796                  * clear the exact match registers to prepare for
1797                  * setting them */
1798                 if (priv->extended_hash) {
1799                         em_num = GFAR_EM_NUM + 1;
1800                         gfar_clear_exact_match(dev);
1801                         idx = 1;
1802                 } else {
1803                         idx = 0;
1804                         em_num = 0;
1805                 }
1806
1807                 if (dev->mc_count == 0)
1808                         return;
1809
1810                 /* Parse the list, and set the appropriate bits */
1811                 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1812                         if (idx < em_num) {
1813                                 gfar_set_mac_for_addr(dev, idx,
1814                                                 mc_ptr->dmi_addr);
1815                                 idx++;
1816                         } else
1817                                 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1818                 }
1819         }
1820
1821         return;
1822 }
1823
1824
1825 /* Clears each of the exact match registers to zero, so they
1826  * don't interfere with normal reception */
1827 static void gfar_clear_exact_match(struct net_device *dev)
1828 {
1829         int idx;
1830         u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
1831
1832         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
1833                 gfar_set_mac_for_addr(dev, idx, zero_arr);
1834 }
1835
1836 /* Set the appropriate hash bit for the given addr */
1837 /* The algorithm works like so:
1838  * 1) Take the Destination Address (ie the multicast address), and
1839  * do a CRC on it (little endian), and reverse the bits of the
1840  * result.
1841  * 2) Use the 8 most significant bits as a hash into a 256-entry
1842  * table.  The table is controlled through 8 32-bit registers:
1843  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
1844  * entry 255.  This means that the 3 most significant bits of the
1845  * hash select which gaddr register to use, and the 5 other bits
1846  * indicate which bit (assuming an IBM numbering scheme, which
1847  * for PowerPC (tm) is usually the case) in the register holds
1848  * the entry. */
1849 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1850 {
1851         u32 tempval;
1852         struct gfar_private *priv = netdev_priv(dev);
1853         u32 result = ether_crc(MAC_ADDR_LEN, addr);
1854         int width = priv->hash_width;
1855         u8 whichbit = (result >> (32 - width)) & 0x1f;
1856         u8 whichreg = result >> (32 - width + 5);
1857         u32 value = (1 << (31-whichbit));
1858
1859         tempval = gfar_read(priv->hash_regs[whichreg]);
1860         tempval |= value;
1861         gfar_write(priv->hash_regs[whichreg], tempval);
1862
1863         return;
1864 }
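
/* Worked example (assuming hash_width == 8, i.e. the full 256-entry
 * table): if the top byte of the CRC is 0xa7, then
 *
 *	whichbit = 0xa7 & 0x1f = 7	(low 5 bits of the hash)
 *	whichreg = 0xa7 >> 5   = 5	(top 3 bits of the hash)
 *	value    = 1 << (31 - 7)	(IBM bit 7, mask 0x01000000)
 *
 * so IBM bit 7 of gaddr5 is set, which is table entry 5 * 32 + 7 =
 * 167 = 0xa7, matching the hash value.
 */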
1865
1866
1867 /* There are multiple MAC Address register pairs on some controllers
1868  * This function sets the numth pair to a given address
1869  */
1870 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
1871 {
1872         struct gfar_private *priv = netdev_priv(dev);
1873         int idx;
1874         char tmpbuf[MAC_ADDR_LEN + 2] = { 0 };	/* padded: the u32 read at tmpbuf + 4 below needs 8 bytes */
1875         u32 tempval;
1876         u32 __iomem *macptr = &priv->regs->macstnaddr1;
1877
1878         macptr += num*2;
1879
1880         /* Now copy it into the mac registers backwards, since the */
1881         /* hardware expects the address bytes in reversed order */
1882         for (idx = 0; idx < MAC_ADDR_LEN; idx++)
1883                 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
1884
1885         gfar_write(macptr, *((u32 *) (tmpbuf)));
1886
1887         tempval = *((u32 *) (tmpbuf + 4));
1888
1889         gfar_write(macptr+1, tempval);
1890 }
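
/* Worked example: for num == 0 and addr == 00:04:9f:01:02:03, the
 * reversed copy yields tmpbuf = { 03 02 01 9f 04 00 } (plus zero pad),
 * so on the big-endian PowerPC targets this driver runs on:
 *
 *	macstnaddr1 <- 0x0302019f	(addr bytes 5..2, reversed)
 *	macstnaddr2 <- 0x04000000	(addr bytes 1..0 in the top half)
 *
 * i.e. the hardware sees the station address in byte-reversed order, as
 * the comment above notes.
 */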
1891
1892 /* GFAR error interrupt handler */
1893 static irqreturn_t gfar_error(int irq, void *dev_id)
1894 {
1895         struct net_device *dev = dev_id;
1896         struct gfar_private *priv = netdev_priv(dev);
1897
1898         /* Save ievent for future reference */
1899         u32 events = gfar_read(&priv->regs->ievent);
1900
1901         /* Clear IEVENT */
1902         gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
1903
1904         /* Log the raw event and mask state if error messages are on */
1905         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
1906                 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
1907                                 dev->name, events, gfar_read(&priv->regs->imask));
1908
1909         /* Update the error counters */
1910         if (events & IEVENT_TXE) {
1911                 priv->stats.tx_errors++;
1912
1913                 if (events & IEVENT_LC)
1914                         priv->stats.tx_window_errors++;
1915                 if (events & IEVENT_CRL)
1916                         priv->stats.tx_aborted_errors++;
1917                 if (events & IEVENT_XFUN) {
1918                         if (netif_msg_tx_err(priv))
1919                                 printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
1920                                                 dev->name);
1921                         priv->stats.tx_dropped++;
1922                         priv->extra_stats.tx_underrun++;
1923
1924                         /* Reactivate the Tx Queues */
1925                         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1926                 }
1927                 if (netif_msg_tx_err(priv))
1928                         printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1929         }
1930         if (events & IEVENT_BSY) {
1931                 priv->stats.rx_errors++;
1932                 priv->extra_stats.rx_bsy++;
1933
1934                 gfar_receive(irq, dev_id);
1935
1936 #ifndef CONFIG_GFAR_NAPI
1937                 /* Clear the halt bit in RSTAT */
1938                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1939 #endif
1940
1941                 if (netif_msg_rx_err(priv))
1942                         printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
1943                                         dev->name,
1944                                         gfar_read(&priv->regs->rstat));
1945         }
1946         if (events & IEVENT_BABR) {
1947                 priv->stats.rx_errors++;
1948                 priv->extra_stats.rx_babr++;
1949
1950                 if (netif_msg_rx_err(priv))
1951                         printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1952         }
1953         if (events & IEVENT_EBERR) {
1954                 priv->extra_stats.eberr++;
1955                 if (netif_msg_rx_err(priv))
1956                         printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1957         }
1958         if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
1959                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1961
1962         if (events & IEVENT_BABT) {
1963                 priv->extra_stats.tx_babt++;
1964                 if (netif_msg_tx_err(priv))
1965                         printk(KERN_DEBUG "%s: babt error\n", dev->name);
1966         }
1967         return IRQ_HANDLED;
1968 }
1969
1970 /* Structure for a device driver */
1971 static struct platform_driver gfar_driver = {
1972         .probe = gfar_probe,
1973         .remove = gfar_remove,
1974         .driver = {
1975                 .name = "fsl-gianfar",
1976         },
1977 };
1978
1979 static int __init gfar_init(void)
1980 {
1981         int err = gfar_mdio_init();
1982
1983         if (err)
1984                 return err;
1985
1986         err = platform_driver_register(&gfar_driver);
1987
1988         if (err)
1989                 gfar_mdio_exit();
1990
1991         return err;
1992 }
1993
1994 static void __exit gfar_exit(void)
1995 {
1996         platform_driver_unregister(&gfar_driver);
1997         gfar_mdio_exit();
1998 }
1999
2000 module_init(gfar_init);
2001 module_exit(gfar_exit);
2002