2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5 * Right now, I am very wasteful with the buffers. I allocate memory
6 * pages and then divide them into 2K frame buffers. This way I know I
7 * have buffers large enough to hold one frame within one buffer descriptor.
8 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
9 * will be much more memory efficient and will easily handle lots of
12 * Much better multiple PHY support by Magnus Damm.
13 * Copyright (c) 2000 Ericsson Radio Systems AB.
15 * Support for FEC controller of ColdFire processors.
16 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19 * Copyright (c) 2004-2006 Macq Electronique SA.
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/ptrace.h>
26 #include <linux/errno.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/spinlock.h>
37 #include <linux/workqueue.h>
38 #include <linux/bitops.h>
40 #include <linux/irq.h>
41 #include <linux/clk.h>
42 #include <linux/platform_device.h>
44 #include <asm/cacheflush.h>
46 #ifndef CONFIG_ARCH_MXC
47 #include <asm/coldfire.h>
48 #include <asm/mcfsim.h>
53 #ifdef CONFIG_ARCH_MXC
54 #include <mach/hardware.h>
55 #define FEC_ALIGNMENT 0xf
57 #define FEC_ALIGNMENT 0x3
61 * Define the fixed address of the FEC hardware.
63 #if defined(CONFIG_M5272)
64 #define HAVE_mii_link_interrupt
66 static unsigned char fec_mac_default[] = {
67 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
71 * Some hardware gets its MAC address out of local flash memory.
72 * If this is non-zero then assume it is the address to get the MAC from.
74 #if defined(CONFIG_NETtel)
75 #define FEC_FLASHMAC 0xf0006006
76 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
77 #define FEC_FLASHMAC 0xf0006000
78 #elif defined(CONFIG_CANCam)
79 #define FEC_FLASHMAC 0xf0020000
80 #elif defined (CONFIG_M5272C3)
81 #define FEC_FLASHMAC (0xffe04000 + 4)
82 #elif defined(CONFIG_MOD5272)
83 #define FEC_FLASHMAC 0xffc0406b
85 #define FEC_FLASHMAC 0
87 #endif /* CONFIG_M5272 */
89 /* Forward declarations of some structures to support different PHYs
94 void (*funct)(uint mii_reg, struct net_device *dev);
101 const phy_cmd_t *config;
102 const phy_cmd_t *startup;
103 const phy_cmd_t *ack_int;
104 const phy_cmd_t *shutdown;
107 /* The number of Tx and Rx buffers. These are allocated from the page
108 * pool. The code may assume these are a power of two, so it is best
109 * to keep them that size.
110 * We don't need to allocate pages for the transmitter. We just use
111 * the skbuffer directly.
113 #define FEC_ENET_RX_PAGES 8
114 #define FEC_ENET_RX_FRSIZE 2048
115 #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
116 #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
117 #define FEC_ENET_TX_FRSIZE 2048
118 #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
119 #define TX_RING_SIZE 16 /* Must be power of two */
120 #define TX_RING_MOD_MASK 15 /* for this to work */
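/*
 * Keeping the ring sizes a power of two lets the index arithmetic wrap
 * with a simple mask, e.g. (fep->skb_cur + 1) & TX_RING_MOD_MASK below,
 * instead of a modulo operation.
 */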
122 #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
123 #error "FEC: descriptor ring size constants too large"
126 /* Interrupt events/masks.
128 #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
129 #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
130 #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
131 #define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
132 #define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
133 #define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
134 #define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
135 #define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
136 #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
137 #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
139 /* The FEC stores dest/src/type, data, and checksum for receive packets.
141 #define PKT_MAXBUF_SIZE 1518
142 #define PKT_MINBUF_SIZE 64
143 #define PKT_MAXBLR_SIZE 1520
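/*
 * PKT_MAXBLR_SIZE is the 1518-byte maximum frame rounded up to a multiple
 * of 16; as far as I know the FEC expects the value programmed into
 * FEC_R_BUFF_SIZE to be a multiple of 16.
 */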
147 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
148 * size bits. Other FEC hardware does not, so we need to take that into
149 * account when setting it.
151 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
152 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
153 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
155 #define OPT_FRAME_SIZE 0
158 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
159 * tx_bd_base always point to the base of the buffer descriptors. The
160 * cur_rx and cur_tx point to the currently available buffer.
161 * The dirty_tx tracks the current buffer that is being sent by the
162 * controller. The cur_tx and dirty_tx are equal under both completely
163 * empty and completely full conditions. The empty/ready indicator in
164 * the buffer descriptor determines the actual condition.
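 * (In practice the queue is stopped in fec_enet_start_xmit when advancing
 * cur_tx would make it equal to dirty_tx again; see the tx_full handling.)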
166 struct fec_enet_private {
167 /* Hardware registers of the FEC device */
170 struct net_device *netdev;
174 /* The saved address of a sent-in-place packet/buffer, for later free (dev_kfree_skb_any()). */
175 unsigned char *tx_bounce[TX_RING_SIZE];
176 struct sk_buff* tx_skbuff[TX_RING_SIZE];
180 /* CPM dual port RAM relative addresses.
183 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
185 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
186 cbd_t *dirty_tx; /* The ring entries to be free()ed. */
188 /* held while accessing the hardware (e.g. the tx/rx ring buffers) but not the MAC */
190 /* held while accessing the mii_list_t elements */
197 phy_info_t const *phy;
198 struct work_struct phy_task;
201 uint mii_phy_task_queued;
212 static int fec_enet_open(struct net_device *dev);
213 static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
214 static void fec_enet_mii(struct net_device *dev);
215 static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
216 static void fec_enet_tx(struct net_device *dev);
217 static void fec_enet_rx(struct net_device *dev);
218 static int fec_enet_close(struct net_device *dev);
219 static void set_multicast_list(struct net_device *dev);
220 static void fec_restart(struct net_device *dev, int duplex);
221 static void fec_stop(struct net_device *dev);
222 static void fec_set_mac_address(struct net_device *dev);
225 /* MII processing. We keep this as simple as possible. Requests are
226 * placed on the list (if there is room). When the request is finished
227 * by the MII, an optional function may be called.
229 typedef struct mii_list {
231 void (*mii_func)(uint val, struct net_device *dev);
232 struct mii_list *mii_next;
236 static mii_list_t mii_cmds[NMII];
237 static mii_list_t *mii_free;
238 static mii_list_t *mii_head;
239 static mii_list_t *mii_tail;
241 static int mii_queue(struct net_device *dev, int request,
242 void (*func)(uint, struct net_device *));
244 /* Make MII read/write commands for the FEC.
246 #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
247 #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
251 /* Transmitter timeout.
253 #define TX_TIMEOUT (2*HZ)
255 /* Register definitions for the PHY.
258 #define MII_REG_CR 0 /* Control Register */
259 #define MII_REG_SR 1 /* Status Register */
260 #define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
261 #define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
262 #define MII_REG_ANAR 4 /* A-N Advertisement Register */
263 #define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
264 #define MII_REG_ANER 6 /* A-N Expansion Register */
265 #define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
266 #define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
268 /* values for phy_status */
270 #define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
271 #define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
272 #define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
273 #define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
274 #define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
275 #define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
276 #define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
278 #define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
279 #define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
280 #define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
281 #define PHY_STAT_SPMASK 0xf000 /* mask for speed */
282 #define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
283 #define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
284 #define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
285 #define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
289 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
291 struct fec_enet_private *fep = netdev_priv(dev);
293 unsigned short status;
297 /* Link is down or autonegotiation is in progress. */
301 spin_lock_irqsave(&fep->hw_lock, flags);
302 /* Fill in a Tx ring entry */
305 status = bdp->cbd_sc;
306 #ifndef final_version
307 if (status & BD_ENET_TX_READY) {
308 /* Ooops. All transmit buffers are full. Bail out.
309 * This should not happen, since the queue should have been stopped.
311 printk("%s: tx queue full!\n", dev->name);
312 spin_unlock_irqrestore(&fep->hw_lock, flags);
317 /* Clear all of the status flags.
319 status &= ~BD_ENET_TX_STATS;
321 /* Set buffer length and buffer pointer.
323 bdp->cbd_bufaddr = __pa(skb->data);
324 bdp->cbd_datlen = skb->len;
327 * On some FEC implementations data must be aligned on
328 * 4-byte boundaries. Use bounce buffers to copy data
329 * and get it aligned. Ugh.
331 if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
333 index = bdp - fep->tx_bd_base;
334 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
335 bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
340 fep->tx_skbuff[fep->skb_cur] = skb;
342 dev->stats.tx_bytes += skb->len;
343 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
345 /* Push the data cache so the CPM does not get stale memory
348 dma_sync_single(NULL, bdp->cbd_bufaddr,
349 bdp->cbd_datlen, DMA_TO_DEVICE);
351 /* Send it on its way. Tell FEC it's ready, interrupt when done,
352 * it's the last BD of the frame, and to put the CRC on the end.
355 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
356 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
357 bdp->cbd_sc = status;
359 dev->trans_start = jiffies;
361 /* Trigger transmission start */
362 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
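/* Any write to X_DES_ACTIVE makes the FEC rescan the transmit descriptor
 * ring; as far as I can tell the value written is ignored.
 */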
364 /* If this was the last BD in the ring, start at the beginning again.
366 if (status & BD_ENET_TX_WRAP) {
367 bdp = fep->tx_bd_base;
372 if (bdp == fep->dirty_tx) {
374 netif_stop_queue(dev);
377 fep->cur_tx = (cbd_t *)bdp;
379 spin_unlock_irqrestore(&fep->hw_lock, flags);
385 fec_timeout(struct net_device *dev)
387 struct fec_enet_private *fep = netdev_priv(dev);
389 printk("%s: transmit timed out.\n", dev->name);
390 dev->stats.tx_errors++;
391 #ifndef final_version
396 printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
397 (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
398 (unsigned long)fep->dirty_tx,
399 (unsigned long)fep->cur_rx);
401 bdp = fep->tx_bd_base;
402 printk(" tx: %u buffers\n", TX_RING_SIZE);
403 for (i = 0 ; i < TX_RING_SIZE; i++) {
404 printk(" %08x: %04x %04x %08x\n",
408 (int) bdp->cbd_bufaddr);
412 bdp = fep->rx_bd_base;
413 printk(" rx: %lu buffers\n", (unsigned long) RX_RING_SIZE);
414 for (i = 0 ; i < RX_RING_SIZE; i++) {
415 printk(" %08x: %04x %04x %08x\n",
419 (int) bdp->cbd_bufaddr);
424 fec_restart(dev, fep->full_duplex);
425 netif_wake_queue(dev);
428 /* The interrupt handler.
429 * This is called from the MPC core interrupt.
432 fec_enet_interrupt(int irq, void * dev_id)
434 struct net_device *dev = dev_id;
435 struct fec_enet_private *fep = netdev_priv(dev);
437 irqreturn_t ret = IRQ_NONE;
439 /* Get the interrupt events that caused us to be here. */
441 int_events = readl(fep->hwp + FEC_IEVENT);
442 writel(int_events, fep->hwp + FEC_IEVENT);
444 /* Handle receive event in its own function. */
445 if (int_events & FEC_ENET_RXF) {
450 /* Transmit OK, or non-fatal error. Update the buffer
451 * descriptors. FEC handles all errors, we just discover
452 * them as part of the transmit process.
454 if (int_events & FEC_ENET_TXF) {
459 if (int_events & FEC_ENET_MII) {
464 } while (int_events);
471 fec_enet_tx(struct net_device *dev)
473 struct fec_enet_private *fep;
475 unsigned short status;
478 fep = netdev_priv(dev);
479 spin_lock_irq(&fep->hw_lock);
482 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
483 if (bdp == fep->cur_tx && fep->tx_full == 0) break;
485 skb = fep->tx_skbuff[fep->skb_dirty];
486 /* Check for errors. */
487 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
488 BD_ENET_TX_RL | BD_ENET_TX_UN |
490 dev->stats.tx_errors++;
491 if (status & BD_ENET_TX_HB) /* No heartbeat */
492 dev->stats.tx_heartbeat_errors++;
493 if (status & BD_ENET_TX_LC) /* Late collision */
494 dev->stats.tx_window_errors++;
495 if (status & BD_ENET_TX_RL) /* Retrans limit */
496 dev->stats.tx_aborted_errors++;
497 if (status & BD_ENET_TX_UN) /* Underrun */
498 dev->stats.tx_fifo_errors++;
499 if (status & BD_ENET_TX_CSL) /* Carrier lost */
500 dev->stats.tx_carrier_errors++;
502 dev->stats.tx_packets++;
505 #ifndef final_version
506 if (status & BD_ENET_TX_READY)
507 printk("HEY! Enet xmit interrupt and TX_READY.\n");
509 /* Deferred means some collisions occurred during transmit,
510 * but we eventually sent the packet OK.
512 if (status & BD_ENET_TX_DEF)
513 dev->stats.collisions++;
515 /* Free the sk buffer associated with this last transmit.
517 dev_kfree_skb_any(skb);
518 fep->tx_skbuff[fep->skb_dirty] = NULL;
519 fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
521 /* Update pointer to next buffer descriptor to be transmitted.
523 if (status & BD_ENET_TX_WRAP)
524 bdp = fep->tx_bd_base;
528 /* Since we have freed up a buffer, the ring is no longer
533 if (netif_queue_stopped(dev))
534 netif_wake_queue(dev);
537 fep->dirty_tx = (cbd_t *)bdp;
538 spin_unlock_irq(&fep->hw_lock);
542 /* During a receive, the cur_rx points to the current incoming buffer.
543 * When we update through the ring, if the next incoming buffer has
544 * not been given to the system, we just set the empty indicator,
545 * effectively tossing the packet.
548 fec_enet_rx(struct net_device *dev)
550 struct fec_enet_private *fep = netdev_priv(dev);
552 unsigned short status;
561 spin_lock_irq(&fep->hw_lock);
563 /* First, grab all of the stats for the incoming packet.
564 * These get messed up if we get called due to a busy condition.
568 while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
570 #ifndef final_version
571 /* Since we have allocated space to hold a complete frame,
572 * the last indicator should be set.
574 if ((status & BD_ENET_RX_LAST) == 0)
575 printk("FEC ENET: rcv is not +last\n");
579 goto rx_processing_done;
581 /* Check for errors. */
582 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
583 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
584 dev->stats.rx_errors++;
585 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
586 /* Frame too long or too short. */
587 dev->stats.rx_length_errors++;
589 if (status & BD_ENET_RX_NO) /* Frame alignment */
590 dev->stats.rx_frame_errors++;
591 if (status & BD_ENET_RX_CR) /* CRC Error */
592 dev->stats.rx_crc_errors++;
593 if (status & BD_ENET_RX_OV) /* FIFO overrun */
594 dev->stats.rx_fifo_errors++;
597 /* Report late collisions as a frame error.
598 * On this error, the BD is closed, but we don't know what we
599 * have in the buffer. So, just drop this frame on the floor.
601 if (status & BD_ENET_RX_CL) {
602 dev->stats.rx_errors++;
603 dev->stats.rx_frame_errors++;
604 goto rx_processing_done;
607 /* Process the incoming frame.
609 dev->stats.rx_packets++;
610 pkt_len = bdp->cbd_datlen;
611 dev->stats.rx_bytes += pkt_len;
612 data = (__u8*)__va(bdp->cbd_bufaddr);
614 dma_sync_single(NULL, (unsigned long)__pa(data),
615 pkt_len - 4, DMA_FROM_DEVICE);
617 /* This does 16 byte alignment, exactly what we need.
618 * The packet length includes FCS, but we don't want to
619 * include that when passing upstream as it messes up
620 * bridging applications.
622 skb = dev_alloc_skb(pkt_len-4);
625 printk("%s: Memory squeeze, dropping packet.\n", dev->name);
626 dev->stats.rx_dropped++;
628 skb_put(skb,pkt_len-4); /* Make room */
629 skb_copy_to_linear_data(skb, data, pkt_len-4);
630 skb->protocol=eth_type_trans(skb,dev);
635 /* Clear the status flags for this buffer.
637 status &= ~BD_ENET_RX_STATS;
639 /* Mark the buffer empty.
641 status |= BD_ENET_RX_EMPTY;
642 bdp->cbd_sc = status;
644 /* Update BD pointer to next entry.
646 if (status & BD_ENET_RX_WRAP)
647 bdp = fep->rx_bd_base;
652 /* Doing this here will keep the FEC running while we process
653 * incoming frames. On a heavily loaded network, we should be
654 * able to keep up at the expense of system resources.
656 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
658 } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
659 fep->cur_rx = (cbd_t *)bdp;
662 /* Doing this here will allow us to process all frames in the
663 * ring before the FEC is allowed to put more there. On a heavily
664 * loaded network, some frames may be lost. Unfortunately, this
665 * increases the interrupt overhead since we can potentially work
666 * our way back to the interrupt return only to come right back
669 fecp->fec_r_des_active = 0;
672 spin_unlock_irq(&fep->hw_lock);
676 /* called from interrupt context */
678 fec_enet_mii(struct net_device *dev)
680 struct fec_enet_private *fep;
683 fep = netdev_priv(dev);
684 spin_lock_irq(&fep->mii_lock);
686 if ((mip = mii_head) == NULL) {
687 printk("MII and no head!\n");
691 if (mip->mii_func != NULL)
692 (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
694 mii_head = mip->mii_next;
695 mip->mii_next = mii_free;
698 if ((mip = mii_head) != NULL)
699 writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
702 spin_unlock_irq(&fep->mii_lock);
706 mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
708 struct fec_enet_private *fep;
713 /* Add PHY address to register command.
715 fep = netdev_priv(dev);
716 spin_lock_irqsave(&fep->mii_lock, flags);
718 regval |= fep->phy_addr << 23;
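/* The value written to FEC_MII_DATA is a complete management frame:
 * start-of-frame and opcode bits in 31-28 (set by mk_mii_read/mk_mii_write
 * above), PHY address in 27-23, register address in 22-18, turnaround in
 * 17-16 and data in 15-0.
 */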
721 if ((mip = mii_free) != NULL) {
722 mii_free = mip->mii_next;
723 mip->mii_regval = regval;
724 mip->mii_func = func;
725 mip->mii_next = NULL;
727 mii_tail->mii_next = mip;
730 mii_head = mii_tail = mip;
731 writel(regval, fep->hwp + FEC_MII_DATA);
737 spin_unlock_irqrestore(&fep->mii_lock, flags);
741 static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
746 for (; c->mii_data != mk_mii_end; c++)
747 mii_queue(dev, c->mii_data, c->funct);
750 static void mii_parse_sr(uint mii_reg, struct net_device *dev)
752 struct fec_enet_private *fep = netdev_priv(dev);
753 volatile uint *s = &(fep->phy_status);
756 status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
758 if (mii_reg & 0x0004)
759 status |= PHY_STAT_LINK;
760 if (mii_reg & 0x0010)
761 status |= PHY_STAT_FAULT;
762 if (mii_reg & 0x0020)
763 status |= PHY_STAT_ANC;
767 static void mii_parse_cr(uint mii_reg, struct net_device *dev)
769 struct fec_enet_private *fep = netdev_priv(dev);
770 volatile uint *s = &(fep->phy_status);
773 status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
775 if (mii_reg & 0x1000)
776 status |= PHY_CONF_ANE;
777 if (mii_reg & 0x4000)
778 status |= PHY_CONF_LOOP;
782 static void mii_parse_anar(uint mii_reg, struct net_device *dev)
784 struct fec_enet_private *fep = netdev_priv(dev);
785 volatile uint *s = &(fep->phy_status);
788 status = *s & ~(PHY_CONF_SPMASK);
790 if (mii_reg & 0x0020)
791 status |= PHY_CONF_10HDX;
792 if (mii_reg & 0x0040)
793 status |= PHY_CONF_10FDX;
794 if (mii_reg & 0x0080)
795 status |= PHY_CONF_100HDX;
796 if (mii_reg & 0x0100)
797 status |= PHY_CONF_100FDX;
801 /* ------------------------------------------------------------------------- */
802 /* The Level One LXT970 is used by many boards */
804 #define MII_LXT970_MIRROR 16 /* Mirror register */
805 #define MII_LXT970_IER 17 /* Interrupt Enable Register */
806 #define MII_LXT970_ISR 18 /* Interrupt Status Register */
807 #define MII_LXT970_CONFIG 19 /* Configuration Register */
808 #define MII_LXT970_CSR 20 /* Chip Status Register */
810 static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
812 struct fec_enet_private *fep = netdev_priv(dev);
813 volatile uint *s = &(fep->phy_status);
816 status = *s & ~(PHY_STAT_SPMASK);
817 if (mii_reg & 0x0800) {
818 if (mii_reg & 0x1000)
819 status |= PHY_STAT_100FDX;
821 status |= PHY_STAT_100HDX;
823 if (mii_reg & 0x1000)
824 status |= PHY_STAT_10FDX;
826 status |= PHY_STAT_10HDX;
831 static phy_cmd_t const phy_cmd_lxt970_config[] = {
832 { mk_mii_read(MII_REG_CR), mii_parse_cr },
833 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
836 static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
837 { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
838 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
841 static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
842 /* read SR and ISR to acknowledge */
843 { mk_mii_read(MII_REG_SR), mii_parse_sr },
844 { mk_mii_read(MII_LXT970_ISR), NULL },
846 /* find out the current status */
847 { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
850 static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
851 { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
854 static phy_info_t const phy_info_lxt970 = {
857 .config = phy_cmd_lxt970_config,
858 .startup = phy_cmd_lxt970_startup,
859 .ack_int = phy_cmd_lxt970_ack_int,
860 .shutdown = phy_cmd_lxt970_shutdown
863 /* ------------------------------------------------------------------------- */
864 /* The Level One LXT971 is used on some of my custom boards */
866 /* register definitions for the 971 */
868 #define MII_LXT971_PCR 16 /* Port Control Register */
869 #define MII_LXT971_SR2 17 /* Status Register 2 */
870 #define MII_LXT971_IER 18 /* Interrupt Enable Register */
871 #define MII_LXT971_ISR 19 /* Interrupt Status Register */
872 #define MII_LXT971_LCR 20 /* LED Control Register */
873 #define MII_LXT971_TCR 30 /* Transmit Control Register */
876 * I had some nice ideas of running the MDIO faster...
877 * The 971 should support 8MHz and I tried it, but things acted really
878 * weird, so 2.5 MHz ought to be enough for anyone...
881 static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
883 struct fec_enet_private *fep = netdev_priv(dev);
884 volatile uint *s = &(fep->phy_status);
887 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
889 if (mii_reg & 0x0400) {
891 status |= PHY_STAT_LINK;
895 if (mii_reg & 0x0080)
896 status |= PHY_STAT_ANC;
897 if (mii_reg & 0x4000) {
898 if (mii_reg & 0x0200)
899 status |= PHY_STAT_100FDX;
901 status |= PHY_STAT_100HDX;
903 if (mii_reg & 0x0200)
904 status |= PHY_STAT_10FDX;
906 status |= PHY_STAT_10HDX;
908 if (mii_reg & 0x0008)
909 status |= PHY_STAT_FAULT;
914 static phy_cmd_t const phy_cmd_lxt971_config[] = {
915 /* limit to 10MBit because my prototype board
916 * doesn't work with 100. */
917 { mk_mii_read(MII_REG_CR), mii_parse_cr },
918 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
919 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
922 static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
923 { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
924 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
925 { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
926 /* Somehow the 971 tells me that the link is down on
927 * the first read after power-up.
928 * Read here to get a valid value in ack_int. */
929 { mk_mii_read(MII_REG_SR), mii_parse_sr },
932 static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
933 /* acknowledge the int before reading status ! */
934 { mk_mii_read(MII_LXT971_ISR), NULL },
935 /* find out the current status */
936 { mk_mii_read(MII_REG_SR), mii_parse_sr },
937 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
940 static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
941 { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
944 static phy_info_t const phy_info_lxt971 = {
947 .config = phy_cmd_lxt971_config,
948 .startup = phy_cmd_lxt971_startup,
949 .ack_int = phy_cmd_lxt971_ack_int,
950 .shutdown = phy_cmd_lxt971_shutdown
953 /* ------------------------------------------------------------------------- */
954 /* The Quality Semiconductor QS6612 is used on the RPX CLLF */
956 /* register definitions */
958 #define MII_QS6612_MCR 17 /* Mode Control Register */
959 #define MII_QS6612_FTR 27 /* Factory Test Register */
960 #define MII_QS6612_MCO 28 /* Misc. Control Register */
961 #define MII_QS6612_ISR 29 /* Interrupt Source Register */
962 #define MII_QS6612_IMR 30 /* Interrupt Mask Register */
963 #define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
965 static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
967 struct fec_enet_private *fep = netdev_priv(dev);
968 volatile uint *s = &(fep->phy_status);
971 status = *s & ~(PHY_STAT_SPMASK);
973 switch((mii_reg >> 2) & 7) {
974 case 1: status |= PHY_STAT_10HDX; break;
975 case 2: status |= PHY_STAT_100HDX; break;
976 case 5: status |= PHY_STAT_10FDX; break;
977 case 6: status |= PHY_STAT_100FDX; break;
983 static phy_cmd_t const phy_cmd_qs6612_config[] = {
984 /* The PHY powers up isolated on the RPX,
985 * so send a command to allow operation.
987 { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
989 /* parse cr and anar to get some info */
990 { mk_mii_read(MII_REG_CR), mii_parse_cr },
991 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
994 static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
995 { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
996 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
999 static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
1000 /* we need to read ISR, SR and ANER to acknowledge */
1001 { mk_mii_read(MII_QS6612_ISR), NULL },
1002 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1003 { mk_mii_read(MII_REG_ANER), NULL },
1005 /* read pcr to get info */
1006 { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
1009 static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
1010 { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
1013 static phy_info_t const phy_info_qs6612 = {
1016 .config = phy_cmd_qs6612_config,
1017 .startup = phy_cmd_qs6612_startup,
1018 .ack_int = phy_cmd_qs6612_ack_int,
1019 .shutdown = phy_cmd_qs6612_shutdown
1022 /* ------------------------------------------------------------------------- */
1023 /* AMD AM79C874 phy */
1025 /* register definitions for the 874 */
1027 #define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */
1028 #define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */
1029 #define MII_AM79C874_DR 18 /* Diagnostic Register */
1030 #define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
1031 #define MII_AM79C874_MCR 21 /* ModeControl Register */
1032 #define MII_AM79C874_DC 23 /* Disconnect Counter */
1033 #define MII_AM79C874_REC 24 /* Receive Error Counter */
1035 static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
1037 struct fec_enet_private *fep = netdev_priv(dev);
1038 volatile uint *s = &(fep->phy_status);
1041 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
1043 if (mii_reg & 0x0080)
1044 status |= PHY_STAT_ANC;
1045 if (mii_reg & 0x0400)
1046 status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
1048 status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
1053 static phy_cmd_t const phy_cmd_am79c874_config[] = {
1054 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1055 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1056 { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
1059 static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
1060 { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
1061 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1062 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1065 static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
1066 /* find out the current status */
1067 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1068 { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
1069 /* we only need to read ISR to acknowledge */
1070 { mk_mii_read(MII_AM79C874_ICSR), NULL },
1073 static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
1074 { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
1077 static phy_info_t const phy_info_am79c874 = {
1080 .config = phy_cmd_am79c874_config,
1081 .startup = phy_cmd_am79c874_startup,
1082 .ack_int = phy_cmd_am79c874_ack_int,
1083 .shutdown = phy_cmd_am79c874_shutdown
1087 /* ------------------------------------------------------------------------- */
1088 /* Kendin KS8721BL phy */
1090 /* register definitions for the 8721 */
1092 #define MII_KS8721BL_RXERCR 21
1093 #define MII_KS8721BL_ICSR 27
1094 #define MII_KS8721BL_PHYCR 31
1096 static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
1097 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1098 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1101 static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
1102 { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
1103 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1104 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1107 static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
1108 /* find out the current status */
1109 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1110 /* we only need to read ISR to acknowledge */
1111 { mk_mii_read(MII_KS8721BL_ICSR), NULL },
1114 static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
1115 { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
1118 static phy_info_t const phy_info_ks8721bl = {
1121 .config = phy_cmd_ks8721bl_config,
1122 .startup = phy_cmd_ks8721bl_startup,
1123 .ack_int = phy_cmd_ks8721bl_ack_int,
1124 .shutdown = phy_cmd_ks8721bl_shutdown
1127 /* ------------------------------------------------------------------------- */
1128 /* register definitions for the DP83848 */
1130 #define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
1132 static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
1134 struct fec_enet_private *fep = netdev_priv(dev);
1135 volatile uint *s = &(fep->phy_status);
1137 *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
1140 if (mii_reg & 0x0001) {
1142 *s |= PHY_STAT_LINK;
1145 /* Status of link */
1146 if (mii_reg & 0x0010) /* Autonegotiation complete */
1148 if (mii_reg & 0x0002) { /* 10MBps? */
1149 if (mii_reg & 0x0004) /* Full Duplex? */
1150 *s |= PHY_STAT_10FDX;
1152 *s |= PHY_STAT_10HDX;
1153 } else { /* 100 Mbps? */
1154 if (mii_reg & 0x0004) /* Full Duplex? */
1155 *s |= PHY_STAT_100FDX;
1157 *s |= PHY_STAT_100HDX;
1159 if (mii_reg & 0x0008)
1160 *s |= PHY_STAT_FAULT;
1163 static phy_info_t phy_info_dp83848 = {
1167 (const phy_cmd_t []) { /* config */
1168 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1169 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1170 { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
1173 (const phy_cmd_t []) { /* startup - enable interrupts */
1174 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1175 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1178 (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
1181 (const phy_cmd_t []) { /* shutdown */
1186 /* ------------------------------------------------------------------------- */
1188 static phy_info_t const * const phy_info[] = {
1198 /* ------------------------------------------------------------------------- */
1199 #ifdef HAVE_mii_link_interrupt
1201 mii_link_interrupt(int irq, void * dev_id);
1204 * This is specific to the MII interrupt setup of the M5272EVB.
1206 static void __inline__ fec_request_mii_intr(struct net_device *dev)
1208 if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
1209 printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
1212 static void __inline__ fec_disable_phy_intr(void)
1214 volatile unsigned long *icrp;
1215 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
1219 static void __inline__ fec_phy_ack_intr(void)
1221 volatile unsigned long *icrp;
1222 /* Acknowledge the interrupt */
1223 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
1228 static void __inline__ fec_get_mac(struct net_device *dev)
1230 struct fec_enet_private *fep = netdev_priv(dev);
1231 unsigned char *iap, tmpaddr[ETH_ALEN];
1235 * Get MAC address from FLASH.
1236 * If it is all 1's or 0's, use the default.
1238 iap = (unsigned char *)FEC_FLASHMAC;
1239 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
1240 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
1241 iap = fec_mac_default;
1242 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
1243 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
1244 iap = fec_mac_default;
1246 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
1247 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1251 memcpy(dev->dev_addr, iap, ETH_ALEN);
1253 /* Adjust MAC if using default MAC address */
1254 if (iap == fec_mac_default)
1255 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
1259 /* ------------------------------------------------------------------------- */
1261 static void mii_display_status(struct net_device *dev)
1263 struct fec_enet_private *fep = netdev_priv(dev);
1264 volatile uint *s = &(fep->phy_status);
1266 if (!fep->link && !fep->old_link) {
1267 /* Link is still down - don't print anything */
1271 printk("%s: status: ", dev->name);
1274 printk("link down");
1278 switch(*s & PHY_STAT_SPMASK) {
1279 case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
1280 case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
1281 case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
1282 case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
1284 printk(", Unknown speed/duplex");
1287 if (*s & PHY_STAT_ANC)
1288 printk(", auto-negotiation complete");
1291 if (*s & PHY_STAT_FAULT)
1292 printk(", remote fault");
1297 static void mii_display_config(struct work_struct *work)
1299 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
1300 struct net_device *dev = fep->netdev;
1301 uint status = fep->phy_status;
1304 ** When we get here, phy_task is already removed from
1305 ** the workqueue. It is thus safe to reuse it.
1307 fep->mii_phy_task_queued = 0;
1308 printk("%s: config: auto-negotiation ", dev->name);
1310 if (status & PHY_CONF_ANE)
1315 if (status & PHY_CONF_100FDX)
1317 if (status & PHY_CONF_100HDX)
1319 if (status & PHY_CONF_10FDX)
1321 if (status & PHY_CONF_10HDX)
1323 if (!(status & PHY_CONF_SPMASK))
1324 printk(", No speed/duplex selected?");
1326 if (status & PHY_CONF_LOOP)
1327 printk(", loopback enabled");
1331 fep->sequence_done = 1;
1334 static void mii_relink(struct work_struct *work)
1336 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
1337 struct net_device *dev = fep->netdev;
1341 ** When we get here, phy_task is already removed from
1342 ** the workqueue. It is thus safe to reuse it.
1344 fep->mii_phy_task_queued = 0;
1345 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
1346 mii_display_status(dev);
1347 fep->old_link = fep->link;
1352 & (PHY_STAT_100FDX | PHY_STAT_10FDX))
1354 fec_restart(dev, duplex);
1359 enable_irq(fep->mii_irq);
1364 /* mii_queue_relink is called in interrupt context from mii_link_interrupt */
1365 static void mii_queue_relink(uint mii_reg, struct net_device *dev)
1367 struct fec_enet_private *fep = netdev_priv(dev);
1370 ** We cannot queue phy_task twice in the workqueue. It
1371 ** would cause an endless loop in the workqueue.
1372 ** Fortunately, if the last mii_relink entry has not been
1373 ** executed yet, it will do the job for the current interrupt,
1374 ** which is just what we want.
1376 if (fep->mii_phy_task_queued)
1379 fep->mii_phy_task_queued = 1;
1380 INIT_WORK(&fep->phy_task, mii_relink);
1381 schedule_work(&fep->phy_task);
1384 /* mii_queue_config is called in interrupt context from fec_enet_mii */
1385 static void mii_queue_config(uint mii_reg, struct net_device *dev)
1387 struct fec_enet_private *fep = netdev_priv(dev);
1389 if (fep->mii_phy_task_queued)
1392 fep->mii_phy_task_queued = 1;
1393 INIT_WORK(&fep->phy_task, mii_display_config);
1394 schedule_work(&fep->phy_task);
1397 phy_cmd_t const phy_cmd_relink[] = {
1398 { mk_mii_read(MII_REG_CR), mii_queue_relink },
1401 phy_cmd_t const phy_cmd_config[] = {
1402 { mk_mii_read(MII_REG_CR), mii_queue_config },
1406 /* Read remainder of PHY ID.
1409 mii_discover_phy3(uint mii_reg, struct net_device *dev)
1411 struct fec_enet_private *fep;
1414 fep = netdev_priv(dev);
1415 fep->phy_id |= (mii_reg & 0xffff);
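/* phy_id now holds PHYIR1 in the upper and PHYIR2 in the lower 16 bits;
 * the bottom four bits are the silicon revision, which is why the table
 * lookup below matches against (fep->phy_id >> 4).
 */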
1416 printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
1418 for(i = 0; phy_info[i]; i++) {
1419 if(phy_info[i]->id == (fep->phy_id >> 4))
1424 printk(" -- %s\n", phy_info[i]->name);
1426 printk(" -- unknown PHY!\n");
1428 fep->phy = phy_info[i];
1429 fep->phy_id_done = 1;
1432 /* Scan all of the MII PHY addresses looking for someone to respond
1433 * with a valid ID. This usually happens quickly.
1436 mii_discover_phy(uint mii_reg, struct net_device *dev)
1438 struct fec_enet_private *fep;
1441 fep = netdev_priv(dev);
1443 if (fep->phy_addr < 32) {
1444 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
1446 /* Got first part of ID, now get remainder.
1448 fep->phy_id = phytype << 16;
1449 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
1453 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
1457 printk("FEC: No PHY device found.\n");
1458 /* Disable external MII interface */
1459 writel(0, fep->hwp + FEC_MII_SPEED);
1461 #ifdef HAVE_mii_link_interrupt
1462 fec_disable_phy_intr();
1467 /* This interrupt occurs when the PHY detects a link change.
1469 #ifdef HAVE_mii_link_interrupt
1471 mii_link_interrupt(int irq, void * dev_id)
1473 struct net_device *dev = dev_id;
1474 struct fec_enet_private *fep = netdev_priv(dev);
1479 disable_irq(fep->mii_irq); /* disable now, enable later */
1482 mii_do_cmd(dev, fep->phy->ack_int);
1483 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
1490 fec_enet_open(struct net_device *dev)
1492 struct fec_enet_private *fep = netdev_priv(dev);
1494 /* I should reset the ring buffers here, but I don't yet know
1495 * a simple way to do that.
1497 fec_set_mac_address(dev);
1499 fep->sequence_done = 0;
1503 mii_do_cmd(dev, fep->phy->ack_int);
1504 mii_do_cmd(dev, fep->phy->config);
1505 mii_do_cmd(dev, phy_cmd_config); /* display configuration */
1507 /* Poll until the PHY tells us its configuration
1509 * Request is initiated by mii_do_cmd above, but the answer
1510 * comes back by interrupt.
1511 * This should take about 25 usec per register at 2.5 MHz,
1512 * and we read approximately 5 registers.
1514 while(!fep->sequence_done)
1517 mii_do_cmd(dev, fep->phy->startup);
1519 /* Set the initial link state to true. A lot of hardware
1520 * based on this device does not implement a PHY interrupt,
1521 * so we are never notified of link change.
1525 fep->link = 1; /* let's just try it and see */
1526 /* no phy, go full duplex, it's most likely a hub chip */
1527 fec_restart(dev, 1);
1530 netif_start_queue(dev);
1532 return 0; /* Success */
1536 fec_enet_close(struct net_device *dev)
1538 struct fec_enet_private *fep = netdev_priv(dev);
1540 /* Don't know what to do yet.
1543 netif_stop_queue(dev);
1549 /* Set or clear the multicast filter for this adaptor.
1550 * Skeleton taken from sunlance driver.
1551 * The CPM Ethernet implementation allows Multicast as well as individual
1552 * MAC address filtering. Some of the drivers check to make sure it is
1553 * a group multicast address, and discard those that are not. I guess I
1554 * will do the same for now, but just remove the test if you want
1555 * individual filtering as well (do the upper net layers want or support
1556 * this kind of feature?).
1559 #define HASH_BITS 6 /* #bits in hash */
1560 #define CRC32_POLY 0xEDB88320
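/* 0xEDB88320 is the bit-reversed form of the Ethernet CRC-32 polynomial
 * (0x04C11DB7); the loop below shifts the data in LSB first, so the
 * reflected polynomial is the one we need.
 */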
1562 static void set_multicast_list(struct net_device *dev)
1564 struct fec_enet_private *fep = netdev_priv(dev);
1565 struct dev_mc_list *dmi;
1566 unsigned int i, j, bit, data, crc, tmp;
1569 if (dev->flags&IFF_PROMISC) {
1570 tmp = readl(fep->hwp + FEC_R_CNTRL);
1572 writel(tmp, fep->hwp + FEC_R_CNTRL);
1574 tmp = readl(fep->hwp + FEC_R_CNTRL);
1576 writel(tmp, fep->hwp + FEC_R_CNTRL);
1578 if (dev->flags & IFF_ALLMULTI) {
1579 /* Catch all multicast addresses, so set the
1580 * filter to all 1's.
1582 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1583 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1585 /* Clear filter and add the addresses in hash register.
1587 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1588 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1592 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
1594 /* Only support group multicast for now.
1596 if (!(dmi->dmi_addr[0] & 1))
1599 /* calculate crc32 value of mac address
1603 for (i = 0; i < dmi->dmi_addrlen; i++)
1605 data = dmi->dmi_addr[i];
1606 for (bit = 0; bit < 8; bit++, data >>= 1)
1609 (((crc ^ data) & 1) ? CRC32_POLY : 0);
1613 /* Only the upper 6 bits (HASH_BITS) are used,
1614 which point to a specific bit in the hash registers
1616 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
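/* The six hash bits select one of 64 filter bits: values 32-63 set a bit
 * in GRP_HASH_TABLE_HIGH, values 0-31 a bit in GRP_HASH_TABLE_LOW.
 */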
1619 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1620 tmp |= 1 << (hash - 32);
1621 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1623 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1625 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1632 /* Set a MAC change in hardware.
1635 fec_set_mac_address(struct net_device *dev)
1637 struct fec_enet_private *fep = netdev_priv(dev);
1639 /* Set station address. */
1640 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
1641 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
1642 fep->hwp + FEC_ADDR_LOW);
1643 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
1644 fep->hwp + FEC_ADDR_HIGH);
1648 * XXX: We need to clean up on failure exits here.
1650 * index is only used in legacy code
1652 int __init fec_enet_init(struct net_device *dev, int index)
1654 struct fec_enet_private *fep = netdev_priv(dev);
1655 unsigned long mem_addr;
1656 volatile cbd_t *bdp;
1660 /* Allocate memory for buffer descriptors.
1662 mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE,
1663 &fep->bd_dma, GFP_KERNEL);
1664 if (mem_addr == 0) {
1665 printk("FEC: allocate descriptor memory failed?\n");
1669 spin_lock_init(&fep->hw_lock);
1670 spin_lock_init(&fep->mii_lock);
1673 fep->hwp = (void __iomem *)dev->base_addr;
1676 /* Whack a reset. We should wait for this.
1678 writel(1, fep->hwp + FEC_ECNTRL);
1681 /* Set the Ethernet address */
1687 l = readl(fep->hwp + FEC_ADDR_LOW);
1688 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
1689 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
1690 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
1691 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
1692 l = readl(fep->hwp + FEC_ADDR_HIGH);
1693 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
1694 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
1698 cbd_base = (cbd_t *)mem_addr;
1700 /* Set receive and transmit descriptor base.
1702 fep->rx_bd_base = cbd_base;
1703 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1705 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1706 fep->cur_rx = fep->rx_bd_base;
1708 fep->skb_cur = fep->skb_dirty = 0;
1710 /* Initialize the receive buffer descriptors.
1712 bdp = fep->rx_bd_base;
1713 for (i=0; i<FEC_ENET_RX_PAGES; i++) {
1717 mem_addr = __get_free_page(GFP_KERNEL);
1718 /* XXX: missing check for allocation failure */
1720 /* Initialize the BD for every fragment in the page.
1722 for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
1723 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1724 bdp->cbd_bufaddr = __pa(mem_addr);
1725 mem_addr += FEC_ENET_RX_FRSIZE;
1730 /* Set the last buffer to wrap.
1733 bdp->cbd_sc |= BD_SC_WRAP;
1735 /* ...and the same for transmit.
1737 bdp = fep->tx_bd_base;
1738 for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
1739 if (j >= FEC_ENET_TX_FRPPG) {
1740 mem_addr = __get_free_page(GFP_KERNEL);
1743 mem_addr += FEC_ENET_TX_FRSIZE;
1746 fep->tx_bounce[i] = (unsigned char *) mem_addr;
1748 /* Initialize the BD for every fragment in the page.
1751 bdp->cbd_bufaddr = 0;
1755 /* Set the last buffer to wrap.
1758 bdp->cbd_sc |= BD_SC_WRAP;
1760 /* Set receive and transmit descriptor base.
1762 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1763 writel((unsigned long)fep->bd_dma + sizeof(cbd_t) * RX_RING_SIZE,
1764 fep->hwp + FEC_X_DES_START);
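/* The transmit descriptors are laid out immediately after the receive
 * ring in the same DMA-coherent page, hence the RX_RING_SIZE offset.
 */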
1766 #ifdef HAVE_mii_link_interrupt
1767 fec_request_mii_intr(dev);
1770 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1771 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1772 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1773 writel(2, fep->hwp + FEC_ECNTRL);
1774 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1775 #ifndef CONFIG_M5272
1776 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1777 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1780 /* The FEC Ethernet specific entries in the device structure. */
1781 dev->open = fec_enet_open;
1782 dev->hard_start_xmit = fec_enet_start_xmit;
1783 dev->tx_timeout = fec_timeout;
1784 dev->watchdog_timeo = TX_TIMEOUT;
1785 dev->stop = fec_enet_close;
1786 dev->set_multicast_list = set_multicast_list;
1788 for (i=0; i<NMII-1; i++)
1789 mii_cmds[i].mii_next = &mii_cmds[i+1];
1790 mii_free = mii_cmds;
1792 /* setup MII interface */
1793 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1794 writel(0, fep->hwp + FEC_X_CNTRL);
1797 * Set MII speed to 2.5 MHz
1799 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
1800 / 2500000) / 2) & 0x3F) << 1;
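/* This derives a divider for the FEC_MII_SPEED register from the module
 * clock, rounding up so that the resulting MDC clock stays at or below
 * the 2.5 MHz mentioned above.
 */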
1801 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1802 fec_restart(dev, 0);
1804 /* Clear and enable interrupts */
1805 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1806 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
1807 fep->hwp + FEC_IMASK);
1809 /* Queue up command to detect the PHY and initialize the
1810 * remainder of the interface.
1812 fep->phy_id_done = 0;
1814 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
1819 /* This function is called to start or restart the FEC during a link
1820 * change. This only happens when switching between half and full
1824 fec_restart(struct net_device *dev, int duplex)
1826 struct fec_enet_private *fep = netdev_priv(dev);
1827 volatile cbd_t *bdp;
1830 /* Whack a reset. We should wait for this. */
1831 writel(1, fep->hwp + FEC_ECNTRL);
1834 /* Clear any outstanding interrupt. */
1835 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1837 /* Set station address. */
1838 fec_set_mac_address(dev);
1840 /* Reset all multicast. */
1841 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1842 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1844 /* Set maximum receive buffer size. */
1845 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1847 /* Set receive and transmit descriptor base. */
1848 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1849 writel((unsigned long)fep->bd_dma + sizeof(cbd_t) * RX_RING_SIZE,
1850 fep->hwp + FEC_X_DES_START);
1852 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1853 fep->cur_rx = fep->rx_bd_base;
1855 /* Reset SKB transmit buffers. */
1856 fep->skb_cur = fep->skb_dirty = 0;
1857 for (i=0; i<=TX_RING_MOD_MASK; i++) {
1858 if (fep->tx_skbuff[i] != NULL) {
1859 dev_kfree_skb_any(fep->tx_skbuff[i]);
1860 fep->tx_skbuff[i] = NULL;
1864 /* Initialize the receive buffer descriptors. */
1865 bdp = fep->rx_bd_base;
1866 for (i=0; i<RX_RING_SIZE; i++) {
1868 /* Initialize the BD for every fragment in the page. */
1869 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1873 /* Set the last buffer to wrap. */
1875 bdp->cbd_sc |= BD_SC_WRAP;
1877 /* ...and the same for transmit. */
1878 bdp = fep->tx_bd_base;
1879 for (i=0; i<TX_RING_SIZE; i++) {
1881 /* Initialize the BD for every fragment in the page. */
1883 bdp->cbd_bufaddr = 0;
1887 /* Set the last buffer to wrap. */
1889 bdp->cbd_sc |= BD_SC_WRAP;
1891 /* Enable MII mode. */
1893 /* MII enable / FD enable */
1894 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1895 writel(0x04, fep->hwp + FEC_X_CNTRL);
1897 /* MII enable / No Rcv on Xmit */
1898 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1899 writel(0x0, fep->hwp + FEC_X_CNTRL);
1901 fep->full_duplex = duplex;
1903 /* Set MII speed. */
1904 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1906 /* And last, enable the transmit and receive processing. */
1907 writel(2, fep->hwp + FEC_ECNTRL);
1908 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1910 /* Enable interrupts we wish to service. */
1911 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
1912 fep->hwp + FEC_IMASK);
1916 fec_stop(struct net_device *dev)
1918 struct fec_enet_private *fep = netdev_priv(dev);
1921 ** We cannot expect a graceful transmit stop without link!!!
1924 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1926 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1927 printk("fec_stop: Graceful transmit stop did not complete!\n");
1930 /* Whack a reset. We should wait for this. */
1931 writel(1, fep->hwp + FEC_ECNTRL);
1934 /* Clear outstanding MII command interrupts. */
1935 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
1937 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1938 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1941 static int __devinit
1942 fec_probe(struct platform_device *pdev)
1944 struct fec_enet_private *fep;
1945 struct net_device *ndev;
1946 int i, irq, ret = 0;
1949 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1953 r = request_mem_region(r->start, resource_size(r), pdev->name);
1957 /* Init network device */
1958 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1962 SET_NETDEV_DEV(ndev, &pdev->dev);
1964 /* setup board info structure */
1965 fep = netdev_priv(ndev);
1966 memset(fep, 0, sizeof(*fep));
1968 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
1970 if (!ndev->base_addr) {
1972 goto failed_ioremap;
1975 platform_set_drvdata(pdev, ndev);
1977 /* This device has up to three irqs on some platforms */
1978 for (i = 0; i < 3; i++) {
1979 irq = platform_get_irq(pdev, i);
1982 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1985 irq = platform_get_irq(pdev, i);
1986 free_irq(irq, ndev);
1993 fep->clk = clk_get(&pdev->dev, "fec_clk");
1994 if (IS_ERR(fep->clk)) {
1995 ret = PTR_ERR(fep->clk);
1998 clk_enable(fep->clk);
2000 ret = fec_enet_init(ndev, 0);
2004 ret = register_netdev(ndev);
2006 goto failed_register;
2012 clk_disable(fep->clk);
2015 for (i = 0; i < 3; i++) {
2016 irq = platform_get_irq(pdev, i);
2018 free_irq(irq, ndev);
2021 iounmap((void __iomem *)ndev->base_addr);
2028 static int __devexit
2029 fec_drv_remove(struct platform_device *pdev)
2031 struct net_device *ndev = platform_get_drvdata(pdev);
2032 struct fec_enet_private *fep = netdev_priv(ndev);
2034 platform_set_drvdata(pdev, NULL);
2037 clk_disable(fep->clk);
2039 iounmap((void __iomem *)ndev->base_addr);
2040 unregister_netdev(ndev);
2046 fec_suspend(struct platform_device *dev, pm_message_t state)
2048 struct net_device *ndev = platform_get_drvdata(dev);
2049 struct fec_enet_private *fep;
2052 fep = netdev_priv(ndev);
2053 if (netif_running(ndev)) {
2054 netif_device_detach(ndev);
2062 fec_resume(struct platform_device *dev)
2064 struct net_device *ndev = platform_get_drvdata(dev);
2067 if (netif_running(ndev)) {
2068 fec_enet_init(ndev, 0);
2069 netif_device_attach(ndev);
2075 static struct platform_driver fec_driver = {
2078 .owner = THIS_MODULE,
2081 .remove = __devexit_p(fec_drv_remove),
2082 .suspend = fec_suspend,
2083 .resume = fec_resume,
2087 fec_enet_module_init(void)
2089 printk(KERN_INFO "FEC Ethernet Driver\n");
2091 return platform_driver_register(&fec_driver);
2095 fec_enet_cleanup(void)
2097 platform_driver_unregister(&fec_driver);
2100 module_exit(fec_enet_cleanup);
2101 module_init(fec_enet_module_init);
2103 MODULE_LICENSE("GPL");