1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
32 #define DRV_NAME "via-rhine"
33 #define DRV_VERSION "1.4.3"
34 #define DRV_RELDATE "2007-03-06"
37 /* A few user-configurable values.
38 These may be modified when a driver module is loaded. */
40 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
41 static int max_interrupt_work = 20;
43 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
44 Setting to > 1518 effectively disables this feature. */
45 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
46 || defined(CONFIG_SPARC) || defined(__ia64__) \
47 || defined(__sh__) || defined(__mips__)
48 static int rx_copybreak = 1518;
49 #else
50 static int rx_copybreak;
51 #endif
53 /* Work-around for broken BIOSes: they are unable to get the chip back out of
54 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 * In case you are looking for 'options[]' or 'full_duplex[]', they
59 * are gone. Use ethtool(8) instead.
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63 The Rhine has a 64 element 8390-like hash table. */
64 static const int multicast_filter_limit = 32;
67 /* Operational parameters that are set at compile time. */
69 /* Keep the ring sizes a power of two for compile efficiency.
70 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
71 Making the Tx ring too large decreases the effectiveness of channel
72 bonding and packet priority.
73 There are no ill effects from too-large receive rings. */
74 #define TX_RING_SIZE 16
75 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
76 #define RX_RING_SIZE 64
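/*
 * For illustration: because the ring sizes above are powers of two, an index
 * computation such as
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * (as used in rhine_start_tx() below) compiles down to the cheap bit mask
 *
 *	entry = rp->cur_tx & (TX_RING_SIZE - 1);
 */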
78 /* Operational parameters that usually are not changed. */
80 /* Time in jiffies before concluding the transmitter is hung. */
81 #define TX_TIMEOUT (2*HZ)
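/* (2*HZ expresses two seconds in jiffies, whatever the configured HZ is.) */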
83 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
85 #include <linux/module.h>
86 #include <linux/moduleparam.h>
87 #include <linux/kernel.h>
88 #include <linux/string.h>
89 #include <linux/timer.h>
90 #include <linux/errno.h>
91 #include <linux/ioport.h>
92 #include <linux/slab.h>
93 #include <linux/interrupt.h>
94 #include <linux/pci.h>
95 #include <linux/dma-mapping.h>
96 #include <linux/netdevice.h>
97 #include <linux/etherdevice.h>
98 #include <linux/skbuff.h>
99 #include <linux/init.h>
100 #include <linux/delay.h>
101 #include <linux/mii.h>
102 #include <linux/ethtool.h>
103 #include <linux/crc32.h>
104 #include <linux/bitops.h>
105 #include <asm/processor.h> /* Processor type for cache alignment. */
108 #include <asm/uaccess.h>
109 #include <linux/dmi.h>
111 /* These identify the driver base version and may not be removed. */
112 static const char version[] __devinitconst =
113 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE
114 " Written by Donald Becker\n";
116 /* This driver was written to use PCI memory space. Some early versions
117 of the Rhine may only work correctly with I/O space accesses. */
118 #ifdef CONFIG_VIA_RHINE_MMIO
123 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
124 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
125 MODULE_LICENSE("GPL");
127 module_param(max_interrupt_work, int, 0);
128 module_param(debug, int, 0);
129 module_param(rx_copybreak, int, 0);
130 module_param(avoid_D3, bool, 0);
131 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
132 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
133 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
134 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
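/*
 * Example (illustrative): the parameters above can be given at load time,
 * e.g.
 *
 *	modprobe via-rhine debug=3 rx_copybreak=200 avoid_D3=1
 */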
139 I. Board Compatibility
141 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
144 II. Board-specific settings
146 Boards with this chip are functional only in a bus-master PCI slot.
148 Many operational settings are loaded from the EEPROM to the Config word at
149 offset 0x78. For most of these settings, this driver assumes that they are
151 If this driver is compiled to use PCI memory space operations the EEPROM
152 must be configured to enable memory ops.
154 III. Driver operation
158 This driver uses two statically allocated fixed-size descriptor lists
159 formed into rings by a branch from the final descriptor to the beginning of
160 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
162 IIIb/c. Transmit/Receive Structure
164 This driver attempts to use a zero-copy receive and transmit scheme.
166 Alas, all data buffers are required to start on a 32 bit boundary, so
167 the driver must often copy transmit packets into bounce buffers.
169 The driver allocates full frame size skbuffs for the Rx ring buffers at
170 open() time and passes the skb->data field to the chip as receive data
171 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
172 a fresh skbuff is allocated and the frame is copied to the new skbuff.
173 When the incoming frame is larger, the skbuff is passed directly up the
174 protocol stack. Buffers consumed this way are replaced by newly allocated
175 skbuffs in the last phase of rhine_rx().
177 The RX_COPYBREAK value is chosen to trade off the memory wasted by
178 using a full-sized skbuff for small frames vs. the copying costs of larger
179 frames. New boards are typically used in generously configured machines
180 and the underfilled buffers have negligible impact compared to the benefit of
181 a single allocation size, so the default value of zero results in never
182 copying packets. When copying is done, the cost is usually mitigated by using
183 a combined copy/checksum routine. Copying also preloads the cache, which is
184 most useful with small frames.
186 Since the VIA chips are only able to transfer data to buffers on 32 bit
187 boundaries, the IP header at offset 14 in an ethernet frame isn't
188 longword aligned for further processing. Copying these unaligned buffers
189 has the beneficial effect of 16-byte aligning the IP header.
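As an illustration (simplified from rhine_rx() below, and assuming the
common NET_IP_ALIGN value of 2), the copy path realigns a small frame via

	skb_reserve(skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data, pkt_len);
	skb_put(skb, pkt_len);

so the 14-byte Ethernet header starts at offset 2 and the IP header lands
longword aligned at offset 16.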
191 IIId. Synchronization
193 The driver runs as two independent, single-threaded flows of control. One
194 is the send-packet routine, which enforces single-threaded use by the
195 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
196 which is single threaded by the hardware and interrupt handling software.
198 The send packet thread has partial control over the Tx ring. It locks the
199 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
200 the ring is not available it stops the transmit queue by
201 calling netif_stop_queue.
203 The interrupt handler has exclusive control over the Rx ring and records stats
204 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
205 empty by incrementing the dirty_tx mark. If at least half of the entries in
206 the Tx ring are available, the transmit queue is woken up if it was stopped.
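As a sketch of the accounting (matching rhine_start_tx() and rhine_tx()
below), the free-running unsigned indices make ring occupancy a plain
subtraction that stays correct across wraparound:

	in_use = rp->cur_tx - rp->dirty_tx;

The queue is stopped once in_use reaches TX_QUEUE_LEN and woken again when
it drops below TX_QUEUE_LEN - 4.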
212 Preliminary VT86C100A manual from http://www.via.com.tw/
213 http://www.scyld.com/expert/100mbps.html
214 http://www.scyld.com/expert/NWay.html
215 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
216 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221 The VT86C100A manual is not a reliable source of information.
222 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
223 in significant performance degradation for bounce buffer copies on transmit
224 and unaligned IP headers on receive.
225 The chip does not pad to minimum transmit length.
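(The driver therefore pads short frames in software: rhine_start_tx() below
calls skb_padto(skb, ETH_ZLEN) and, on the Rhine-I bounce-buffer path,
zero-fills the buffer up to ETH_ZLEN because the padding is not copied.)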
230 /* This table drives the PCI probe routines. It's mostly boilerplate in all
231 of the drivers, and will likely be provided by some future kernel.
232 Note the matching code -- the first table entry matches all 56** cards but
233 the second matches only the 1234 card.
240 VT8231 = 0x50, /* Integrated MAC */
241 VT8233 = 0x60, /* Integrated MAC */
242 VT8235 = 0x74, /* Integrated MAC */
243 VT8237 = 0x78, /* Integrated MAC */
250 VT6105M = 0x90, /* Management adapter */
254 rqWOL = 0x0001, /* Wake-On-LAN support */
255 rqForceReset = 0x0002,
256 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
257 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
258 rqRhineI = 0x0100, /* See comment below */
261 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
262 * MMIO as well as for the collision counter and the Tx FIFO underflow
263 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
266 /* Beware of PCI posted writes */
267 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
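/*
 * Usage sketch (illustrative): a dummy read flushes out writes that the PCI
 * bridge may still have posted, e.g.
 *
 *	iowrite16(CmdStop, ioaddr + ChipCmd);
 *	IOSYNC;
 *
 * ensures the command has reached the chip before execution continues.
 */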
269 static const struct pci_device_id rhine_pci_tbl[] = {
270 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
271 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
272 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
273 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
274 { } /* terminate list */
276 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
279 /* Offsets to the device registers. */
280 enum register_offsets {
281 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
283 IntrStatus=0x0C, IntrEnable=0x0E,
284 MulticastFilter0=0x10, MulticastFilter1=0x14,
285 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
286 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
287 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
288 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
289 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
290 StickyHW=0x83, IntrStatus2=0x84,
291 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
292 WOLcrClr1=0xA6, WOLcgClr=0xA7,
293 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
296 /* Bits in ConfigD */
298 BackOptional=0x01, BackModify=0x02,
299 BackCaptureEffect=0x04, BackRandom=0x08
303 /* Registers we check to verify that MMIO and PIO accesses match. */
304 static const int mmio_verify_registers[] = {
305 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
310 /* Bits in the interrupt status/mask registers. */
311 enum intr_status_bits {
312 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
313 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
315 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
316 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
317 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
319 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
320 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
321 IntrTxErrSummary=0x082218,
324 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
333 /* The Rx and Tx buffer descriptors. */
336 __le32 desc_length; /* Chain flag, Buffer/frame length */
342 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
347 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
348 #define TXDESC 0x00e08000
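/*
 * Illustration (mirrors rhine_start_tx() below): a frame of len bytes is
 * described to the chip as
 *
 *	desc->desc_length = cpu_to_le32(TXDESC | (len >= ETH_ZLEN ? len : ETH_ZLEN));
 *
 * so the length occupies bits 0-10 while TXDESC supplies the control bits.
 */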
350 enum rx_status_bits {
351 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
354 /* Bits in *_desc.*_status */
355 enum desc_status_bits {
359 /* Bits in ChipCmd. */
361 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
362 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
363 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
364 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
367 struct rhine_private {
368 /* Descriptor rings */
369 struct rx_desc *rx_ring;
370 struct tx_desc *tx_ring;
371 dma_addr_t rx_ring_dma;
372 dma_addr_t tx_ring_dma;
374 /* The addresses of receive-in-place skbuffs. */
375 struct sk_buff *rx_skbuff[RX_RING_SIZE];
376 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
378 /* The saved address of a sent-in-place packet/buffer, for later free(). */
379 struct sk_buff *tx_skbuff[TX_RING_SIZE];
380 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
382 /* Tx bounce buffers (Rhine-I only) */
383 unsigned char *tx_buf[TX_RING_SIZE];
384 unsigned char *tx_bufs;
385 dma_addr_t tx_bufs_dma;
387 struct pci_dev *pdev;
389 struct net_device *dev;
390 struct napi_struct napi;
393 /* Frequently used values: keep some adjacent for cache effect. */
395 struct rx_desc *rx_head_desc;
396 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
397 unsigned int cur_tx, dirty_tx;
398 unsigned int rx_buf_sz; /* Based on MTU+slack. */
401 u8 tx_thresh, rx_thresh;
403 struct mii_if_info mii_if;
407 static int mdio_read(struct net_device *dev, int phy_id, int location);
408 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
409 static int rhine_open(struct net_device *dev);
410 static void rhine_tx_timeout(struct net_device *dev);
411 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
412 struct net_device *dev);
413 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
414 static void rhine_tx(struct net_device *dev);
415 static int rhine_rx(struct net_device *dev, int limit);
416 static void rhine_error(struct net_device *dev, int intr_status);
417 static void rhine_set_rx_mode(struct net_device *dev);
418 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
419 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
420 static const struct ethtool_ops netdev_ethtool_ops;
421 static int rhine_close(struct net_device *dev);
422 static void rhine_shutdown (struct pci_dev *pdev);
424 #define RHINE_WAIT_FOR(condition) do { \
425 int i = 1024; \
426 while (!(condition) && --i) \
427 udelay(10); \
428 if (debug > 1 && i < 512) \
429 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
430 DRV_NAME, 1024-i, __func__, __LINE__); \
431 } while (0)
433 static inline u32 get_intr_status(struct net_device *dev)
435 struct rhine_private *rp = netdev_priv(dev);
436 void __iomem *ioaddr = rp->base;
439 intr_status = ioread16(ioaddr + IntrStatus);
440 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
441 if (rp->quirks & rqStatusWBRace)
442 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
447 * Get power related registers into sane state.
448 * Notify user about past WOL event.
450 static void rhine_power_init(struct net_device *dev)
452 struct rhine_private *rp = netdev_priv(dev);
453 void __iomem *ioaddr = rp->base;
456 if (rp->quirks & rqWOL) {
457 /* Make sure chip is in power state D0 */
458 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
460 /* Disable "force PME-enable" */
461 iowrite8(0x80, ioaddr + WOLcgClr);
463 /* Clear power-event config bits (WOL) */
464 iowrite8(0xFF, ioaddr + WOLcrClr);
465 /* More recent cards can manage two additional patterns */
466 if (rp->quirks & rq6patterns)
467 iowrite8(0x03, ioaddr + WOLcrClr1);
469 /* Save power-event status bits */
470 wolstat = ioread8(ioaddr + PwrcsrSet);
471 if (rp->quirks & rq6patterns)
472 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
474 /* Clear power-event status bits */
475 iowrite8(0xFF, ioaddr + PwrcsrClr);
476 if (rp->quirks & rq6patterns)
477 iowrite8(0x03, ioaddr + PwrcsrClr1);
483 reason = "Magic packet";
486 reason = "Link went up";
489 reason = "Link went down";
492 reason = "Unicast packet";
495 reason = "Multicast/broadcast packet";
500 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
506 static void rhine_chip_reset(struct net_device *dev)
508 struct rhine_private *rp = netdev_priv(dev);
509 void __iomem *ioaddr = rp->base;
511 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
514 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
515 printk(KERN_INFO "%s: Reset not complete yet. "
516 "Trying harder.\n", DRV_NAME);
519 if (rp->quirks & rqForceReset)
520 iowrite8(0x40, ioaddr + MiscCmd);
522 /* Reset can take somewhat longer (rare) */
523 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
527 printk(KERN_INFO "%s: Reset %s.\n", dev->name,
528 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
529 "failed" : "succeeded");
533 static void enable_mmio(long pioaddr, u32 quirks)
536 if (quirks & rqRhineI) {
537 /* More recent docs say that this bit is reserved ... */
538 n = inb(pioaddr + ConfigA) | 0x20;
539 outb(n, pioaddr + ConfigA);
541 n = inb(pioaddr + ConfigD) | 0x80;
542 outb(n, pioaddr + ConfigD);
548 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
549 * (plus 0x6C for Rhine-I/II)
551 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
553 struct rhine_private *rp = netdev_priv(dev);
554 void __iomem *ioaddr = rp->base;
556 outb(0x20, pioaddr + MACRegEEcsr);
557 RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
561 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
562 * MMIO. If reloading EEPROM was done first this could be avoided, but
563 * it is not known if that still works with the "win98-reboot" problem.
565 enable_mmio(pioaddr, rp->quirks);
568 /* Turn off EEPROM-controlled wake-up (magic packet) */
569 if (rp->quirks & rqWOL)
570 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
574 #ifdef CONFIG_NET_POLL_CONTROLLER
575 static void rhine_poll(struct net_device *dev)
577 disable_irq(dev->irq);
578 rhine_interrupt(dev->irq, (void *)dev);
579 enable_irq(dev->irq);
583 static int rhine_napipoll(struct napi_struct *napi, int budget)
585 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
586 struct net_device *dev = rp->dev;
587 void __iomem *ioaddr = rp->base;
590 work_done = rhine_rx(dev, budget);
592 if (work_done < budget) {
595 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
596 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
597 IntrTxDone | IntrTxError | IntrTxUnderrun |
598 IntrPCIErr | IntrStatsMax | IntrLinkChange,
599 ioaddr + IntrEnable);
604 static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
606 struct rhine_private *rp = netdev_priv(dev);
608 /* Reset the chip to erase previous misconfiguration. */
609 rhine_chip_reset(dev);
611 /* Rhine-I needs extra time to recuperate before EEPROM reload */
612 if (rp->quirks & rqRhineI)
615 /* Reload EEPROM controlled bytes cleared by soft reset */
616 rhine_reload_eeprom(pioaddr, dev);
619 static const struct net_device_ops rhine_netdev_ops = {
620 .ndo_open = rhine_open,
621 .ndo_stop = rhine_close,
622 .ndo_start_xmit = rhine_start_tx,
623 .ndo_get_stats = rhine_get_stats,
624 .ndo_set_multicast_list = rhine_set_rx_mode,
625 .ndo_change_mtu = eth_change_mtu,
626 .ndo_validate_addr = eth_validate_addr,
627 .ndo_set_mac_address = eth_mac_addr,
628 .ndo_do_ioctl = netdev_ioctl,
629 .ndo_tx_timeout = rhine_tx_timeout,
630 #ifdef CONFIG_NET_POLL_CONTROLLER
631 .ndo_poll_controller = rhine_poll,
635 static int __devinit rhine_init_one(struct pci_dev *pdev,
636 const struct pci_device_id *ent)
638 struct net_device *dev;
639 struct rhine_private *rp;
644 void __iomem *ioaddr;
653 /* when built into the kernel, we only print version if device is found */
655 static int printed_version;
656 if (!printed_version++)
664 if (pdev->revision < VTunknown0) {
668 else if (pdev->revision >= VT6102) {
669 quirks = rqWOL | rqForceReset;
670 if (pdev->revision < VT6105) {
672 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
675 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
676 if (pdev->revision >= VT6105_B0)
677 quirks |= rq6patterns;
678 if (pdev->revision < VT6105M)
681 name = "Rhine III (Management Adapter)";
685 rc = pci_enable_device(pdev);
689 /* this should always be supported */
690 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
692 printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
698 if ((pci_resource_len(pdev, 0) < io_size) ||
699 (pci_resource_len(pdev, 1) < io_size)) {
701 printk(KERN_ERR "Insufficient PCI resources, aborting\n");
705 pioaddr = pci_resource_start(pdev, 0);
706 memaddr = pci_resource_start(pdev, 1);
708 pci_set_master(pdev);
710 dev = alloc_etherdev(sizeof(struct rhine_private));
713 printk(KERN_ERR "alloc_etherdev failed\n");
716 SET_NETDEV_DEV(dev, &pdev->dev);
718 rp = netdev_priv(dev);
721 rp->pioaddr = pioaddr;
724 rc = pci_request_regions(pdev, DRV_NAME);
726 goto err_out_free_netdev;
728 ioaddr = pci_iomap(pdev, bar, io_size);
731 printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
732 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
733 goto err_out_free_res;
737 enable_mmio(pioaddr, quirks);
739 /* Check that selected MMIO registers match the PIO ones */
741 while (mmio_verify_registers[i]) {
742 int reg = mmio_verify_registers[i++];
743 unsigned char a = inb(pioaddr+reg);
744 unsigned char b = readb(ioaddr+reg);
747 printk(KERN_ERR "MMIO do not match PIO [%02x] "
748 "(%02x != %02x)\n", reg, a, b);
752 #endif /* USE_MMIO */
754 dev->base_addr = (unsigned long)ioaddr;
757 /* Get chip registers into a sane state */
758 rhine_power_init(dev);
759 rhine_hw_init(dev, pioaddr);
761 for (i = 0; i < 6; i++)
762 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
763 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
765 if (!is_valid_ether_addr(dev->perm_addr)) {
767 printk(KERN_ERR "Invalid MAC address\n");
771 /* For Rhine-I/II, phy_id is loaded from EEPROM */
773 phy_id = ioread8(ioaddr + 0x6C);
775 dev->irq = pdev->irq;
777 spin_lock_init(&rp->lock);
778 rp->mii_if.dev = dev;
779 rp->mii_if.mdio_read = mdio_read;
780 rp->mii_if.mdio_write = mdio_write;
781 rp->mii_if.phy_id_mask = 0x1f;
782 rp->mii_if.reg_num_mask = 0x1f;
784 /* The chip-specific entries in the device structure. */
785 dev->netdev_ops = &rhine_netdev_ops;
786 dev->ethtool_ops = &netdev_ethtool_ops;
787 dev->watchdog_timeo = TX_TIMEOUT;
789 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
791 if (rp->quirks & rqRhineI)
792 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
794 /* dev->name not defined before register_netdev()! */
795 rc = register_netdev(dev);
799 printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n",
806 dev->dev_addr, pdev->irq);
808 pci_set_drvdata(pdev, dev);
812 int mii_status = mdio_read(dev, phy_id, 1);
813 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
814 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
815 if (mii_status != 0xffff && mii_status != 0x0000) {
816 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
817 printk(KERN_INFO "%s: MII PHY found at address "
818 "%d, status 0x%4.4x advertising %4.4x "
819 "Link %4.4x.\n", dev->name, phy_id,
820 mii_status, rp->mii_if.advertising,
821 mdio_read(dev, phy_id, 5));
823 /* set IFF_RUNNING */
824 if (mii_status & BMSR_LSTATUS)
825 netif_carrier_on(dev);
827 netif_carrier_off(dev);
831 rp->mii_if.phy_id = phy_id;
832 if (debug > 1 && avoid_D3)
833 printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
839 pci_iounmap(pdev, ioaddr);
841 pci_release_regions(pdev);
848 static int alloc_ring(struct net_device* dev)
850 struct rhine_private *rp = netdev_priv(dev);
854 ring = pci_alloc_consistent(rp->pdev,
855 RX_RING_SIZE * sizeof(struct rx_desc) +
856 TX_RING_SIZE * sizeof(struct tx_desc),
859 printk(KERN_ERR "Could not allocate DMA memory.\n");
862 if (rp->quirks & rqRhineI) {
863 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
864 PKT_BUF_SZ * TX_RING_SIZE,
866 if (rp->tx_bufs == NULL) {
867 pci_free_consistent(rp->pdev,
868 RX_RING_SIZE * sizeof(struct rx_desc) +
869 TX_RING_SIZE * sizeof(struct tx_desc),
876 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
877 rp->rx_ring_dma = ring_dma;
878 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
883 static void free_ring(struct net_device* dev)
885 struct rhine_private *rp = netdev_priv(dev);
887 pci_free_consistent(rp->pdev,
888 RX_RING_SIZE * sizeof(struct rx_desc) +
889 TX_RING_SIZE * sizeof(struct tx_desc),
890 rp->rx_ring, rp->rx_ring_dma);
894 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
895 rp->tx_bufs, rp->tx_bufs_dma);
901 static void alloc_rbufs(struct net_device *dev)
903 struct rhine_private *rp = netdev_priv(dev);
907 rp->dirty_rx = rp->cur_rx = 0;
909 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
910 rp->rx_head_desc = &rp->rx_ring[0];
911 next = rp->rx_ring_dma;
913 /* Init the ring entries */
914 for (i = 0; i < RX_RING_SIZE; i++) {
915 rp->rx_ring[i].rx_status = 0;
916 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
917 next += sizeof(struct rx_desc);
918 rp->rx_ring[i].next_desc = cpu_to_le32(next);
919 rp->rx_skbuff[i] = NULL;
921 /* Mark the last entry as wrapping the ring. */
922 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
924 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
925 for (i = 0; i < RX_RING_SIZE; i++) {
926 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
927 rp->rx_skbuff[i] = skb;
930 skb->dev = dev; /* Mark as being used by this device. */
932 rp->rx_skbuff_dma[i] =
933 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
936 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
937 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
939 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
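/*
 * Note: if an allocation above failed at some index i < RX_RING_SIZE, the
 * assignment above leaves cur_rx - dirty_rx positive (unsigned wraparound),
 * so the refill loop at the end of rhine_rx() retries the missing buffers.
 */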
942 static void free_rbufs(struct net_device* dev)
944 struct rhine_private *rp = netdev_priv(dev);
947 /* Free all the skbuffs in the Rx queue. */
948 for (i = 0; i < RX_RING_SIZE; i++) {
949 rp->rx_ring[i].rx_status = 0;
950 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
951 if (rp->rx_skbuff[i]) {
952 pci_unmap_single(rp->pdev,
953 rp->rx_skbuff_dma[i],
954 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
955 dev_kfree_skb(rp->rx_skbuff[i]);
957 rp->rx_skbuff[i] = NULL;
961 static void alloc_tbufs(struct net_device* dev)
963 struct rhine_private *rp = netdev_priv(dev);
967 rp->dirty_tx = rp->cur_tx = 0;
968 next = rp->tx_ring_dma;
969 for (i = 0; i < TX_RING_SIZE; i++) {
970 rp->tx_skbuff[i] = NULL;
971 rp->tx_ring[i].tx_status = 0;
972 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
973 next += sizeof(struct tx_desc);
974 rp->tx_ring[i].next_desc = cpu_to_le32(next);
975 if (rp->quirks & rqRhineI)
976 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
978 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
982 static void free_tbufs(struct net_device* dev)
984 struct rhine_private *rp = netdev_priv(dev);
987 for (i = 0; i < TX_RING_SIZE; i++) {
988 rp->tx_ring[i].tx_status = 0;
989 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
990 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
991 if (rp->tx_skbuff[i]) {
992 if (rp->tx_skbuff_dma[i]) {
993 pci_unmap_single(rp->pdev,
994 rp->tx_skbuff_dma[i],
995 rp->tx_skbuff[i]->len,
998 dev_kfree_skb(rp->tx_skbuff[i]);
1000 rp->tx_skbuff[i] = NULL;
1001 rp->tx_buf[i] = NULL;
1005 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1007 struct rhine_private *rp = netdev_priv(dev);
1008 void __iomem *ioaddr = rp->base;
1010 mii_check_media(&rp->mii_if, debug, init_media);
1012 if (rp->mii_if.full_duplex)
1013 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1016 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1019 printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
1020 rp->mii_if.force_media, netif_carrier_ok(dev));
1023 /* Called after status of force_media possibly changed */
1024 static void rhine_set_carrier(struct mii_if_info *mii)
1026 if (mii->force_media) {
1027 /* autoneg is off: Link is always assumed to be up */
1028 if (!netif_carrier_ok(mii->dev))
1029 netif_carrier_on(mii->dev);
1031 else /* Let MII library update carrier status */
1032 rhine_check_media(mii->dev, 0);
1034 printk(KERN_INFO "%s: force_media %d, carrier %d\n",
1035 mii->dev->name, mii->force_media,
1036 netif_carrier_ok(mii->dev));
1039 static void init_registers(struct net_device *dev)
1041 struct rhine_private *rp = netdev_priv(dev);
1042 void __iomem *ioaddr = rp->base;
1045 for (i = 0; i < 6; i++)
1046 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1048 /* Initialize other registers. */
1049 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1050 /* Configure initial FIFO thresholds. */
1051 iowrite8(0x20, ioaddr + TxConfig);
1052 rp->tx_thresh = 0x20;
1053 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1055 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1056 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1058 rhine_set_rx_mode(dev);
1060 napi_enable(&rp->napi);
1062 /* Enable interrupts by setting the interrupt mask. */
1063 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1064 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1065 IntrTxDone | IntrTxError | IntrTxUnderrun |
1066 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1067 ioaddr + IntrEnable);
1069 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1071 rhine_check_media(dev, 1);
1074 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1075 static void rhine_enable_linkmon(void __iomem *ioaddr)
1077 iowrite8(0, ioaddr + MIICmd);
1078 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1079 iowrite8(0x80, ioaddr + MIICmd);
1081 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1083 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1086 /* Disable MII link status auto-polling (required for MDIO access) */
1087 static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1089 iowrite8(0, ioaddr + MIICmd);
1091 if (quirks & rqRhineI) {
1092 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1094 /* Can be called from ISR. Evil. */
1097 /* 0x80 must be set immediately before turning it off */
1098 iowrite8(0x80, ioaddr + MIICmd);
1100 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1102 /* Heh. Now clear 0x80 again. */
1103 iowrite8(0, ioaddr + MIICmd);
1106 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1109 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1111 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1113 struct rhine_private *rp = netdev_priv(dev);
1114 void __iomem *ioaddr = rp->base;
1117 rhine_disable_linkmon(ioaddr, rp->quirks);
1119 /* rhine_disable_linkmon already cleared MIICmd */
1120 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1121 iowrite8(regnum, ioaddr + MIIRegAddr);
1122 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1123 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1124 result = ioread16(ioaddr + MIIData);
1126 rhine_enable_linkmon(ioaddr);
1130 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1132 struct rhine_private *rp = netdev_priv(dev);
1133 void __iomem *ioaddr = rp->base;
1135 rhine_disable_linkmon(ioaddr, rp->quirks);
1137 /* rhine_disable_linkmon already cleared MIICmd */
1138 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1139 iowrite8(regnum, ioaddr + MIIRegAddr);
1140 iowrite16(value, ioaddr + MIIData);
1141 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1142 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1144 rhine_enable_linkmon(ioaddr);
1147 static int rhine_open(struct net_device *dev)
1149 struct rhine_private *rp = netdev_priv(dev);
1150 void __iomem *ioaddr = rp->base;
1153 rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
1159 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1160 dev->name, rp->pdev->irq);
1162 rc = alloc_ring(dev);
1164 free_irq(rp->pdev->irq, dev);
1169 rhine_chip_reset(dev);
1170 init_registers(dev);
1172 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1173 "MII status: %4.4x.\n",
1174 dev->name, ioread16(ioaddr + ChipCmd),
1175 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1177 netif_start_queue(dev);
1182 static void rhine_tx_timeout(struct net_device *dev)
1184 struct rhine_private *rp = netdev_priv(dev);
1185 void __iomem *ioaddr = rp->base;
1187 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1188 "%4.4x, resetting...\n",
1189 dev->name, ioread16(ioaddr + IntrStatus),
1190 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1192 /* protect against concurrent rx interrupts */
1193 disable_irq(rp->pdev->irq);
1195 napi_disable(&rp->napi);
1197 spin_lock(&rp->lock);
1199 /* clear all descriptors */
1205 /* Reinitialize the hardware. */
1206 rhine_chip_reset(dev);
1207 init_registers(dev);
1209 spin_unlock(&rp->lock);
1210 enable_irq(rp->pdev->irq);
1212 dev->trans_start = jiffies;
1213 dev->stats.tx_errors++;
1214 netif_wake_queue(dev);
1217 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1218 struct net_device *dev)
1220 struct rhine_private *rp = netdev_priv(dev);
1221 void __iomem *ioaddr = rp->base;
1224 /* Caution: the write order is important here, set the field
1225 with the "ownership" bits last. */
1227 /* Calculate the next Tx descriptor entry. */
1228 entry = rp->cur_tx % TX_RING_SIZE;
1230 if (skb_padto(skb, ETH_ZLEN))
1231 return NETDEV_TX_OK;
1233 rp->tx_skbuff[entry] = skb;
1235 if ((rp->quirks & rqRhineI) &&
1236 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1237 /* Must use alignment buffer. */
1238 if (skb->len > PKT_BUF_SZ) {
1239 /* packet too long, drop it */
1241 rp->tx_skbuff[entry] = NULL;
1242 dev->stats.tx_dropped++;
1243 return NETDEV_TX_OK;
1246 /* Padding is not copied and so must be redone. */
1247 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1248 if (skb->len < ETH_ZLEN)
1249 memset(rp->tx_buf[entry] + skb->len, 0,
1250 ETH_ZLEN - skb->len);
1251 rp->tx_skbuff_dma[entry] = 0;
1252 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1253 (rp->tx_buf[entry] -
1256 rp->tx_skbuff_dma[entry] =
1257 pci_map_single(rp->pdev, skb->data, skb->len,
1259 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1262 rp->tx_ring[entry].desc_length =
1263 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1266 spin_lock_irq(&rp->lock);
1268 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1273 /* Non-x86 Todo: explicitly flush cache lines here. */
1275 /* Wake the potentially-idle transmit channel */
1276 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1280 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1281 netif_stop_queue(dev);
1283 dev->trans_start = jiffies;
1285 spin_unlock_irq(&rp->lock);
1288 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1289 dev->name, rp->cur_tx-1, entry);
1291 return NETDEV_TX_OK;
1294 /* The interrupt handler does all of the Rx thread work and cleans up
1295 after the Tx thread. */
1296 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1298 struct net_device *dev = dev_instance;
1299 struct rhine_private *rp = netdev_priv(dev);
1300 void __iomem *ioaddr = rp->base;
1302 int boguscnt = max_interrupt_work;
1305 while ((intr_status = get_intr_status(dev))) {
1308 /* Acknowledge all of the current interrupt sources ASAP. */
1309 if (intr_status & IntrTxDescRace)
1310 iowrite8(0x08, ioaddr + IntrStatus2);
1311 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1315 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1316 dev->name, intr_status);
1318 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1319 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1320 iowrite16(IntrTxAborted |
1321 IntrTxDone | IntrTxError | IntrTxUnderrun |
1322 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1323 ioaddr + IntrEnable);
1325 napi_schedule(&rp->napi);
1328 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1329 if (intr_status & IntrTxErrSummary) {
1330 /* Avoid scavenging before the Tx engine is turned off */
1331 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1333 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1334 printk(KERN_WARNING "%s: "
1335 "rhine_interrupt() Tx engine "
1336 "still on.\n", dev->name);
1341 /* Abnormal error summary/uncommon events handlers. */
1342 if (intr_status & (IntrPCIErr | IntrLinkChange |
1343 IntrStatsMax | IntrTxError | IntrTxAborted |
1344 IntrTxUnderrun | IntrTxDescRace))
1345 rhine_error(dev, intr_status);
1347 if (--boguscnt < 0) {
1348 printk(KERN_WARNING "%s: Too much work at interrupt, "
1350 dev->name, intr_status);
1356 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1357 dev->name, ioread16(ioaddr + IntrStatus));
1358 return IRQ_RETVAL(handled);
1361 /* This routine is logically part of the interrupt handler, but isolated
1362 for clarity. */
1363 static void rhine_tx(struct net_device *dev)
1365 struct rhine_private *rp = netdev_priv(dev);
1366 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1368 spin_lock(&rp->lock);
1370 /* find and clean up dirty tx descriptors */
1371 while (rp->dirty_tx != rp->cur_tx) {
1372 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1374 printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
1376 if (txstatus & DescOwn)
1378 if (txstatus & 0x8000) {
1380 printk(KERN_DEBUG "%s: Transmit error, "
1381 "Tx status %8.8x.\n",
1382 dev->name, txstatus);
1383 dev->stats.tx_errors++;
1384 if (txstatus & 0x0400)
1385 dev->stats.tx_carrier_errors++;
1386 if (txstatus & 0x0200)
1387 dev->stats.tx_window_errors++;
1388 if (txstatus & 0x0100)
1389 dev->stats.tx_aborted_errors++;
1390 if (txstatus & 0x0080)
1391 dev->stats.tx_heartbeat_errors++;
1392 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1393 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1394 dev->stats.tx_fifo_errors++;
1395 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1396 break; /* Keep the skb - we try again */
1398 /* Transmitter restarted in 'abnormal' handler. */
1400 if (rp->quirks & rqRhineI)
1401 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1403 dev->stats.collisions += txstatus & 0x0F;
1405 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1406 (txstatus >> 3) & 0xF,
1408 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1409 dev->stats.tx_packets++;
1411 /* Free the original skb. */
1412 if (rp->tx_skbuff_dma[entry]) {
1413 pci_unmap_single(rp->pdev,
1414 rp->tx_skbuff_dma[entry],
1415 rp->tx_skbuff[entry]->len,
1418 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1419 rp->tx_skbuff[entry] = NULL;
1420 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1422 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1423 netif_wake_queue(dev);
1425 spin_unlock(&rp->lock);
1428 /* Process up to limit frames from receive ring */
1429 static int rhine_rx(struct net_device *dev, int limit)
1431 struct rhine_private *rp = netdev_priv(dev);
1433 int entry = rp->cur_rx % RX_RING_SIZE;
1436 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1438 le32_to_cpu(rp->rx_head_desc->rx_status));
1441 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1442 for (count = 0; count < limit; ++count) {
1443 struct rx_desc *desc = rp->rx_head_desc;
1444 u32 desc_status = le32_to_cpu(desc->rx_status);
1445 int data_size = desc_status >> 16;
1447 if (desc_status & DescOwn)
1451 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1454 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1455 if ((desc_status & RxWholePkt) != RxWholePkt) {
1456 printk(KERN_WARNING "%s: Oversized Ethernet "
1457 "frame spanned multiple buffers, entry "
1458 "%#x length %d status %8.8x!\n",
1459 dev->name, entry, data_size,
1461 printk(KERN_WARNING "%s: Oversized Ethernet "
1462 "frame %p vs %p.\n", dev->name,
1463 rp->rx_head_desc, &rp->rx_ring[entry]);
1464 dev->stats.rx_length_errors++;
1465 } else if (desc_status & RxErr) {
1466 /* There was an error. */
1468 printk(KERN_DEBUG "rhine_rx() Rx "
1469 "error was %8.8x.\n",
1471 dev->stats.rx_errors++;
1472 if (desc_status & 0x0030)
1473 dev->stats.rx_length_errors++;
1474 if (desc_status & 0x0048)
1475 dev->stats.rx_fifo_errors++;
1476 if (desc_status & 0x0004)
1477 dev->stats.rx_frame_errors++;
1478 if (desc_status & 0x0002) {
1479 /* this can also be updated outside the interrupt handler */
1480 spin_lock(&rp->lock);
1481 dev->stats.rx_crc_errors++;
1482 spin_unlock(&rp->lock);
1486 struct sk_buff *skb;
1487 /* Length should omit the CRC */
1488 int pkt_len = data_size - 4;
1490 /* Check if the packet is long enough to accept without
1491 copying to a minimally-sized skbuff. */
1492 if (pkt_len < rx_copybreak &&
1493 (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
1494 skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
1495 pci_dma_sync_single_for_cpu(rp->pdev,
1496 rp->rx_skbuff_dma[entry],
1498 PCI_DMA_FROMDEVICE);
1500 skb_copy_to_linear_data(skb,
1501 rp->rx_skbuff[entry]->data,
1503 skb_put(skb, pkt_len);
1504 pci_dma_sync_single_for_device(rp->pdev,
1505 rp->rx_skbuff_dma[entry],
1507 PCI_DMA_FROMDEVICE);
1509 skb = rp->rx_skbuff[entry];
1511 printk(KERN_ERR "%s: Inconsistent Rx "
1512 "descriptor chain.\n",
1516 rp->rx_skbuff[entry] = NULL;
1517 skb_put(skb, pkt_len);
1518 pci_unmap_single(rp->pdev,
1519 rp->rx_skbuff_dma[entry],
1521 PCI_DMA_FROMDEVICE);
1523 skb->protocol = eth_type_trans(skb, dev);
1524 netif_receive_skb(skb);
1525 dev->stats.rx_bytes += pkt_len;
1526 dev->stats.rx_packets++;
1528 entry = (++rp->cur_rx) % RX_RING_SIZE;
1529 rp->rx_head_desc = &rp->rx_ring[entry];
1532 /* Refill the Rx ring buffers. */
1533 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1534 struct sk_buff *skb;
1535 entry = rp->dirty_rx % RX_RING_SIZE;
1536 if (rp->rx_skbuff[entry] == NULL) {
1537 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1538 rp->rx_skbuff[entry] = skb;
1540 break; /* Better luck next round. */
1541 skb->dev = dev; /* Mark as being used by this device. */
1542 rp->rx_skbuff_dma[entry] =
1543 pci_map_single(rp->pdev, skb->data,
1545 PCI_DMA_FROMDEVICE);
1546 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1548 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1555 * Clears the "tally counters" for CRC errors and missed frames(?).
1556 * It has been reported that some chips need a write of 0 to clear
1557 * these, for others the counters are set to 1 when written to and
1558 * instead cleared when read. So we clear them both ways ...
1560 static inline void clear_tally_counters(void __iomem *ioaddr)
1562 iowrite32(0, ioaddr + RxMissed);
1563 ioread16(ioaddr + RxCRCErrs);
1564 ioread16(ioaddr + RxMissed);
1567 static void rhine_restart_tx(struct net_device *dev) {
1568 struct rhine_private *rp = netdev_priv(dev);
1569 void __iomem *ioaddr = rp->base;
1570 int entry = rp->dirty_tx % TX_RING_SIZE;
1574 * If new errors occurred, we need to sort them out before doing Tx.
1575 * In that case the ISR will be back here RSN anyway.
1577 intr_status = get_intr_status(dev);
1579 if ((intr_status & IntrTxErrSummary) == 0) {
1581 /* We know better than the chip where it should continue. */
1582 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1583 ioaddr + TxRingPtr);
1585 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1587 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1592 /* This should never happen */
1594 printk(KERN_WARNING "%s: rhine_restart_tx() "
1595 "Another error occured %8.8x.\n",
1596 dev->name, intr_status);
1601 static void rhine_error(struct net_device *dev, int intr_status)
1603 struct rhine_private *rp = netdev_priv(dev);
1604 void __iomem *ioaddr = rp->base;
1606 spin_lock(&rp->lock);
1608 if (intr_status & IntrLinkChange)
1609 rhine_check_media(dev, 0);
1610 if (intr_status & IntrStatsMax) {
1611 dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1612 dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1613 clear_tally_counters(ioaddr);
1615 if (intr_status & IntrTxAborted) {
1617 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1618 dev->name, intr_status);
1620 if (intr_status & IntrTxUnderrun) {
1621 if (rp->tx_thresh < 0xE0)
1622 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1624 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1625 "threshold now %2.2x.\n",
1626 dev->name, rp->tx_thresh);
1628 if (intr_status & IntrTxDescRace) {
1630 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1633 if ((intr_status & IntrTxError) &&
1634 (intr_status & (IntrTxAborted |
1635 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1636 if (rp->tx_thresh < 0xE0) {
1637 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1640 printk(KERN_INFO "%s: Unspecified error. Tx "
1641 "threshold now %2.2x.\n",
1642 dev->name, rp->tx_thresh);
1644 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1646 rhine_restart_tx(dev);
1648 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1649 IntrTxError | IntrTxAborted | IntrNormalSummary |
1652 printk(KERN_ERR "%s: Something Wicked happened! "
1653 "%8.8x.\n", dev->name, intr_status);
1656 spin_unlock(&rp->lock);
1659 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1661 struct rhine_private *rp = netdev_priv(dev);
1662 void __iomem *ioaddr = rp->base;
1663 unsigned long flags;
1665 spin_lock_irqsave(&rp->lock, flags);
1666 dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1667 dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1668 clear_tally_counters(ioaddr);
1669 spin_unlock_irqrestore(&rp->lock, flags);
1674 static void rhine_set_rx_mode(struct net_device *dev)
1676 struct rhine_private *rp = netdev_priv(dev);
1677 void __iomem *ioaddr = rp->base;
1678 u32 mc_filter[2]; /* Multicast hash filter */
1679 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1681 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1683 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1684 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1685 } else if ((dev->mc_count > multicast_filter_limit)
1686 || (dev->flags & IFF_ALLMULTI)) {
1687 /* Too many to match, or accept all multicasts. */
1688 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1689 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1692 struct dev_mc_list *mclist;
1694 memset(mc_filter, 0, sizeof(mc_filter));
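/*
 * The 64-entry multicast hash uses the top 6 bits of each address's
 * Ethernet CRC: bit_nr >> 5 selects one of the two 32-bit filter
 * registers, bit_nr & 31 the bit within it.
 */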
1695 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1696 i++, mclist = mclist->next) {
1697 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1699 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1701 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1702 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1705 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1708 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1710 struct rhine_private *rp = netdev_priv(dev);
1712 strcpy(info->driver, DRV_NAME);
1713 strcpy(info->version, DRV_VERSION);
1714 strcpy(info->bus_info, pci_name(rp->pdev));
1717 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1719 struct rhine_private *rp = netdev_priv(dev);
1722 spin_lock_irq(&rp->lock);
1723 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1724 spin_unlock_irq(&rp->lock);
1729 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1731 struct rhine_private *rp = netdev_priv(dev);
1734 spin_lock_irq(&rp->lock);
1735 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1736 spin_unlock_irq(&rp->lock);
1737 rhine_set_carrier(&rp->mii_if);
1742 static int netdev_nway_reset(struct net_device *dev)
1744 struct rhine_private *rp = netdev_priv(dev);
1746 return mii_nway_restart(&rp->mii_if);
1749 static u32 netdev_get_link(struct net_device *dev)
1751 struct rhine_private *rp = netdev_priv(dev);
1753 return mii_link_ok(&rp->mii_if);
1756 static u32 netdev_get_msglevel(struct net_device *dev)
1761 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1766 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1768 struct rhine_private *rp = netdev_priv(dev);
1770 if (!(rp->quirks & rqWOL))
1773 spin_lock_irq(&rp->lock);
1774 wol->supported = WAKE_PHY | WAKE_MAGIC |
1775 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1776 wol->wolopts = rp->wolopts;
1777 spin_unlock_irq(&rp->lock);
1780 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1782 struct rhine_private *rp = netdev_priv(dev);
1783 u32 support = WAKE_PHY | WAKE_MAGIC |
1784 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1786 if (!(rp->quirks & rqWOL))
1789 if (wol->wolopts & ~support)
1792 spin_lock_irq(&rp->lock);
1793 rp->wolopts = wol->wolopts;
1794 spin_unlock_irq(&rp->lock);
1799 static const struct ethtool_ops netdev_ethtool_ops = {
1800 .get_drvinfo = netdev_get_drvinfo,
1801 .get_settings = netdev_get_settings,
1802 .set_settings = netdev_set_settings,
1803 .nway_reset = netdev_nway_reset,
1804 .get_link = netdev_get_link,
1805 .get_msglevel = netdev_get_msglevel,
1806 .set_msglevel = netdev_set_msglevel,
1807 .get_wol = rhine_get_wol,
1808 .set_wol = rhine_set_wol,
1811 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1813 struct rhine_private *rp = netdev_priv(dev);
1816 if (!netif_running(dev))
1819 spin_lock_irq(&rp->lock);
1820 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1821 spin_unlock_irq(&rp->lock);
1822 rhine_set_carrier(&rp->mii_if);
1827 static int rhine_close(struct net_device *dev)
1829 struct rhine_private *rp = netdev_priv(dev);
1830 void __iomem *ioaddr = rp->base;
1832 spin_lock_irq(&rp->lock);
1834 netif_stop_queue(dev);
1835 napi_disable(&rp->napi);
1838 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1839 "status was %4.4x.\n",
1840 dev->name, ioread16(ioaddr + ChipCmd));
1842 /* Switch to loopback mode to avoid hardware races. */
1843 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1845 /* Disable interrupts by clearing the interrupt mask. */
1846 iowrite16(0x0000, ioaddr + IntrEnable);
1848 /* Stop the chip's Tx and Rx processes. */
1849 iowrite16(CmdStop, ioaddr + ChipCmd);
1851 spin_unlock_irq(&rp->lock);
1853 free_irq(rp->pdev->irq, dev);
1862 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1864 struct net_device *dev = pci_get_drvdata(pdev);
1865 struct rhine_private *rp = netdev_priv(dev);
1867 unregister_netdev(dev);
1869 pci_iounmap(pdev, rp->base);
1870 pci_release_regions(pdev);
1873 pci_disable_device(pdev);
1874 pci_set_drvdata(pdev, NULL);
1877 static void rhine_shutdown (struct pci_dev *pdev)
1879 struct net_device *dev = pci_get_drvdata(pdev);
1880 struct rhine_private *rp = netdev_priv(dev);
1881 void __iomem *ioaddr = rp->base;
1883 if (!(rp->quirks & rqWOL))
1884 return; /* Nothing to do for non-WOL adapters */
1886 rhine_power_init(dev);
1888 /* Make sure we use pattern 0, 1 and not 4, 5 */
1889 if (rp->quirks & rq6patterns)
1890 iowrite8(0x04, ioaddr + WOLcgClr);
1892 if (rp->wolopts & WAKE_MAGIC) {
1893 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1895 * Turn EEPROM-controlled wake-up back on -- some hardware may
1896 * not cooperate otherwise.
1898 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1901 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1902 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1904 if (rp->wolopts & WAKE_PHY)
1905 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1907 if (rp->wolopts & WAKE_UCAST)
1908 iowrite8(WOLucast, ioaddr + WOLcrSet);
1911 /* Enable legacy WOL (for old motherboards) */
1912 iowrite8(0x01, ioaddr + PwcfgSet);
1913 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1916 /* Hit power state D3 (sleep) */
1918 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1920 /* TODO: Check use of pci_enable_wake() */
1925 static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1927 struct net_device *dev = pci_get_drvdata(pdev);
1928 struct rhine_private *rp = netdev_priv(dev);
1929 unsigned long flags;
1931 if (!netif_running(dev))
1934 napi_disable(&rp->napi);
1936 netif_device_detach(dev);
1937 pci_save_state(pdev);
1939 spin_lock_irqsave(&rp->lock, flags);
1940 rhine_shutdown(pdev);
1941 spin_unlock_irqrestore(&rp->lock, flags);
1943 free_irq(dev->irq, dev);
1947 static int rhine_resume(struct pci_dev *pdev)
1949 struct net_device *dev = pci_get_drvdata(pdev);
1950 struct rhine_private *rp = netdev_priv(dev);
1951 unsigned long flags;
1954 if (!netif_running(dev))
1957 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1958 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1960 ret = pci_set_power_state(pdev, PCI_D0);
1962 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1963 dev->name, ret ? "failed" : "succeeded", ret);
1965 pci_restore_state(pdev);
1967 spin_lock_irqsave(&rp->lock, flags);
1969 enable_mmio(rp->pioaddr, rp->quirks);
1971 rhine_power_init(dev);
1976 init_registers(dev);
1977 spin_unlock_irqrestore(&rp->lock, flags);
1979 netif_device_attach(dev);
1983 #endif /* CONFIG_PM */
1985 static struct pci_driver rhine_driver = {
1987 .id_table = rhine_pci_tbl,
1988 .probe = rhine_init_one,
1989 .remove = __devexit_p(rhine_remove_one),
1991 .suspend = rhine_suspend,
1992 .resume = rhine_resume,
1993 #endif /* CONFIG_PM */
1994 .shutdown = rhine_shutdown,
1997 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2001 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2002 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2008 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2009 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2015 static int __init rhine_init(void)
2017 /* when a module, this is printed whether or not devices are found in probe */
2021 if (dmi_check_system(rhine_dmi_table)) {
2022 /* these BIOSes fail at PXE boot if chip is in D3 */
2024 printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2029 printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2031 return pci_register_driver(&rhine_driver);
2035 static void __exit rhine_cleanup(void)
2037 pci_unregister_driver(&rhine_driver);
2041 module_init(rhine_init);
2042 module_exit(rhine_cleanup);