1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
28 * Test Tx checksumming thoroughly
31 * Complete reset on PciErr
32 * Consider Rx interrupt mitigation using TimerIntr
33 * Investigate using skb->priority with h/w VLAN priority
34 * Investigate using High Priority Tx Queue with skb->priority
35 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 * Implement Tx software interrupt mitigation via
39 * The real minimum of CP_MIN_MTU is 4 bytes. However,
40 for this to be supported, one must(?) turn on packet padding.
41 * Support external MII transceivers (patch available)
44 * TX checksumming is considered experimental. It is off by
45 default, use ethtool to turn it on.
49 #define DRV_NAME "8139cp"
50 #define DRV_VERSION "1.3"
51 #define DRV_RELDATE "Mar 22, 2004"
54 #include <linux/module.h>
55 #include <linux/moduleparam.h>
56 #include <linux/kernel.h>
57 #include <linux/compiler.h>
58 #include <linux/netdevice.h>
59 #include <linux/etherdevice.h>
60 #include <linux/init.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/delay.h>
64 #include <linux/ethtool.h>
65 #include <linux/mii.h>
66 #include <linux/if_vlan.h>
67 #include <linux/crc32.h>
70 #include <linux/tcp.h>
71 #include <linux/udp.h>
72 #include <linux/cache.h>
75 #include <asm/uaccess.h>
77 /* VLAN tagging feature enable/disable */
78 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
79 #define CP_VLAN_TAG_USED 1
80 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
81 do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
83 #define CP_VLAN_TAG_USED 0
84 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
85 do { (tx_desc)->opts2 = 0; } while (0)
88 /* These identify the driver base version and may not be removed. */
89 static char version[] =
90 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
92 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
93 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
94 MODULE_VERSION(DRV_VERSION);
95 MODULE_LICENSE("GPL");
97 static int debug = -1;
98 module_param(debug, int, 0);
99 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
101 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
102 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
103 static int multicast_filter_limit = 32;
104 module_param(multicast_filter_limit, int, 0);
105 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
107 #define PFX DRV_NAME ": "
109 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
112 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
113 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
114 #define CP_REGS_SIZE (0xff + 1)
115 #define CP_REGS_VER 1 /* version 1 */
116 #define CP_RX_RING_SIZE 64
117 #define CP_TX_RING_SIZE 64
118 #define CP_RING_BYTES \
119 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
120 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
122 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
123 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
124 #define TX_BUFFS_AVAIL(CP) \
125 (((CP)->tx_tail <= (CP)->tx_head) ? \
126 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
127 (CP)->tx_tail - (CP)->tx_head - 1)
129 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
130 #define CP_INTERNAL_PHY 32
132 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
133 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
134 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
135 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
136 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
138 /* Time in jiffies before concluding the transmitter is hung. */
139 #define TX_TIMEOUT (6*HZ)
141 /* hardware minimum and maximum for a single frame's data payload */
142 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
143 #define CP_MAX_MTU 4096
146 /* NIC register offsets */
147 MAC0 = 0x00, /* Ethernet hardware address. */
148 MAR0 = 0x08, /* Multicast filter. */
149 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
150 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
151 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
152 Cmd = 0x37, /* Command register */
153 IntrMask = 0x3C, /* Interrupt mask */
154 IntrStatus = 0x3E, /* Interrupt status */
155 TxConfig = 0x40, /* Tx configuration */
156 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
157 RxConfig = 0x44, /* Rx configuration */
158 RxMissed = 0x4C, /* 24 bits valid, write clears */
159 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
160 Config1 = 0x52, /* Config1 */
161 Config3 = 0x59, /* Config3 */
162 Config4 = 0x5A, /* Config4 */
163 MultiIntr = 0x5C, /* Multiple interrupt select */
164 BasicModeCtrl = 0x62, /* MII BMCR */
165 BasicModeStatus = 0x64, /* MII BMSR */
166 NWayAdvert = 0x66, /* MII ADVERTISE */
167 NWayLPAR = 0x68, /* MII LPA */
168 NWayExpansion = 0x6A, /* MII Expansion */
169 Config5 = 0xD8, /* Config5 */
170 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
171 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
172 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
173 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
174 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
175 TxThresh = 0xEC, /* Early Tx threshold */
176 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
177 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
179 /* Tx and Rx status descriptors */
180 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
181 RingEnd = (1 << 30), /* End of descriptor ring */
182 FirstFrag = (1 << 29), /* First segment of a packet */
183 LastFrag = (1 << 28), /* Final segment of a packet */
184 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
185 MSSShift = 16, /* MSS value position */
186 MSSMask = 0xfff, /* MSS value: 11 bits */
187 TxError = (1 << 23), /* Tx error summary */
188 RxError = (1 << 20), /* Rx error summary */
189 IPCS = (1 << 18), /* Calculate IP checksum */
190 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
191 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
192 TxVlanTag = (1 << 17), /* Add VLAN tag */
193 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
194 IPFail = (1 << 15), /* IP checksum failed */
195 UDPFail = (1 << 14), /* UDP/IP checksum failed */
196 TCPFail = (1 << 13), /* TCP/IP checksum failed */
197 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
198 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
199 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
203 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
204 TxOWC = (1 << 22), /* Tx Out-of-window collision */
205 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
206 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
207 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
208 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
209 RxErrFrame = (1 << 27), /* Rx frame alignment error */
210 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
211 RxErrCRC = (1 << 18), /* Rx CRC error */
212 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
213 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
214 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
216 /* StatsAddr register */
217 DumpStats = (1 << 3), /* Begin stats dump */
219 /* RxConfig register */
220 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
221 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
222 AcceptErr = 0x20, /* Accept packets with CRC errors */
223 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
224 AcceptBroadcast = 0x08, /* Accept broadcast packets */
225 AcceptMulticast = 0x04, /* Accept multicast packets */
226 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
227 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
229 /* IntrMask / IntrStatus registers */
230 PciErr = (1 << 15), /* System error on the PCI bus */
231 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
232 LenChg = (1 << 13), /* Cable length change */
233 SWInt = (1 << 8), /* Software-requested interrupt */
234 TxEmpty = (1 << 7), /* No Tx descriptors available */
235 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
236 LinkChg = (1 << 5), /* Packet underrun, or link change */
237 RxEmpty = (1 << 4), /* No Rx descriptors available */
238 TxErr = (1 << 3), /* Tx error */
239 TxOK = (1 << 2), /* Tx packet sent */
240 RxErr = (1 << 1), /* Rx error */
241 RxOK = (1 << 0), /* Rx packet received */
242 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
243 but hardware likes to raise it */
245 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
246 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
247 RxErr | RxOK | IntrResvd,
249 /* C mode command register */
250 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
251 RxOn = (1 << 3), /* Rx mode enable */
252 TxOn = (1 << 2), /* Tx mode enable */
254 /* C+ mode command register */
255 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
256 RxChkSum = (1 << 5), /* Rx checksum offload enable */
257 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
258 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
259 CpRxOn = (1 << 1), /* Rx mode enable */
260 CpTxOn = (1 << 0), /* Tx mode enable */
262 /* Cfg9436 EEPROM control register */
263 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
264 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
266 /* TxConfig register */
267 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
268 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
270 /* Early Tx Threshold register */
271 TxThreshMask = 0x3f, /* Mask bits 5-0 */
272 TxThreshMax = 2048, /* Max early Tx threshold */
274 /* Config1 register */
275 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
276 LWACT = (1 << 4), /* LWAKE active mode */
277 PMEnable = (1 << 0), /* Enable various PM features of chip */
279 /* Config3 register */
280 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
281 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
282 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
284 /* Config4 register */
285 LWPTN = (1 << 1), /* LWAKE Pattern */
286 LWPME = (1 << 4), /* LANWAKE vs PMEB */
288 /* Config5 register */
289 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
290 MWF = (1 << 5), /* Accept Multicast wakeup frame */
291 UWF = (1 << 4), /* Accept Unicast wakeup frame */
292 LANWake = (1 << 1), /* Enable LANWake signal */
293 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
295 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
296 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
297 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
300 static const unsigned int cp_rx_config =
301 (RX_FIFO_THRESH << RxCfgFIFOShift) |
302 (RX_DMA_BURST << RxCfgDMAShift);
310 struct cp_dma_stats {
324 } __attribute__((packed));
326 struct cp_extra_stats {
327 unsigned long rx_frags;
332 struct net_device *dev;
336 struct napi_struct napi;
338 struct pci_dev *pdev;
342 struct cp_extra_stats cp_stats;
344 unsigned rx_head ____cacheline_aligned;
346 struct cp_desc *rx_ring;
347 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
349 unsigned tx_head ____cacheline_aligned;
351 struct cp_desc *tx_ring;
352 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
355 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
358 struct vlan_group *vlgrp;
362 struct mii_if_info mii_if;
365 #define cpr8(reg) readb(cp->regs + (reg))
366 #define cpr16(reg) readw(cp->regs + (reg))
367 #define cpr32(reg) readl(cp->regs + (reg))
368 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
369 #define cpw16(reg,val) writew((val), cp->regs + (reg))
370 #define cpw32(reg,val) writel((val), cp->regs + (reg))
371 #define cpw8_f(reg,val) do { \
372 writeb((val), cp->regs + (reg)); \
373 readb(cp->regs + (reg)); \
375 #define cpw16_f(reg,val) do { \
376 writew((val), cp->regs + (reg)); \
377 readw(cp->regs + (reg)); \
379 #define cpw32_f(reg,val) do { \
380 writel((val), cp->regs + (reg)); \
381 readl(cp->regs + (reg)); \
385 static void __cp_set_rx_mode (struct net_device *dev);
386 static void cp_tx (struct cp_private *cp);
387 static void cp_clean_rings (struct cp_private *cp);
388 #ifdef CONFIG_NET_POLL_CONTROLLER
389 static void cp_poll_controller(struct net_device *dev);
391 static int cp_get_eeprom_len(struct net_device *dev);
392 static int cp_get_eeprom(struct net_device *dev,
393 struct ethtool_eeprom *eeprom, u8 *data);
394 static int cp_set_eeprom(struct net_device *dev,
395 struct ethtool_eeprom *eeprom, u8 *data);
397 static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
402 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
405 const char str[ETH_GSTRING_LEN];
406 } ethtool_stats_keys[] = {
/* VLAN acceleration hook: toggle hardware VLAN de-tagging (RxVlanOn in the
 * C+ command register) under cp->lock.  NOTE(review): this dump elides some
 * lines (braces, the vlgrp assignment and if/else); code kept byte-identical. */
425 static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
427 struct cp_private *cp = netdev_priv(dev);
430 spin_lock_irqsave(&cp->lock, flags);
/* enable hardware Rx VLAN de-tagging */
433 cp->cpcmd |= RxVlanOn;
/* ... or disable it when no vlan_group is registered */
435 cp->cpcmd &= ~RxVlanOn;
/* push the updated C+ command word to the chip */
437 cpw16(CpCmd, cp->cpcmd);
438 spin_unlock_irqrestore(&cp->lock, flags);
/* Compute the Rx buffer size from the device MTU: jumbo-ish MTUs get
 * MTU + header + trailer room, standard MTUs use the fixed PKT_BUF_SZ. */
442 static inline void cp_set_rxbufsize (struct cp_private *cp)
444 unsigned int mtu = cp->dev->mtu;
446 if (mtu > ETH_DATA_LEN)
447 /* MTU + ethernet header + FCS + optional VLAN tag */
448 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
/* default 1536-byte buffer for standard-MTU frames */
450 cp->rx_buf_sz = PKT_BUF_SZ;
/* Deliver one received skb to the network stack: resolve the protocol,
 * bump rx counters, and take the VLAN-accelerated receive path when the
 * descriptor carries a tag (RxVlanTagged in opts2). */
453 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
454 struct cp_desc *desc)
456 skb->protocol = eth_type_trans (skb, cp->dev);
458 cp->dev->stats.rx_packets++;
459 cp->dev->stats.rx_bytes += skb->len;
462 if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
/* tag in opts2 is byte-swapped relative to host order, hence swab16() */
463 vlan_hwaccel_receive_skb(skb, cp->vlgrp,
464 swab16(le32_to_cpu(desc->opts2) & 0xffff));
467 netif_receive_skb(skb);
/* Account one errored Rx descriptor into netdev statistics, decoding the
 * individual error bits from the descriptor status word. */
470 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
473 if (netif_msg_rx_err (cp))
474 pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
475 cp->dev->name, rx_tail, status, len);
476 cp->dev->stats.rx_errors++;
477 if (status & RxErrFrame)
478 cp->dev->stats.rx_frame_errors++;
479 if (status & RxErrCRC)
480 cp->dev->stats.rx_crc_errors++;
481 if ((status & RxErrRunt) || (status & RxErrLong))
482 cp->dev->stats.rx_length_errors++;
/* multi-fragment frame: also counted as a length error */
483 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
484 cp->dev->stats.rx_length_errors++;
485 if (status & RxErrFIFO)
486 cp->dev->stats.rx_fifo_errors++;
/* Return nonzero if the hardware-verified checksum is good.  Bits 17:16 of
 * the Rx status encode the protocol id (non-IP/UDP/TCP/IP); the matching
 * *Fail bit must be clear for that protocol. */
489 static inline unsigned int cp_rx_csum_ok (u32 status)
491 unsigned int protocol = (status >> 16) & 0x3;
493 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
495 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
497 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
/* NAPI poll callback: drain up to @budget packets from the Rx ring.
 * For each completed descriptor: validate status, allocate a replacement
 * skb, unmap and hand the filled skb up via cp_rx_skb(), then re-arm the
 * descriptor with DescOwn (plus RingEnd on the last slot).  When work is
 * exhausted, re-enable the full interrupt mask and complete NAPI. */
502 static int cp_rx_poll(struct napi_struct *napi, int budget)
504 struct cp_private *cp = container_of(napi, struct cp_private, napi);
505 struct net_device *dev = cp->dev;
506 unsigned int rx_tail = cp->rx_tail;
/* ack the Rx interrupt sources before processing the ring */
511 cpw16(IntrStatus, cp_rx_intr_mask);
516 struct sk_buff *skb, *new_skb;
517 struct cp_desc *desc;
518 const unsigned buflen = cp->rx_buf_sz;
520 skb = cp->rx_skb[rx_tail];
523 desc = &cp->rx_ring[rx_tail];
524 status = le32_to_cpu(desc->opts1);
/* descriptor still owned by the NIC: nothing more to reap */
525 if (status & DescOwn)
/* low 13 bits hold the frame length including the 4-byte FCS */
528 len = (status & 0x1fff) - 4;
529 mapping = le64_to_cpu(desc->addr);
531 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
532 /* we don't support incoming fragmented frames.
533 * instead, we attempt to ensure that the
534 * pre-allocated RX skbs are properly sized such
535 * that RX fragments are never encountered
537 cp_rx_err_acct(cp, rx_tail, status, len);
538 dev->stats.rx_dropped++;
539 cp->cp_stats.rx_frags++;
543 if (status & (RxError | RxErrFIFO)) {
544 cp_rx_err_acct(cp, rx_tail, status, len);
548 if (netif_msg_rx_status(cp))
549 pr_debug("%s: rx slot %d status 0x%x len %d\n",
550 dev->name, rx_tail, status, len);
/* allocate the replacement buffer before consuming the old one */
552 new_skb = netdev_alloc_skb_ip_align(dev, buflen);
554 dev->stats.rx_dropped++;
558 dma_unmap_single(&cp->pdev->dev, mapping,
559 buflen, PCI_DMA_FROMDEVICE);
561 /* Handle checksum offloading for incoming packets. */
562 if (cp_rx_csum_ok(status))
563 skb->ip_summed = CHECKSUM_UNNECESSARY;
565 skb->ip_summed = CHECKSUM_NONE;
569 mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
571 cp->rx_skb[rx_tail] = new_skb;
573 cp_rx_skb(cp, skb, desc);
/* give the slot back to the NIC; last slot also carries RingEnd */
577 cp->rx_ring[rx_tail].opts2 = 0;
578 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
579 if (rx_tail == (CP_RX_RING_SIZE - 1))
580 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
583 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
584 rx_tail = NEXT_RX(rx_tail);
590 cp->rx_tail = rx_tail;
592 /* if we did not reach work limit, then we're done with
593 * this round of polling
/* more Rx work arrived while we polled: go around again */
598 if (cpr16(IntrStatus) & cp_rx_intr_mask)
601 spin_lock_irqsave(&cp->lock, flags);
/* restore the full interrupt mask and finish this NAPI round */
602 cpw16_f(IntrMask, cp_intr_mask);
603 __napi_complete(napi);
604 spin_unlock_irqrestore(&cp->lock, flags);
/* Shared interrupt handler.  Acks non-Rx sources, schedules NAPI for Rx
 * work (masking Rx interrupts until the poll completes), reaps Tx
 * completions inline, tracks link changes, and reports PCI bus errors. */
610 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
612 struct net_device *dev = dev_instance;
613 struct cp_private *cp;
616 if (unlikely(dev == NULL))
618 cp = netdev_priv(dev);
620 status = cpr16(IntrStatus);
/* 0xFFFF means the device is gone (e.g. hot-unplugged) */
621 if (!status || (status == 0xFFFF))
624 if (netif_msg_intr(cp))
625 pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n",
626 dev->name, status, cpr8(Cmd), cpr16(CpCmd));
/* ack everything except Rx bits; those are acked in cp_rx_poll() */
628 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
630 spin_lock(&cp->lock);
632 /* close possible race's with dev_close */
633 if (unlikely(!netif_running(dev))) {
635 spin_unlock(&cp->lock);
639 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
640 if (napi_schedule_prep(&cp->napi)) {
/* mask Rx sources until NAPI poll re-enables them */
641 cpw16_f(IntrMask, cp_norx_intr_mask);
642 __napi_schedule(&cp->napi);
645 if (status & (TxOK | TxErr | TxEmpty | SWInt))
647 if (status & LinkChg)
648 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
650 spin_unlock(&cp->lock);
652 if (status & PciErr) {
/* read-then-writeback clears the latched PCI error status bits */
655 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
656 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
657 pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n",
658 dev->name, status, pci_status);
660 /* TODO: reset hardware */
666 #ifdef CONFIG_NET_POLL_CONTROLLER
668 * Polling receive - used by netconsole and other diagnostic tools
669 * to allow network i/o with interrupts disabled.
/* netpoll hook: run the interrupt handler with the IRQ disabled so
 * netconsole and similar tools can do I/O without interrupts. */
671 static void cp_poll_controller(struct net_device *dev)
673 disable_irq(dev->irq);
674 cp_interrupt(dev->irq, dev);
675 enable_irq(dev->irq);
/* Reclaim completed Tx descriptors between tx_tail and tx_head: unmap
 * the DMA buffer, account per-frame stats/errors on the LastFrag
 * descriptor, free the skb, and wake the queue when enough slots free up. */
679 static void cp_tx (struct cp_private *cp)
681 unsigned tx_head = cp->tx_head;
682 unsigned tx_tail = cp->tx_tail;
684 while (tx_tail != tx_head) {
685 struct cp_desc *txd = cp->tx_ring + tx_tail;
690 status = le32_to_cpu(txd->opts1);
/* still owned by the NIC: stop reaping here */
691 if (status & DescOwn)
694 skb = cp->tx_skb[tx_tail];
/* buffer length lives in the low 16 bits of opts1 */
697 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
698 le32_to_cpu(txd->opts1) & 0xffff,
/* only the final fragment of a frame carries the error summary */
701 if (status & LastFrag) {
702 if (status & (TxError | TxFIFOUnder)) {
703 if (netif_msg_tx_err(cp))
704 pr_debug("%s: tx err, status 0x%x\n",
705 cp->dev->name, status);
706 cp->dev->stats.tx_errors++;
708 cp->dev->stats.tx_window_errors++;
709 if (status & TxMaxCol)
710 cp->dev->stats.tx_aborted_errors++;
711 if (status & TxLinkFail)
712 cp->dev->stats.tx_carrier_errors++;
713 if (status & TxFIFOUnder)
714 cp->dev->stats.tx_fifo_errors++;
716 cp->dev->stats.collisions +=
717 ((status >> TxColCntShift) & TxColCntMask);
718 cp->dev->stats.tx_packets++;
719 cp->dev->stats.tx_bytes += skb->len;
720 if (netif_msg_tx_done(cp))
721 pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail);
723 dev_kfree_skb_irq(skb);
726 cp->tx_skb[tx_tail] = NULL;
728 tx_tail = NEXT_TX(tx_tail);
731 cp->tx_tail = tx_tail;
/* wake the queue once a worst-case (max frags + 1) frame fits */
733 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
734 netif_wake_queue(cp->dev);
/* Hard-start transmit.  Under cp->lock: verify ring space, build one
 * descriptor for a linear skb or a FirstFrag/fragments/LastFrag chain for
 * a paged skb (setting the first descriptor's DescOwn last to avoid racing
 * the NIC), apply TSO/checksum-offload flags and the VLAN tag, then kick
 * the chip with NormalTxPoll. */
737 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
738 struct net_device *dev)
740 struct cp_private *cp = netdev_priv(dev);
743 unsigned long intr_flags;
749 spin_lock_irqsave(&cp->lock, intr_flags);
751 /* This is a hard error, log it. */
752 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
753 netif_stop_queue(dev);
754 spin_unlock_irqrestore(&cp->lock, intr_flags);
755 pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
757 return NETDEV_TX_BUSY;
/* hardware wants the VLAN tag byte-swapped, hence swab16() */
761 if (cp->vlgrp && vlan_tx_tag_present(skb))
762 vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
766 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
767 if (dev->features & NETIF_F_TSO)
768 mss = skb_shinfo(skb)->gso_size;
/* fast path: linear skb maps to a single descriptor */
770 if (skb_shinfo(skb)->nr_frags == 0) {
771 struct cp_desc *txd = &cp->tx_ring[entry];
776 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
777 CP_VLAN_TX_TAG(txd, vlan_tag);
778 txd->addr = cpu_to_le64(mapping);
781 flags = eor | len | DescOwn | FirstFrag | LastFrag;
/* TSO: hand the MSS to the chip in opts1 */
784 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
785 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
786 const struct iphdr *ip = ip_hdr(skb);
787 if (ip->protocol == IPPROTO_TCP)
788 flags |= IPCS | TCPCS;
789 else if (ip->protocol == IPPROTO_UDP)
790 flags |= IPCS | UDPCS;
792 WARN_ON(1); /* we need a WARN() */
795 txd->opts1 = cpu_to_le32(flags);
798 cp->tx_skb[entry] = skb;
799 entry = NEXT_TX(entry);
802 u32 first_len, first_eor;
803 dma_addr_t first_mapping;
804 int frag, first_entry = entry;
805 const struct iphdr *ip = ip_hdr(skb);
807 /* We must give this initial chunk to the device last.
808 * Otherwise we could race with the device.
811 first_len = skb_headlen(skb);
812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
813 first_len, PCI_DMA_TODEVICE);
814 cp->tx_skb[entry] = skb;
815 entry = NEXT_TX(entry);
817 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
818 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
823 len = this_frag->size;
824 mapping = dma_map_single(&cp->pdev->dev,
825 ((void *) page_address(this_frag->page) +
826 this_frag->page_offset),
827 len, PCI_DMA_TODEVICE);
828 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
830 ctrl = eor | len | DescOwn;
834 ((mss & MSSMask) << MSSShift);
835 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
836 if (ip->protocol == IPPROTO_TCP)
837 ctrl |= IPCS | TCPCS;
838 else if (ip->protocol == IPPROTO_UDP)
839 ctrl |= IPCS | UDPCS;
/* mark the final fragment so the chip knows the frame ends here */
844 if (frag == skb_shinfo(skb)->nr_frags - 1)
847 txd = &cp->tx_ring[entry];
848 CP_VLAN_TX_TAG(txd, vlan_tag);
849 txd->addr = cpu_to_le64(mapping);
852 txd->opts1 = cpu_to_le32(ctrl);
855 cp->tx_skb[entry] = skb;
856 entry = NEXT_TX(entry);
/* fill the head descriptor last; writing DescOwn here publishes
 * the whole chain to the NIC */
859 txd = &cp->tx_ring[first_entry];
860 CP_VLAN_TX_TAG(txd, vlan_tag);
861 txd->addr = cpu_to_le64(first_mapping);
864 if (skb->ip_summed == CHECKSUM_PARTIAL) {
865 if (ip->protocol == IPPROTO_TCP)
866 txd->opts1 = cpu_to_le32(first_eor | first_len |
867 FirstFrag | DescOwn |
869 else if (ip->protocol == IPPROTO_UDP)
870 txd->opts1 = cpu_to_le32(first_eor | first_len |
871 FirstFrag | DescOwn |
876 txd->opts1 = cpu_to_le32(first_eor | first_len |
877 FirstFrag | DescOwn);
881 if (netif_msg_tx_queued(cp))
882 pr_debug("%s: tx queued, slot %d, skblen %d\n",
883 dev->name, entry, skb->len);
/* stop the queue if a worst-case frame no longer fits */
884 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
885 netif_stop_queue(dev);
887 spin_unlock_irqrestore(&cp->lock, intr_flags);
/* tell the chip to start fetching normal-priority Tx descriptors */
889 cpw8(TxPoll, NormalTxPoll);
890 dev->trans_start = jiffies;
895 /* Set or clear the multicast filter for this adaptor.
896 This routine is not state sensitive and need not be SMP locked. */
/* Program the Rx filter: promiscuous, all-multicast (when over the filter
 * limit or IFF_ALLMULTI), or a 64-bit CRC-based multicast hash in MAR0/1.
 * Caller holds cp->lock (see cp_set_rx_mode wrapper). */
898 static void __cp_set_rx_mode (struct net_device *dev)
900 struct cp_private *cp = netdev_priv(dev);
901 u32 mc_filter[2]; /* Multicast hash filter */
905 /* Note: do not reorder, GCC is clever about common statements. */
906 if (dev->flags & IFF_PROMISC) {
907 /* Unconditionally log net taps. */
909 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
911 mc_filter[1] = mc_filter[0] = 0xffffffff;
912 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
913 (dev->flags & IFF_ALLMULTI)) {
914 /* Too many to filter perfectly -- accept all multicasts. */
915 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
916 mc_filter[1] = mc_filter[0] = 0xffffffff;
918 struct dev_mc_list *mclist;
919 rx_mode = AcceptBroadcast | AcceptMyPhys;
920 mc_filter[1] = mc_filter[0] = 0;
921 netdev_for_each_mc_addr(mclist, dev) {
/* top 6 bits of the Ethernet CRC select the hash bucket */
922 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
924 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
925 rx_mode |= AcceptMulticast;
929 /* We can safely update without stopping the chip. */
930 tmp = cp_rx_config | rx_mode;
931 if (cp->rx_config != tmp) {
932 cpw32_f (RxConfig, tmp);
935 cpw32_f (MAR0 + 0, mc_filter[0]);
936 cpw32_f (MAR0 + 4, mc_filter[1]);
/* Locked wrapper around __cp_set_rx_mode() for the ndo_set_rx_mode hook. */
939 static void cp_set_rx_mode (struct net_device *dev)
942 struct cp_private *cp = netdev_priv(dev);
944 spin_lock_irqsave (&cp->lock, flags);
945 __cp_set_rx_mode(dev);
946 spin_unlock_irqrestore (&cp->lock, flags);
/* Fold the chip's RxMissed counter into netdev stats.  Reading + any write
 * clears the hardware counter, so this accumulates deltas. */
949 static void __cp_get_stats(struct cp_private *cp)
951 /* only lower 24 bits valid; write any value to clear */
952 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
/* ndo_get_stats: harvest hardware counters (if the device is up and
 * present) under the lock, then return the accumulated netdev stats. */
956 static struct net_device_stats *cp_get_stats(struct net_device *dev)
958 struct cp_private *cp = netdev_priv(dev);
961 /* The chip only need report frame silently dropped. */
962 spin_lock_irqsave(&cp->lock, flags);
963 if (netif_running(dev) && netif_device_present(dev))
965 spin_unlock_irqrestore(&cp->lock, flags);
/* Quiesce the chip: ack all pending interrupts, mask everything, ack
 * again, and reset the software Tx ring indices. */
970 static void cp_stop_hw (struct cp_private *cp)
972 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
973 cpw16_f(IntrMask, 0);
976 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
979 cp->tx_head = cp->tx_tail = 0;
/* Issue a soft reset (CmdReset is self-clearing) and poll up to 1000
 * times, sleeping between polls; warn on timeout. */
982 static void cp_reset_hw (struct cp_private *cp)
984 unsigned work = 1000;
989 if (!(cpr8(Cmd) & CmdReset))
992 schedule_timeout_uninterruptible(10);
995 pr_err("%s: hardware reset timeout\n", cp->dev->name);
/* Enable C+ mode features and turn on the Rx/Tx engines. */
998 static inline void cp_start_hw (struct cp_private *cp)
1000 cpw16(CpCmd, cp->cpcmd);
1001 cpw8(Cmd, RxOn | TxOn);
/* Full hardware bring-up: unlock config registers, restore the MAC
 * address, program Tx/Rx config, disable WOL, load the ring DMA
 * addresses, unmask interrupts, and re-lock the config registers. */
1004 static void cp_init_hw (struct cp_private *cp)
1006 struct net_device *dev = cp->dev;
1007 dma_addr_t ring_dma;
/* unlock ConfigX/MII register access */
1011 cpw8_f (Cfg9346, Cfg9346_Unlock);
1013 /* Restore our idea of the MAC address. */
1014 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1015 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1018 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1020 __cp_set_rx_mode(dev);
1021 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1023 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1024 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1025 cpw8(Config3, PARMEnable);
1026 cp->wol_enabled = 0;
1028 cpw8(Config5, cpr8(Config5) & PMEStatus);
/* high-priority Tx ring unused: zero its address */
1030 cpw32_f(HiTxRingAddr, 0);
1031 cpw32_f(HiTxRingAddr + 4, 0);
/* Rx ring sits first in the coherent block, Tx ring follows it */
1033 ring_dma = cp->ring_dma;
1034 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1035 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1037 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1038 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1039 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1041 cpw16(MultiIntr, 0);
1043 cpw16_f(IntrMask, cp_intr_mask);
/* re-lock ConfigX/MII register access */
1045 cpw8_f(Cfg9346, Cfg9346_Lock);
/* Populate the entire Rx ring: allocate and DMA-map an skb for every
 * slot, then arm each descriptor with DescOwn (RingEnd on the last). */
1048 static int cp_refill_rx(struct cp_private *cp)
1050 struct net_device *dev = cp->dev;
1053 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1054 struct sk_buff *skb;
1057 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1061 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1062 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1063 cp->rx_skb[i] = skb;
1065 cp->rx_ring[i].opts2 = 0;
1066 cp->rx_ring[i].addr = cpu_to_le64(mapping);
/* last descriptor must carry RingEnd so the NIC wraps */
1067 if (i == (CP_RX_RING_SIZE - 1))
1068 cp->rx_ring[i].opts1 =
1069 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1071 cp->rx_ring[i].opts1 =
1072 cpu_to_le32(DescOwn | cp->rx_buf_sz);
/* Reset the software ring indices to the start of both rings. */
1082 static void cp_init_rings_index (struct cp_private *cp)
1085 cp->tx_head = cp->tx_tail = 0;
/* Initialize both rings: clear the Tx ring (RingEnd on the last
 * descriptor), reset indices, and refill the Rx ring with buffers. */
1088 static int cp_init_rings (struct cp_private *cp)
1090 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1091 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1093 cp_init_rings_index(cp);
1095 return cp_refill_rx (cp);
/* Allocate one coherent DMA block holding both descriptor rings
 * (Rx first, Tx directly after), then initialize them. */
1098 static int cp_alloc_rings (struct cp_private *cp)
1102 mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1103 &cp->ring_dma, GFP_KERNEL);
/* Tx ring starts right after the CP_RX_RING_SIZE Rx descriptors */
1108 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1110 return cp_init_rings(cp);
/* Tear down both rings: unmap and free every outstanding Rx/Tx skb,
 * count in-flight Tx frames as dropped, then zero the descriptor rings
 * and the skb bookkeeping arrays. */
1113 static void cp_clean_rings (struct cp_private *cp)
1115 struct cp_desc *desc;
1118 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1119 if (cp->rx_skb[i]) {
1120 desc = cp->rx_ring + i;
1121 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1122 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1123 dev_kfree_skb(cp->rx_skb[i]);
1127 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1128 if (cp->tx_skb[i]) {
1129 struct sk_buff *skb = cp->tx_skb[i];
1131 desc = cp->tx_ring + i;
1132 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1133 le32_to_cpu(desc->opts1) & 0xffff,
/* frame never completed: account it once, on its final fragment */
1135 if (le32_to_cpu(desc->opts1) & LastFrag)
1137 cp->dev->stats.tx_dropped++;
1141 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1142 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1144 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1145 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
/* Release the coherent DMA block that backs both descriptor rings. */
1148 static void cp_free_rings (struct cp_private *cp)
1151 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
/* ndo_open: allocate rings, enable NAPI, request the (shared) IRQ,
 * check link state, and start the Tx queue.  On IRQ failure, NAPI is
 * disabled again on the error path. */
1157 static int cp_open (struct net_device *dev)
1159 struct cp_private *cp = netdev_priv(dev);
1162 if (netif_msg_ifup(cp))
1163 pr_debug("%s: enabling interface\n", dev->name);
1165 rc = cp_alloc_rings(cp);
1169 napi_enable(&cp->napi);
1173 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
/* assume link down until mii_check_media() says otherwise */
1177 netif_carrier_off(dev);
1178 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1179 netif_start_queue(dev);
/* error path: undo NAPI enable */
1184 napi_disable(&cp->napi);
/* ndo_close: disable NAPI, stop the queue and hardware under the lock,
 * then free the IRQ (and, in elided lines, the rings). */
1190 static int cp_close (struct net_device *dev)
1192 struct cp_private *cp = netdev_priv(dev);
1193 unsigned long flags;
/* NAPI must be quiesced before tearing down the hardware */
1195 napi_disable(&cp->napi);
1197 if (netif_msg_ifdown(cp))
1198 pr_debug("%s: disabling interface\n", dev->name);
1200 spin_lock_irqsave(&cp->lock, flags);
1202 netif_stop_queue(dev);
1203 netif_carrier_off(dev);
1207 spin_unlock_irqrestore(&cp->lock, flags);
1209 free_irq(dev->irq, dev);
/* ndo_tx_timeout: log chip state, then (under the lock) reinitialize
 * the rings and wake the queue to restart transmission. */
1215 static void cp_tx_timeout(struct net_device *dev)
1217 struct cp_private *cp = netdev_priv(dev);
1218 unsigned long flags;
1221 pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n",
1222 dev->name, cpr8(Cmd), cpr16(CpCmd),
1223 cpr16(IntrStatus), cpr16(IntrMask));
1225 spin_lock_irqsave(&cp->lock, flags);
1229 rc = cp_init_rings(cp);
1232 netif_wake_queue(dev);
1234 spin_unlock_irqrestore(&cp->lock, flags);
/* net_device_ops .ndo_change_mtu: validate against CP_MIN_MTU/CP_MAX_MTU;
 * if the interface is down only the buffer size needs updating, otherwise
 * the hardware is stopped and the rings re-allocated at the new size.
 * (Excerpt: dev->mtu assignment and return statements elided.) */
1240 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1242 struct cp_private *cp = netdev_priv(dev);
1244 unsigned long flags;
1246 /* check for invalid MTU, according to hardware limits */
1247 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1250 /* if network interface not up, no need for complexity */
1251 if (!netif_running(dev)) {
1253 cp_set_rxbufsize(cp); /* set new rx buf size */
1257 spin_lock_irqsave(&cp->lock, flags);
1259 cp_stop_hw(cp); /* stop h/w and free rings */
1263 cp_set_rxbufsize(cp); /* set new rx buf size */
1265 rc = cp_init_rings(cp); /* realloc and restart h/w */
1268 spin_unlock_irqrestore(&cp->lock, flags);
/* Map of generic MII register numbers 0-7 to native 8139 register offsets;
 * a zero entry means "no native equivalent" (see mdio_read/mdio_write).
 * (Initializer lines elided in this excerpt.) */
1274 static const char mii_2_8139_map[8] = {
/* mii_if_info .mdio_read hook: the internal PHY has no real MDIO bus, so
 * MII registers are emulated by reading the mapped native register; any
 * unmapped register reads back as 0.  phy_id is unused. */
1285 static int mdio_read(struct net_device *dev, int phy_id, int location)
1287 struct cp_private *cp = netdev_priv(dev);
1289 return location < 8 && mii_2_8139_map[location] ?
1290 readw(cp->regs + mii_2_8139_map[location]) : 0;
/* mii_if_info .mdio_write hook: MII register 0 (BMCR) maps to BasicModeCtrl
 * and needs the Cfg9346 unlock/lock sequence; other mapped registers are
 * written directly; unmapped writes are silently dropped. */
1294 static void mdio_write(struct net_device *dev, int phy_id, int location,
1297 struct cp_private *cp = netdev_priv(dev);
1299 if (location == 0) {
1300 cpw8(Cfg9346, Cfg9346_Unlock);
1301 cpw16(BasicModeCtrl, value);
1302 cpw8(Cfg9346, Cfg9346_Lock);
1303 } else if (location < 8 && mii_2_8139_map[location])
1304 cpw16(mii_2_8139_map[location], value);
1307 /* Set the ethtool Wake-on-LAN settings */
/* Translate wol->wolopts into the Config3 (LinkUp/MagicPacket) and
 * Config5 (unicast/broadcast/multicast frame) wake bits; Config3 writes
 * require the Cfg9346 unlock sequence.  Caller holds cp->lock.
 * (Excerpt: the "WOL disabled" early-exit branches are elided.) */
1308 static int netdev_set_wol (struct cp_private *cp,
1309 const struct ethtool_wolinfo *wol)
1313 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1314 /* If WOL is being disabled, no need for complexity */
1316 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1317 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1320 cpw8 (Cfg9346, Cfg9346_Unlock);
1321 cpw8 (Config3, options);
1322 cpw8 (Cfg9346, Cfg9346_Lock);
1324 options = 0; /* Paranoia setting */
1325 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1326 /* If WOL is being disabled, no need for complexity */
1328 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1329 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1330 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1333 cpw8 (Config5, options);
/* Remember whether any wake source is armed (used at remove/suspend). */
1335 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1340 /* Get the ethtool Wake-on-LAN settings */
/* Report supported wake sources, then read Config3/Config5 back into
 * wol->wolopts — but only when WOL was previously enabled.
 * Caller holds cp->lock. */
1341 static void netdev_get_wol (struct cp_private *cp,
1342 struct ethtool_wolinfo *wol)
1346 wol->wolopts = 0; /* Start from scratch */
1347 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1348 WAKE_MCAST | WAKE_UCAST;
1349 /* We don't need to go on if WOL is disabled */
1350 if (!cp->wol_enabled) return;
1352 options = cpr8 (Config3);
1353 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1354 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1356 options = 0; /* Paranoia setting */
1357 options = cpr8 (Config5);
1358 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1359 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1360 if (options & MWF) wol->wolopts |= WAKE_MCAST;
/* ethtool .get_drvinfo: report driver name, version and PCI bus id.
 * NOTE(review): these are unbounded strcpy()s into the fixed-size
 * ethtool_drvinfo fields — the literals and pci_name() fit today, but a
 * bounded copy (strlcpy with sizeof) would be safer; confirm against the
 * kernel version this targets. */
1363 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1365 struct cp_private *cp = netdev_priv(dev);
1367 strcpy (info->driver, DRV_NAME);
1368 strcpy (info->version, DRV_VERSION);
1369 strcpy (info->bus_info, pci_name(cp->pdev));
/* ethtool .get_regs_len: size of the register snapshot cp_get_regs()
 * produces. */
1372 static int cp_get_regs_len(struct net_device *dev)
1374 return CP_REGS_SIZE;
/* ethtool .get_sset_count: number of statistics strings/values.
 * (Excerpt: the switch on sset and its default branch are elided.) */
1377 static int cp_get_sset_count (struct net_device *dev, int sset)
1381 return CP_NUM_STATS;
/* ethtool .get_settings: delegate to the generic MII helper under the
 * device lock. */
1387 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1389 struct cp_private *cp = netdev_priv(dev);
1391 unsigned long flags;
1393 spin_lock_irqsave(&cp->lock, flags);
1394 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1395 spin_unlock_irqrestore(&cp->lock, flags);
/* ethtool .set_settings: delegate to the generic MII helper under the
 * device lock. */
1400 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1402 struct cp_private *cp = netdev_priv(dev);
1404 unsigned long flags;
1406 spin_lock_irqsave(&cp->lock, flags);
1407 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1408 spin_unlock_irqrestore(&cp->lock, flags);
/* ethtool .nway_reset: restart autonegotiation via the MII library. */
1413 static int cp_nway_reset(struct net_device *dev)
1415 struct cp_private *cp = netdev_priv(dev);
1416 return mii_nway_restart(&cp->mii_if);
/* ethtool .get_msglevel: return the driver's message-enable bitmask. */
1419 static u32 cp_get_msglevel(struct net_device *dev)
1421 struct cp_private *cp = netdev_priv(dev);
1422 return cp->msg_enable;
/* ethtool .set_msglevel: store the new message-enable bitmask. */
1425 static void cp_set_msglevel(struct net_device *dev, u32 value)
1427 struct cp_private *cp = netdev_priv(dev);
1428 cp->msg_enable = value;
/* ethtool .get_rx_csum: report whether hardware Rx checksumming is on,
 * read directly from the CpCmd register. */
1431 static u32 cp_get_rx_csum(struct net_device *dev)
1433 struct cp_private *cp = netdev_priv(dev);
1434 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
/* ethtool .set_rx_csum: toggle the RxChkSum bit in the cached CpCmd value
 * and flush it to the register only when it actually changed.
 * (Excerpt: the enable branch and cp->cpcmd update lines are elided.) */
1437 static int cp_set_rx_csum(struct net_device *dev, u32 data)
1439 struct cp_private *cp = netdev_priv(dev);
1440 u16 cmd = cp->cpcmd, newcmd;
1447 newcmd &= ~RxChkSum;
1449 if (newcmd != cmd) {
1450 unsigned long flags;
1452 spin_lock_irqsave(&cp->lock, flags);
1454 cpw16_f(CpCmd, newcmd);
1455 spin_unlock_irqrestore(&cp->lock, flags);
/* ethtool .get_regs: snapshot the whole MMIO register window into the
 * user buffer.  A too-small buffer is silently ignored (the .get_regs
 * hook returns void, hence the commented-out -EINVAL). */
1461 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1464 struct cp_private *cp = netdev_priv(dev);
1465 unsigned long flags;
1467 if (regs->len < CP_REGS_SIZE)
1468 return /* -EINVAL */;
1470 regs->version = CP_REGS_VER;
/* Copy under the lock so the snapshot is not torn by concurrent writes. */
1472 spin_lock_irqsave(&cp->lock, flags);
1473 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1474 spin_unlock_irqrestore(&cp->lock, flags);
/* ethtool .get_wol: locked wrapper around netdev_get_wol(). */
1477 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1479 struct cp_private *cp = netdev_priv(dev);
1480 unsigned long flags;
1482 spin_lock_irqsave (&cp->lock, flags);
1483 netdev_get_wol (cp, wol);
1484 spin_unlock_irqrestore (&cp->lock, flags);
/* ethtool .set_wol: locked wrapper around netdev_set_wol(). */
1487 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1489 struct cp_private *cp = netdev_priv(dev);
1490 unsigned long flags;
1493 spin_lock_irqsave (&cp->lock, flags);
1494 rc = netdev_set_wol (cp, wol);
1495 spin_unlock_irqrestore (&cp->lock, flags);
1500 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1502 switch (stringset) {
1504 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
/* ethtool .get_ethtool_stats: ask the NIC to DMA its hardware statistics
 * dump into a coherent buffer, poll for completion, then unpack the
 * little-endian counters into tmp_stats[] in the same order as the
 * statistics key strings.  (Excerpt: allocation-failure handling and the
 * poll-loop delay/brace lines are elided.) */
1512 static void cp_get_ethtool_stats (struct net_device *dev,
1513 struct ethtool_stats *estats, u64 *tmp_stats)
1515 struct cp_private *cp = netdev_priv(dev);
1516 struct cp_dma_stats *nic_stats;
1520 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1525 /* begin NIC statistics dump */
/* 64-bit DMA address is programmed high word first; DumpStats in the low
 * word triggers the dump and self-clears when the NIC is done. */
1526 cpw32(StatsAddr + 4, (u64)dma >> 32);
1527 cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
/* Bounded poll (up to 1000 iterations) for dump completion. */
1530 for (i = 0; i < 1000; i++) {
1531 if ((cpr32(StatsAddr) & DumpStats) == 0)
1535 cpw32(StatsAddr, 0);
1536 cpw32(StatsAddr + 4, 0);
/* Unpack: order must match ethtool_stats_keys / CP_NUM_STATS. */
1540 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1541 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1542 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1543 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1544 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1545 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1546 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1547 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1548 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1549 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1550 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1551 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1552 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
/* The last slot is a software counter kept by the driver itself. */
1553 tmp_stats[i++] = cp->cp_stats.rx_frags;
1554 BUG_ON(i != CP_NUM_STATS);
1556 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
/* ethtool operations table wired into the net_device in cp_init_one(). */
1559 static const struct ethtool_ops cp_ethtool_ops = {
1560 .get_drvinfo = cp_get_drvinfo,
1561 .get_regs_len = cp_get_regs_len,
1562 .get_sset_count = cp_get_sset_count,
1563 .get_settings = cp_get_settings,
1564 .set_settings = cp_set_settings,
1565 .nway_reset = cp_nway_reset,
1566 .get_link = ethtool_op_get_link,
1567 .get_msglevel = cp_get_msglevel,
1568 .set_msglevel = cp_set_msglevel,
1569 .get_rx_csum = cp_get_rx_csum,
1570 .set_rx_csum = cp_set_rx_csum,
1571 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1572 .set_sg = ethtool_op_set_sg,
1573 .set_tso = ethtool_op_set_tso,
1574 .get_regs = cp_get_regs,
1575 .get_wol = cp_get_wol,
1576 .set_wol = cp_set_wol,
1577 .get_strings = cp_get_strings,
1578 .get_ethtool_stats = cp_get_ethtool_stats,
1579 .get_eeprom_len = cp_get_eeprom_len,
1580 .get_eeprom = cp_get_eeprom,
1581 .set_eeprom = cp_set_eeprom,
/* net_device_ops .ndo_do_ioctl: forward MII ioctls to the generic helper;
 * only valid while the interface is running.  (Excerpt: the -EINVAL
 * return for a stopped interface is elided.) */
1584 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1586 struct cp_private *cp = netdev_priv(dev);
1588 unsigned long flags;
1590 if (!netif_running(dev))
1593 spin_lock_irqsave(&cp->lock, flags);
1594 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1595 spin_unlock_irqrestore(&cp->lock, flags);
/* net_device_ops .ndo_set_mac_address: validate, copy into dev->dev_addr,
 * then program the MAC0 registers as two little-endian 32-bit writes under
 * the Cfg9346 unlock sequence. */
1599 static int cp_set_mac_address(struct net_device *dev, void *p)
1601 struct cp_private *cp = netdev_priv(dev);
1602 struct sockaddr *addr = p;
1604 if (!is_valid_ether_addr(addr->sa_data))
1605 return -EADDRNOTAVAIL;
1607 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1609 spin_lock_irq(&cp->lock);
1611 cpw8_f(Cfg9346, Cfg9346_Unlock);
1612 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1613 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1614 cpw8_f(Cfg9346, Cfg9346_Lock);
1616 spin_unlock_irq(&cp->lock);
1621 /* Serial EEPROM section. */
1623 /* EEPROM_Ctrl bits. */
1624 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1625 #define EE_CS 0x08 /* EEPROM chip select. */
1626 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1627 #define EE_WRITE_0 0x00
1628 #define EE_WRITE_1 0x02
1629 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1630 #define EE_ENB (0x80 | EE_CS)
1632 /* Delay between EEPROM clock transitions.
1633 No extra delay is needed with 33MHz PCI, but 66MHz may change this.
/* The readback itself provides the required settle time. */
1636 #define eeprom_delay() readl(ee_addr)
1638 /* The EEPROM commands include the always-set leading bit. */
1639 #define EE_EXTEND_CMD (4)
1640 #define EE_WRITE_CMD (5)
1641 #define EE_READ_CMD (6)
1642 #define EE_ERASE_CMD (7)
/* Sub-addresses used with EE_EXTEND_CMD (write-disable, write-all,
 * erase-all, write-enable). */
1644 #define EE_EWDS_ADDR (0)
1645 #define EE_WRAL_ADDR (1)
1646 #define EE_ERAL_ADDR (2)
1647 #define EE_EWEN_ADDR (3)
/* Magic cookie checked by cp_set_eeprom() before allowing writes. */
1649 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
/* Begin an EEPROM transaction: assert chip select (CS low-then-high edge
 * via the EE_ENB pattern).  (Excerpt: trailing delay line elided.) */
1651 static void eeprom_cmd_start(void __iomem *ee_addr)
1653 writeb (EE_ENB & ~EE_CS, ee_addr);
1654 writeb (EE_ENB, ee_addr);
/* Bit-bang @cmd_len bits of @cmd out to the EEPROM, MSB first, toggling
 * EE_SHIFT_CLK per bit.  (Excerpt: delay calls and loop brace elided.) */
1658 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1662 /* Shift the command bits out. */
1663 for (i = cmd_len - 1; i >= 0; i--) {
1664 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1665 writeb (EE_ENB | dataval, ee_addr);
1667 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
/* Leave the line in the idle (enabled, clock-low) state. */
1670 writeb (EE_ENB, ee_addr);
/* End an EEPROM transaction by de-asserting chip select.
 * (Excerpt: trailing delay line elided.) */
1674 static void eeprom_cmd_end(void __iomem *ee_addr)
1676 writeb (~EE_CS, ee_addr);
/* Issue one of the EE_EXTEND_CMD sub-commands (EWEN/EWDS/ERAL/WRAL):
 * the sub-address is encoded in the top two bits of the address field. */
1680 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1683 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1685 eeprom_cmd_start(ee_addr);
1686 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1687 eeprom_cmd_end(ee_addr);
/* Read one 16-bit word at @location from the serial EEPROM behind the
 * Cfg9346 register; @addr_len is 6 or 8 depending on EEPROM type.
 * Bits are clocked in MSB first.  (Excerpt: retval declaration/assignment
 * wrapper and delay lines elided.) */
1690 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1694 void __iomem *ee_addr = ioaddr + Cfg9346;
1695 int read_cmd = location | (EE_READ_CMD << addr_len);
1697 eeprom_cmd_start(ee_addr);
1698 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1700 for (i = 16; i > 0; i--) {
1701 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1704 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1706 writeb (EE_ENB, ee_addr);
1710 eeprom_cmd_end(ee_addr);
/* Write one 16-bit word to the serial EEPROM: enable writes (EWEN), send
 * the write command plus data, busy-poll DO (bounded at 20000 reads) for
 * completion, then disable writes again (EWDS). */
1715 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1719 void __iomem *ee_addr = ioaddr + Cfg9346;
1720 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1722 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1724 eeprom_cmd_start(ee_addr);
1725 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1726 eeprom_cmd(ee_addr, val, 16);
1727 eeprom_cmd_end(ee_addr);
/* EE_DATA_READ high signals the internal write cycle has finished. */
1729 eeprom_cmd_start(ee_addr);
1730 for (i = 0; i < 20000; i++)
1731 if (readb(ee_addr) & EE_DATA_READ)
1733 eeprom_cmd_end(ee_addr);
1735 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
/* ethtool .get_eeprom_len: probe word 0 — the 0x8129 signature indicates
 * the larger (256-byte / 8-bit-address) EEPROM, otherwise 128 bytes. */
1738 static int cp_get_eeprom_len(struct net_device *dev)
1740 struct cp_private *cp = netdev_priv(dev);
1743 spin_lock_irq(&cp->lock);
1744 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1745 spin_unlock_irq(&cp->lock);
/* ethtool .get_eeprom: the EEPROM is word (16-bit) addressed, so the
 * byte-oriented request is handled in three phases — a leading odd byte
 * (high half of a word), whole words, and a trailing odd byte.
 * (Excerpt: offset++ advances, tail-byte store and return elided.) */
1750 static int cp_get_eeprom(struct net_device *dev,
1751 struct ethtool_eeprom *eeprom, u8 *data)
1753 struct cp_private *cp = netdev_priv(dev);
1754 unsigned int addr_len;
/* Convert byte offset to word offset. */
1756 u32 offset = eeprom->offset >> 1;
1757 u32 len = eeprom->len;
1760 eeprom->magic = CP_EEPROM_MAGIC;
1762 spin_lock_irq(&cp->lock);
/* 0x8129 signature selects 8-bit addressing, else 6-bit. */
1764 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1766 if (eeprom->offset & 1) {
1767 val = read_eeprom(cp->regs, offset, addr_len);
1768 data[i++] = (u8)(val >> 8);
1772 while (i < len - 1) {
1773 val = read_eeprom(cp->regs, offset, addr_len);
1774 data[i++] = (u8)val;
1775 data[i++] = (u8)(val >> 8);
1780 val = read_eeprom(cp->regs, offset, addr_len);
1784 spin_unlock_irq(&cp->lock);
/* ethtool .set_eeprom: mirror of cp_get_eeprom() for writes.  Partial
 * words at either end are read-modify-written so the untouched byte is
 * preserved.  Requires the CP_EEPROM_MAGIC cookie.
 * (Excerpt: offset++ advances, length guard and return elided.) */
1788 static int cp_set_eeprom(struct net_device *dev,
1789 struct ethtool_eeprom *eeprom, u8 *data)
1791 struct cp_private *cp = netdev_priv(dev);
1792 unsigned int addr_len;
1794 u32 offset = eeprom->offset >> 1;
1795 u32 len = eeprom->len;
1798 if (eeprom->magic != CP_EEPROM_MAGIC)
1801 spin_lock_irq(&cp->lock);
1803 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
/* Leading odd byte: keep low byte, replace high byte. */
1805 if (eeprom->offset & 1) {
1806 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1807 val |= (u16)data[i++] << 8;
1808 write_eeprom(cp->regs, offset, val, addr_len);
/* Whole words. */
1812 while (i < len - 1) {
1813 val = (u16)data[i++];
1814 val |= (u16)data[i++] << 8;
1815 write_eeprom(cp->regs, offset, val, addr_len);
/* Trailing odd byte: keep high byte, replace low byte. */
1820 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1821 val |= (u16)data[i];
1822 write_eeprom(cp->regs, offset, val, addr_len);
1825 spin_unlock_irq(&cp->lock);
1829 /* Put the board into D3cold state and wait for WakeUp signal */
/* NOTE(review): the comment above says D3cold, but the code requests
 * PCI_D3hot — presumably the platform drops the device further once PME#
 * is armed; confirm against the PCI PM model this kernel targets. */
1830 static void cp_set_d3_state (struct cp_private *cp)
1832 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1833 pci_set_power_state (cp->pdev, PCI_D3hot);
/* net_device operations table; VLAN and poll-controller entries are
 * conditionally compiled. */
1836 static const struct net_device_ops cp_netdev_ops = {
1837 .ndo_open = cp_open,
1838 .ndo_stop = cp_close,
1839 .ndo_validate_addr = eth_validate_addr,
1840 .ndo_set_mac_address = cp_set_mac_address,
1841 .ndo_set_multicast_list = cp_set_rx_mode,
1842 .ndo_get_stats = cp_get_stats,
1843 .ndo_do_ioctl = cp_ioctl,
1844 .ndo_start_xmit = cp_start_xmit,
1845 .ndo_tx_timeout = cp_tx_timeout,
1846 #if CP_VLAN_TAG_USED
1847 .ndo_vlan_rx_register = cp_vlan_rx_register,
1850 .ndo_change_mtu = cp_change_mtu,
1853 #ifdef CONFIG_NET_POLL_CONTROLLER
1854 .ndo_poll_controller = cp_poll_controller,
/* PCI probe: reject pre-C+ 8139 revisions, allocate the net_device, set
 * up PCI/DMA/MMIO resources, read the MAC from EEPROM, register the
 * netdev, and optionally drop to D3 if WOL was left armed by firmware.
 * (Excerpt: several error-path gotos/returns are elided.) */
1858 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1860 struct net_device *dev;
1861 struct cp_private *cp;
1864 resource_size_t pciaddr;
1865 unsigned int addr_len, i, pci_using_dac;
/* Print the version banner exactly once across all probed devices. */
1868 static int version_printed;
1869 if (version_printed++ == 0)
1870 pr_info("%s", version);
/* Plain RTL8139 (revision < 0x20) lacks the C+ descriptor engine; the
 * 8139too driver handles those. */
1873 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1874 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1875 dev_info(&pdev->dev,
1876 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1877 pdev->vendor, pdev->device, pdev->revision);
1881 dev = alloc_etherdev(sizeof(struct cp_private));
1884 SET_NETDEV_DEV(dev, &pdev->dev);
1886 cp = netdev_priv(dev);
1889 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1890 spin_lock_init (&cp->lock);
/* Wire up the software MDIO emulation (see mdio_read/mdio_write). */
1891 cp->mii_if.dev = dev;
1892 cp->mii_if.mdio_read = mdio_read;
1893 cp->mii_if.mdio_write = mdio_write;
1894 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1895 cp->mii_if.phy_id_mask = 0x1f;
1896 cp->mii_if.reg_num_mask = 0x1f;
1897 cp_set_rxbufsize(cp);
1899 rc = pci_enable_device(pdev);
1903 rc = pci_set_mwi(pdev);
1905 goto err_out_disable;
1907 rc = pci_request_regions(pdev, DRV_NAME);
/* BAR 1 is the MMIO register window; it must exist and be large enough. */
1911 pciaddr = pci_resource_start(pdev, 1);
1914 dev_err(&pdev->dev, "no MMIO resource\n");
1917 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1919 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1920 (unsigned long long)pci_resource_len(pdev, 1));
1924 /* Configure DMA attributes. */
/* Prefer 64-bit DMA when dma_addr_t can hold it; otherwise fall back to
 * 32-bit masks, failing the probe if neither is usable. */
1925 if ((sizeof(dma_addr_t) > 4) &&
1926 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1927 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1932 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1935 "No usable DMA configuration, aborting.\n");
1938 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1941 "No usable consistent DMA configuration, "
/* Cache the CpCmd value programmed at hw start. */
1947 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1948 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1950 regs = ioremap(pciaddr, CP_REGS_SIZE);
1953 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1954 (unsigned long long)pci_resource_len(pdev, 1),
1955 (unsigned long long)pciaddr);
1958 dev->base_addr = (unsigned long) regs;
1963 /* read MAC address from EEPROM */
1964 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
/* MAC lives in EEPROM words 7..9; stored little-endian. */
1965 for (i = 0; i < 3; i++)
1966 ((__le16 *) (dev->dev_addr))[i] =
1967 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1968 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1970 dev->netdev_ops = &cp_netdev_ops;
1971 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1972 dev->ethtool_ops = &cp_ethtool_ops;
1973 dev->watchdog_timeo = TX_TIMEOUT;
1975 #if CP_VLAN_TAG_USED
1976 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1980 dev->features |= NETIF_F_HIGHDMA;
1982 #if 0 /* disabled by default until verified */
1983 dev->features |= NETIF_F_TSO;
1986 dev->irq = pdev->irq;
1988 rc = register_netdev(dev);
1992 pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1998 pci_set_drvdata(pdev, dev);
2000 /* enable busmastering and memory-write-invalidate */
2001 pci_set_master(pdev);
/* If firmware left WOL armed, park the device in a low-power state. */
2003 if (cp->wol_enabled)
2004 cp_set_d3_state (cp);
/* Error unwind: release in reverse order of acquisition. */
2011 pci_release_regions(pdev);
2013 pci_clear_mwi(pdev);
2015 pci_disable_device(pdev);
/* PCI remove: unregister the netdev, restore D0 if WOL had put the chip
 * to sleep, then release PCI resources.  (Excerpt: iounmap/free_netdev
 * lines elided.) */
2021 static void cp_remove_one (struct pci_dev *pdev)
2023 struct net_device *dev = pci_get_drvdata(pdev);
2024 struct cp_private *cp = netdev_priv(dev);
2026 unregister_netdev(dev);
2028 if (cp->wol_enabled)
2029 pci_set_power_state (pdev, PCI_D0);
2030 pci_release_regions(pdev);
2031 pci_clear_mwi(pdev);
2032 pci_disable_device(pdev);
2033 pci_set_drvdata(pdev, NULL);
2038 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2040 struct net_device *dev = pci_get_drvdata(pdev);
2041 struct cp_private *cp = netdev_priv(dev);
2042 unsigned long flags;
2044 if (!netif_running(dev))
2047 netif_device_detach (dev);
2048 netif_stop_queue (dev);
2050 spin_lock_irqsave (&cp->lock, flags);
2052 /* Disable Rx and Tx */
2053 cpw16 (IntrMask, 0);
2054 cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn));
2056 spin_unlock_irqrestore (&cp->lock, flags);
2058 pci_save_state(pdev);
2059 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2060 pci_set_power_state(pdev, pci_choose_state(pdev, state));
/* Legacy PCI .resume hook: restore D0 and PCI state, re-init the ring
 * indices, restart the queue, and re-check link under the lock.
 * (Excerpt: hw re-init between ring-index reset and queue start, and the
 * return statements, are elided.) */
2065 static int cp_resume (struct pci_dev *pdev)
2067 struct net_device *dev = pci_get_drvdata (pdev);
2068 struct cp_private *cp = netdev_priv(dev);
2069 unsigned long flags;
2071 if (!netif_running(dev))
2074 netif_device_attach (dev);
2076 pci_set_power_state(pdev, PCI_D0);
2077 pci_restore_state(pdev);
2078 pci_enable_wake(pdev, PCI_D0, 0);
2080 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2081 cp_init_rings_index (cp);
2083 netif_start_queue (dev);
2085 spin_lock_irqsave (&cp->lock, flags);
/* Third arg false: this is a re-check, not initial link bring-up. */
2087 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2089 spin_unlock_irqrestore (&cp->lock, flags);
2093 #endif /* CONFIG_PM */
/* PCI driver descriptor; suspend/resume only exist under CONFIG_PM.
 * (Excerpt: .name initializer and #ifdef lines elided.) */
2095 static struct pci_driver cp_driver = {
2097 .id_table = cp_pci_tbl,
2098 .probe = cp_init_one,
2099 .remove = cp_remove_one,
2101 .resume = cp_resume,
2102 .suspend = cp_suspend,
/* Module entry point: print the banner and register the PCI driver. */
2106 static int __init cp_init (void)
2109 pr_info("%s", version);
2111 return pci_register_driver(&cp_driver);
/* Module exit point: unregister the PCI driver. */
2114 static void __exit cp_exit (void)
2116 pci_unregister_driver (&cp_driver);
2119 module_init(cp_init);
2120 module_exit(cp_exit);