2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
/*
 * Logging wrappers, NAPI selection, ring sizing and MMIO accessor macros.
 * NOTE(review): this extraction is lossy — the backslash-continued macro
 * bodies (the printk(...) continuation lines of net_drv/net_probe/... and
 * of SIS190_MSG_DEFAULT) are missing; confirm against pristine source.
 */
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
46 #ifdef CONFIG_SIS190_NAPI
47 #define NAPI_SUFFIX "-NAPI"
49 #define NAPI_SUFFIX ""
52 #define DRV_VERSION "1.2" NAPI_SUFFIX
53 #define DRV_NAME "sis190"
54 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
55 #define PFX DRV_NAME ": "
57 #ifdef CONFIG_SIS190_NAPI
58 #define sis190_rx_skb netif_receive_skb
59 #define sis190_rx_quota(count, quota) min(count, quota)
61 #define sis190_rx_skb netif_rx
62 #define sis190_rx_quota(count, quota) count
65 #define MAC_ADDR_LEN 6
67 #define NUM_TX_DESC 64
68 #define NUM_RX_DESC 64
69 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
70 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
71 #define RX_BUF_SIZE 1536
72 #define RX_BUF_MASK 0xfff8
74 #define SIS190_REGS_SIZE 0x80
75 #define SIS190_TX_TIMEOUT (6*HZ)
76 #define SIS190_PHY_TIMEOUT (10*HZ)
77 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
78 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
81 /* Enhanced PHY access register bit definitions */
82 #define EhnMIIread 0x0000
83 #define EhnMIIwrite 0x0020
84 #define EhnMIIdataShift 16
85 #define EhnMIIpmdShift 6 /* 7016 only */
86 #define EhnMIIregShift 11
87 #define EhnMIIreq 0x0010
88 #define EhnMIInotDone 0x0010
90 /* Write/read MMIO register; all expect a local 'ioaddr' in scope. */
91 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
92 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
93 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
94 #define SIS_R8(reg) readb (ioaddr + (reg))
95 #define SIS_R16(reg) readw (ioaddr + (reg))
96 #define SIS_R32(reg) readl (ioaddr + (reg))
/* Posted-write flush: a dummy read forces prior MMIO writes to the chip. */
98 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
/*
 * Hardware register map and bit definitions.
 * NOTE(review): several enumerators referenced elsewhere (IntrControl,
 * IntrStatus, IntrMask, TxControl, RxControl, RxMacControl, RxMacAddr,
 * RxHashTable, RxWolCtrl, ROMControl, ROMInterface, GMIIControl, RxQInt,
 * RxHalt, TxHalt, OWNbit, INTbit, DEFbit, CRCbit, PADbit, RingEnd, RxCRC,
 * EEREQ, EEROP, EEPROMMACAddr, ...) are missing from this extraction.
 */
100 enum sis190_registers {
102 TxDescStartAddr = 0x04,
103 rsv0 = 0x08, // reserved
104 TxSts = 0x0c, // unused (Control/Status)
106 RxDescStartAddr = 0x14,
107 rsv1 = 0x18, // reserved
108 RxSts = 0x1c, // unused
112 IntrTimer = 0x2c, // unused (Interrupt Timer)
113 PMControl = 0x30, // unused (Power Mgmt Control/Status)
114 rsv2 = 0x34, // reserved
117 StationControl = 0x40,
119 GIoCR = 0x48, // unused (GMAC IO Compensation)
120 GIoCtrl = 0x4c, // unused (GMAC IO Control)
122 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
123 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
124 rsv3 = 0x5c, // reserved
128 // Undocumented = 0x6c,
130 RxWolData = 0x74, // unused (Rx WOL Data Access)
131 RxMPSControl = 0x78, // unused (Rx MPS Control)
132 rsv4 = 0x7c, // reserved
135 enum sis190_register_content {
/* Interrupt status/mask bits */
137 SoftInt = 0x40000000, // unused
138 Timeup = 0x20000000, // unused
139 PauseFrame = 0x00080000, // unused
140 MagicPacket = 0x00040000, // unused
141 WakeupFrame = 0x00020000, // unused
142 LinkChange = 0x00010000,
143 RxQEmpty = 0x00000080,
145 TxQ1Empty = 0x00000020, // unused
146 TxQ1Int = 0x00000010,
147 TxQ0Empty = 0x00000008, // unused
148 TxQ0Int = 0x00000004,
/* Rx descriptor status error bits */
153 RxRES = 0x00200000, // unused
155 RxRUNT = 0x00100000, // unused
156 RxRWT = 0x00400000, // unused
160 CmdRxEnb = 0x08, // unused
162 RxBufEmpty = 0x01, // unused
165 Cfg9346_Lock = 0x00, // unused
166 Cfg9346_Unlock = 0xc0, // unused
/* RxMacControl accept-filter bits */
169 AcceptErr = 0x20, // unused
170 AcceptRunt = 0x10, // unused
171 AcceptBroadcast = 0x0800,
172 AcceptMulticast = 0x0400,
173 AcceptMyPhys = 0x0200,
174 AcceptAllPhys = 0x0100,
178 RxCfgDMAShift = 8, // 0x1a in RxControl ?
181 TxInterFrameGapShift = 24,
182 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
192 LinkStatus = 0x02, // unused
193 FullDup = 0x01, // unused
196 TBILinkOK = 0x02000000, // unused
/* Tx/Rx descriptor status/size bits */
213 enum _DescStatusBit {
223 RxSizeMask = 0x0000ffff
226 enum sis190_eeprom_access_register_bits {
227 EECS = 0x00000001, // unused
228 EECLK = 0x00000002, // unused
229 EEDO = 0x00000008, // unused
230 EEDI = 0x00000004, // unused
233 EEWOP = 0x00000100 // unused
236 /* EEPROM Addresses */
237 enum sis190_eeprom_address {
238 EEPROMSignature = 0x00,
239 EEPROMCLK = 0x01, // unused
/*
 * Per-adapter driver state.  NOTE(review): several members used elsewhere
 * (lock, msg_enable, cur_rx/dirty_rx, cur_tx/dirty_tx, rx_buf_sz,
 * tx_dma/rx_dma) are missing from this extraction.
 */
244 struct sis190_private {
245 void __iomem *mmio_addr;
246 struct pci_dev *pci_dev;
247 struct net_device_stats stats;
256 struct RxDesc *RxDescRing;
257 struct TxDesc *TxDescRing;
258 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
259 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
260 struct work_struct phy_task;
261 struct timer_list timer;
263 struct mii_if_info mii_if;
/* Chip identification table, indexed by pci_device_id.driver_data.
 * NOTE(review): 'const static' — conventional order is 'static const'. */
266 const static struct {
268 u8 version; /* depend on docs */
269 u32 RxConfigMask; /* clear the bits supported by this chip */
270 } sis_chip_info[] = {
271 { DRV_NAME, 0x00, 0xff7e1880, },
274 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
275 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
279 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
/* Frames shorter than this are copied into a fresh skb on receive. */
281 static int rx_copybreak = 200;
287 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
288 module_param(rx_copybreak, int, 0);
289 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
290 module_param_named(debug, debug.msg_enable, int, 0);
291 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
292 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
293 MODULE_VERSION(DRV_VERSION);
294 MODULE_LICENSE("GPL");
/* Interrupt sources the driver services; everything else stays masked. */
296 static const u32 sis190_intr_mask =
297 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
300 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
301 * The chips use a 64 element hash table based on the Ethernet CRC.
303 static int multicast_filter_limit = 32;
/*
 * __mdio_cmd - write a command word to GMIIControl and poll (100 tries)
 * for the EhnMIInotDone bit to clear; logs an error on timeout.
 * NOTE(review): loop body/delay and closing braces are missing here.
 */
305 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
309 SIS_W32(GMIIControl, ctl);
313 for (i = 0; i < 100; i++) {
314 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
320 printk(KERN_ERR PFX "PHY command failed !\n");
/* mdio_write - write 'val' to PHY register 'reg' via an EhnMII write
 * command.  NOTE(review): the 'pmd' local declaration is missing. */
323 static void mdio_write(void __iomem *ioaddr, int reg, int val)
327 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
328 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
329 (((u32) val) << EhnMIIdataShift));
/* mdio_read - issue an EhnMII read and return the 16-bit data field
 * extracted from GMIIControl. */
332 static int mdio_read(void __iomem *ioaddr, int reg)
336 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
337 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
339 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/* mii_if_info adapter: the phy_id argument is ignored — the PMD field is
 * handled inside mdio_write/mdio_read. */
342 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
344 struct sis190_private *tp = netdev_priv(dev);
346 mdio_write(tp->mmio_addr, reg, val);
/* mii_if_info adapter for reads; phy_id likewise ignored. */
349 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
351 struct sis190_private *tp = netdev_priv(dev);
353 return mdio_read(tp->mmio_addr, reg);
/*
 * sis190_read_eeprom - read one 16-bit word from the EEPROM at 'reg'.
 * Bails out early if ROMControl says no EEPROM is present; otherwise
 * starts a read op and polls (200 tries) for EEREQ to clear, then pulls
 * the data from the top half of ROMInterface.
 * NOTE(review): local declarations, delays and the final return are
 * missing from this extraction.
 */
356 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
361 if (!(SIS_R32(ROMControl) & 0x0002))
364 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
366 for (i = 0; i < 200; i++) {
367 if (!(SIS_R32(ROMInterface) & EEREQ)) {
368 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
/* sis190_irq_mask_and_ack - mask all interrupts and ack any pending. */
377 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
379 SIS_W32(IntrMask, 0x00);
380 SIS_W32(IntrStatus, 0xffffffff);
/* sis190_asic_down - stop Tx/Rx DMA and quiesce interrupts. */
384 static void sis190_asic_down(void __iomem *ioaddr)
386 /* Stop the chip's Tx and Rx DMA processes. */
388 SIS_W32(TxControl, 0x1a00);
389 SIS_W32(RxControl, 0x1a00);
391 sis190_irq_mask_and_ack(ioaddr);
/* Tag a descriptor as the ring's last entry (hardware wraps after it). */
394 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
396 desc->size |= cpu_to_le32(RingEnd);
/* Hand a descriptor back to the NIC: restore its buffer size (preserving
 * the RingEnd bit) and set OWN+INT so the chip may fill it. */
399 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
401 u32 eor = le32_to_cpu(desc->size) & RingEnd;
404 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
406 desc->status = cpu_to_le32(OWNbit | INTbit);
/* Attach a freshly DMA-mapped buffer to a descriptor and give it to the
 * chip. */
409 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
412 desc->addr = cpu_to_le32(mapping);
413 sis190_give_to_asic(desc, rx_buf_sz);
/* Poison a descriptor so the chip will never use it (keeps RingEnd only;
 * 0xdeadbeef address aids debugging). */
416 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
419 desc->addr = 0xdeadbeef;
420 desc->size &= cpu_to_le32(RingEnd);
/* Allocate one Rx skb, map it for DMA and attach it to 'desc'.
 * NOTE(review): the allocation-failure path and return value handling are
 * partially missing from this extraction. */
425 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
426 struct RxDesc *desc, u32 rx_buf_sz)
432 skb = dev_alloc_skb(rx_buf_sz);
438 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
441 sis190_map_to_asic(desc, mapping, rx_buf_sz);
447 sis190_make_unusable_by_asic(desc);
/* Refill empty Rx ring slots in [start, end); returns how many were
 * successfully (re)stocked. */
451 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
456 for (cur = start; cur < end; cur++) {
457 int ret, i = cur % NUM_RX_DESC;
459 if (tp->Rx_skbuff[i])
462 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
463 tp->RxDescRing + i, tp->rx_buf_sz);
/* Copybreak: for small packets (< rx_copybreak) copy the payload into a
 * fresh skb and immediately recycle the DMA buffer back to the chip. */
470 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
471 struct RxDesc *desc, int rx_buf_sz)
475 if (pkt_size < rx_copybreak) {
478 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
480 skb_reserve(skb, NET_IP_ALIGN);
481 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
483 sis190_give_to_asic(desc, rx_buf_sz);
/*
 * sis190_rx_interrupt - Rx ring processing.
 * Walks descriptors from cur_rx up to the quota (NAPI or unbounded),
 * drops CRC/pad/fragment errors back to the chip, delivers good frames
 * (via copybreak or by unmapping the buffer), then refills the ring.
 * NOTE(review): many lines (locals, loop exits, status handling, the
 * final return) are missing from this extraction — do not rewrite from
 * this text alone.
 */
490 static int sis190_rx_interrupt(struct net_device *dev,
491 struct sis190_private *tp, void __iomem *ioaddr)
493 struct net_device_stats *stats = &tp->stats;
494 u32 rx_left, cur_rx = tp->cur_rx;
497 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
498 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
500 for (; rx_left > 0; rx_left--, cur_rx++) {
501 unsigned int entry = cur_rx % NUM_RX_DESC;
502 struct RxDesc *desc = tp->RxDescRing + entry;
/* Descriptor still owned by the chip: nothing more to reap. */
505 if (desc->status & OWNbit)
508 status = le32_to_cpu(desc->PSize);
510 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
513 if (status & RxCRC) {
514 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
517 stats->rx_crc_errors++;
518 sis190_give_to_asic(desc, tp->rx_buf_sz);
519 } else if (!(status & PADbit)) {
520 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
523 stats->rx_length_errors++;
524 sis190_give_to_asic(desc, tp->rx_buf_sz);
526 struct sk_buff *skb = tp->Rx_skbuff[entry];
/* Strip the 4-byte FCS from the reported size. */
527 int pkt_size = (status & RxSizeMask) - 4;
528 void (*pci_action)(struct pci_dev *, dma_addr_t,
529 size_t, int) = pci_dma_sync_single_for_device;
531 if (unlikely(pkt_size > tp->rx_buf_sz)) {
532 net_intr(tp, KERN_INFO
533 "%s: (frag) status = %08x.\n",
536 stats->rx_length_errors++;
537 sis190_give_to_asic(desc, tp->rx_buf_sz);
541 pci_dma_sync_single_for_cpu(tp->pci_dev,
542 le32_to_cpu(desc->addr), tp->rx_buf_sz,
/* If the copybreak path did not copy, the original skb is consumed:
 * unmap it for good and leave the slot empty for refill. */
545 if (sis190_try_rx_copy(&skb, pkt_size, desc,
547 pci_action = pci_unmap_single;
548 tp->Rx_skbuff[entry] = NULL;
549 sis190_make_unusable_by_asic(desc);
552 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
553 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
556 skb_put(skb, pkt_size);
557 skb->protocol = eth_type_trans(skb, dev);
561 dev->last_rx = jiffies;
562 stats->rx_bytes += pkt_size;
566 count = cur_rx - tp->cur_rx;
/* Restock the slots we emptied; warn if allocation came up dry. */
569 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
570 if (!delta && count && netif_msg_intr(tp))
571 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
572 tp->dirty_rx += delta;
574 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
575 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/* Unmap a transmitted skb's DMA buffer (padded to ETH_ZLEN minimum, to
 * match the mapping done at xmit time) and scrub the descriptor. */
580 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
585 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
587 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
589 memset(desc, 0x00, sizeof(*desc));
/*
 * sis190_tx_interrupt - reap completed Tx descriptors.
 * Frees skbs for descriptors the chip has released (OWN clear), updates
 * stats, and restarts the queue only if it was stopped because the ring
 * was completely full (see queue_stopped note below).
 */
592 static void sis190_tx_interrupt(struct net_device *dev,
593 struct sis190_private *tp, void __iomem *ioaddr)
595 u32 pending, dirty_tx = tp->dirty_tx;
597 * It would not be needed if queueing was allowed to be enabled
598 * again too early (hint: think preempt and unclocked smp systems).
600 unsigned int queue_stopped;
603 pending = tp->cur_tx - dirty_tx;
604 queue_stopped = (pending == NUM_TX_DESC);
606 for (; pending; pending--, dirty_tx++) {
607 unsigned int entry = dirty_tx % NUM_TX_DESC;
608 struct TxDesc *txd = tp->TxDescRing + entry;
/* Still owned by the chip: stop reaping here. */
611 if (le32_to_cpu(txd->status) & OWNbit)
614 skb = tp->Tx_skbuff[entry];
616 tp->stats.tx_packets++;
617 tp->stats.tx_bytes += skb->len;
619 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
620 tp->Tx_skbuff[entry] = NULL;
621 dev_kfree_skb_irq(skb);
624 if (tp->dirty_tx != dirty_tx) {
625 tp->dirty_tx = dirty_tx;
628 netif_wake_queue(dev);
633 * The interrupt handler does all of the Rx thread work and cleans up after
/*
 * sis190_interrupt - top-level (shared) IRQ handler.
 * Reads and acks IntrStatus, then dispatches: LinkChange -> phy worker,
 * Rx bits -> sis190_rx_interrupt, TxQ0Int -> sis190_tx_interrupt.
 * Returns IRQ_NONE for a spurious/foreign interrupt (status 0/0xffffffff).
 * NOTE(review): the 'handled = 1' assignment and some dispatch lines are
 * missing from this extraction.
 */
636 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
638 struct net_device *dev = __dev;
639 struct sis190_private *tp = netdev_priv(dev);
640 void __iomem *ioaddr = tp->mmio_addr;
641 unsigned int handled = 0;
644 status = SIS_R32(IntrStatus);
646 if ((status == 0xffffffff) || !status)
/* Late interrupt after the device was brought down: just quiesce. */
651 if (unlikely(!netif_running(dev))) {
652 sis190_asic_down(ioaddr);
656 SIS_W32(IntrStatus, status);
658 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
660 if (status & LinkChange) {
661 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
662 schedule_work(&tp->phy_task);
666 sis190_rx_interrupt(dev, tp, ioaddr);
668 if (status & TxQ0Int)
669 sis190_tx_interrupt(dev, tp, ioaddr);
671 return IRQ_RETVAL(handled);
674 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the IRQ line disabled. */
675 static void sis190_netpoll(struct net_device *dev)
677 struct sis190_private *tp = netdev_priv(dev);
678 struct pci_dev *pdev = tp->pci_dev;
680 disable_irq(pdev->irq);
681 sis190_interrupt(pdev->irq, dev, NULL);
682 enable_irq(pdev->irq);
/* Unmap and free one Rx skb, then poison its descriptor. */
686 static void sis190_free_rx_skb(struct sis190_private *tp,
687 struct sk_buff **sk_buff, struct RxDesc *desc)
689 struct pci_dev *pdev = tp->pci_dev;
691 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
693 dev_kfree_skb(*sk_buff);
695 sis190_make_unusable_by_asic(desc);
/* Release every populated Rx ring slot. */
698 static void sis190_rx_clear(struct sis190_private *tp)
702 for (i = 0; i < NUM_RX_DESC; i++) {
703 if (!tp->Rx_skbuff[i])
705 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset all ring cursors to the empty state. */
709 static void sis190_init_ring_indexes(struct sis190_private *tp)
711 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Initialize both rings: clear skb tables, stock the Rx ring completely
 * (failure unwinds via rx_clear — lines missing here), and mark the last
 * Rx descriptor so the hardware wraps. */
714 static int sis190_init_ring(struct net_device *dev)
716 struct sis190_private *tp = netdev_priv(dev);
718 sis190_init_ring_indexes(tp);
720 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
721 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
723 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
726 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/*
 * sis190_set_rx_mode - program the Rx accept filter and 64-bit multicast
 * hash from dev->flags and the device's multicast list, under tp->lock.
 */
735 static void sis190_set_rx_mode(struct net_device *dev)
737 struct sis190_private *tp = netdev_priv(dev);
738 void __iomem *ioaddr = tp->mmio_addr;
740 u32 mc_filter[2]; /* Multicast hash filter */
743 if (dev->flags & IFF_PROMISC) {
744 /* Unconditionally log net taps. */
745 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
748 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
750 mc_filter[1] = mc_filter[0] = 0xffffffff;
751 } else if ((dev->mc_count > multicast_filter_limit) ||
752 (dev->flags & IFF_ALLMULTI)) {
753 /* Too many to filter perfectly -- accept all multicasts. */
754 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
755 mc_filter[1] = mc_filter[0] = 0xffffffff;
757 struct dev_mc_list *mclist;
760 rx_mode = AcceptBroadcast | AcceptMyPhys;
761 mc_filter[1] = mc_filter[0] = 0;
/* Hash each address: top 6 bits of the Ethernet CRC select one of 64
 * filter bits. */
762 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
763 i++, mclist = mclist->next) {
765 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
766 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
767 rx_mode |= AcceptMulticast;
771 spin_lock_irqsave(&tp->lock, flags);
773 SIS_W16(RxMacControl, rx_mode | 0x2);
774 SIS_W32(RxHashTable, mc_filter[0]);
775 SIS_W32(RxHashTable + 4, mc_filter[1]);
777 spin_unlock_irqrestore(&tp->lock, flags);
/* Soft-reset the chip, then bring DMA/interrupts to a known-quiet state.
 * NOTE(review): the delay between the two IntrControl writes is missing
 * from this extraction. */
780 static void sis190_soft_reset(void __iomem *ioaddr)
782 SIS_W32(IntrControl, 0x8000);
785 SIS_W32(IntrControl, 0x0);
786 sis190_asic_down(ioaddr);
/*
 * sis190_hw_start - full datapath bring-up: reset, program ring base
 * addresses, defaults for MAC/station control, Rx filter, interrupt mask,
 * then enable Tx/Rx DMA and start the queue.
 */
790 static void sis190_hw_start(struct net_device *dev)
792 struct sis190_private *tp = netdev_priv(dev);
793 void __iomem *ioaddr = tp->mmio_addr;
795 sis190_soft_reset(ioaddr);
797 SIS_W32(TxDescStartAddr, tp->tx_dma);
798 SIS_W32(RxDescStartAddr, tp->rx_dma);
800 SIS_W32(IntrStatus, 0xffffffff);
801 SIS_W32(IntrMask, 0x0);
803 * Default is 100Mbps.
804 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
806 SIS_W16(StationControl, 0x1901);
807 SIS_W32(GMIIControl, 0x0);
808 SIS_W32(TxMacControl, 0x60);
809 SIS_W16(RxMacControl, 0x02);
810 SIS_W32(RxHashTable, 0x0);
812 SIS_W32(RxWolCtrl, 0x0);
813 SIS_W32(RxWolData, 0x0);
817 sis190_set_rx_mode(dev);
819 /* Enable all known interrupts by setting the interrupt mask. */
820 SIS_W32(IntrMask, sis190_intr_mask);
822 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
823 SIS_W32(RxControl, 0x1a1d);
825 netif_start_queue(dev);
/*
 * sis190_phy_task - deferred PHY/link monitor (workqueue).
 * While the PHY is resetting, re-poll shortly; until autoneg completes,
 * kick a PHY reset and retry later; once negotiated, translate the link
 * partner ability (plus a vendor-specific register 0x1f read) into a
 * StationControl value and report carrier up.
 * NOTE(review): locals, the reg31[] entries' ctl values, locking and the
 * final unlock/return are missing from this extraction.
 */
828 static void sis190_phy_task(void * data)
830 struct net_device *dev = data;
831 struct sis190_private *tp = netdev_priv(dev);
832 void __iomem *ioaddr = tp->mmio_addr;
837 val = mdio_read(ioaddr, MII_BMCR);
838 if (val & BMCR_RESET) {
839 // FIXME: needlessly high ? -- FR 02/07/2005
840 mod_timer(&tp->timer, jiffies + HZ/10);
841 } else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
842 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
844 mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
845 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/* LPA-to-mode translation table; last entry is the catch-all. */
853 { LPA_1000XFULL | LPA_SLCT,
854 "1000 Mbps Full Duplex",
856 { LPA_1000XHALF | LPA_SLCT,
857 "1000 Mbps Half Duplex",
860 "100 Mbps Full Duplex",
863 "100 Mbps Half Duplex",
866 "10 Mbps Full Duplex",
869 "10 Mbps Half Duplex",
871 { 0, "unknown", 0x0000 }
874 val = mdio_read(ioaddr, 0x1f);
875 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
877 val = mdio_read(ioaddr, MII_LPA);
878 net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
880 for (p = reg31; p->ctl; p++) {
881 if ((val & p->val) == p->val)
885 SIS_W16(StationControl, p->ctl);
886 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
888 netif_carrier_on(dev);
/* Timer callback: re-arm the PHY worker while the interface is up. */
894 static void sis190_phy_timer(unsigned long __opaque)
896 struct net_device *dev = (struct net_device *)__opaque;
897 struct sis190_private *tp = netdev_priv(dev);
899 if (likely(netif_running(dev)))
900 schedule_work(&tp->phy_task);
/* Stop the PHY poll timer (synchronously). */
903 static inline void sis190_delete_timer(struct net_device *dev)
905 struct sis190_private *tp = netdev_priv(dev);
907 del_timer_sync(&tp->timer);
/* Arm the PHY poll timer for its first expiry. */
910 static inline void sis190_request_timer(struct net_device *dev)
912 struct sis190_private *tp = netdev_priv(dev);
913 struct timer_list *timer = &tp->timer;
916 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
917 timer->data = (unsigned long)dev;
918 timer->function = sis190_phy_timer;
/* Pick rx_buf_sz from the MTU, rounded down to the 8-byte granularity the
 * hardware's size field requires (RX_BUF_MASK). */
922 static void sis190_set_rxbufsize(struct sis190_private *tp,
923 struct net_device *dev)
925 unsigned int mtu = dev->mtu;
927 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
928 /* RxDesc->size has a licence to kill the lower bits */
929 if (tp->rx_buf_sz & 0x07) {
931 tp->rx_buf_sz &= RX_BUF_MASK;
/*
 * sis190_open - bring the interface up: allocate DMA-coherent rings,
 * populate the Rx ring, start the PHY worker/timer, grab the (shared)
 * IRQ and start the hardware.  Error paths unwind in reverse order.
 * NOTE(review): several error-label lines and returns are missing from
 * this extraction.
 */
935 static int sis190_open(struct net_device *dev)
937 struct sis190_private *tp = netdev_priv(dev);
938 struct pci_dev *pdev = tp->pci_dev;
941 sis190_set_rxbufsize(tp, dev);
944 * Rx and Tx descriptors need 256 bytes alignment.
945 * pci_alloc_consistent() guarantees a stronger alignment.
947 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
951 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
955 rc = sis190_init_ring(dev);
959 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
961 sis190_request_timer(dev);
963 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
965 goto err_release_timer_2;
967 sis190_hw_start(dev);
972 sis190_delete_timer(dev);
975 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
978 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/* Drop every queued-but-unsent Tx skb and reset the Tx cursors; each
 * dropped frame is counted in tx_dropped. */
983 static void sis190_tx_clear(struct sis190_private *tp)
987 for (i = 0; i < NUM_TX_DESC; i++) {
988 struct sk_buff *skb = tp->Tx_skbuff[i];
993 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
994 tp->Tx_skbuff[i] = NULL;
997 tp->stats.tx_dropped++;
999 tp->cur_tx = tp->dirty_tx = 0;
/*
 * sis190_down - quiesce the device for close: stop timer and queue,
 * flush the PHY worker, halt the ASIC under the lock, wait out in-flight
 * IRQ/poll activity, then free both rings' buffers.
 * NOTE(review): the loop structure around the IntrMask re-check is
 * partially missing from this extraction.
 */
1002 static void sis190_down(struct net_device *dev)
1004 struct sis190_private *tp = netdev_priv(dev);
1005 void __iomem *ioaddr = tp->mmio_addr;
1006 unsigned int poll_locked = 0;
1008 sis190_delete_timer(dev);
1010 netif_stop_queue(dev);
1012 flush_scheduled_work();
1015 spin_lock_irq(&tp->lock);
1017 sis190_asic_down(ioaddr);
1019 spin_unlock_irq(&tp->lock);
1021 synchronize_irq(dev->irq);
1024 netif_poll_disable(dev);
1028 synchronize_sched();
1030 } while (SIS_R32(IntrMask));
1032 sis190_tx_clear(tp);
1033 sis190_rx_clear(tp);
/* sis190_close - net_device stop hook: tear down, release IRQ and the
 * DMA-coherent rings. */
1036 static int sis190_close(struct net_device *dev)
1038 struct sis190_private *tp = netdev_priv(dev);
1039 struct pci_dev *pdev = tp->pci_dev;
1043 free_irq(dev->irq, dev);
1045 netif_poll_enable(dev);
1047 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1048 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1050 tp->TxDescRing = NULL;
1051 tp->RxDescRing = NULL;
/*
 * sis190_start_xmit - queue one frame for transmission.
 * Pads short frames to ETH_ZLEN, claims the next Tx descriptor, maps the
 * payload for DMA, fills size/flags, hands ownership to the chip and
 * kicks TxControl.  Stops the queue when the ring becomes full; the
 * small dirty_tx re-check guards against a race with the Tx completion
 * path re-enabling the queue.
 * NOTE(review): skb_padto NULL-check lines and the cur_tx increment are
 * missing from this extraction.
 */
1056 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1058 struct sis190_private *tp = netdev_priv(dev);
1059 void __iomem *ioaddr = tp->mmio_addr;
1060 u32 len, entry, dirty_tx;
1061 struct TxDesc *desc;
1064 if (unlikely(skb->len < ETH_ZLEN)) {
1065 skb = skb_padto(skb, ETH_ZLEN);
1067 tp->stats.tx_dropped++;
1075 entry = tp->cur_tx % NUM_TX_DESC;
1076 desc = tp->TxDescRing + entry;
1078 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1079 netif_stop_queue(dev);
1080 net_tx_err(tp, KERN_ERR PFX
1081 "%s: BUG! Tx Ring full when queue awake!\n",
1083 return NETDEV_TX_BUSY;
1086 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1088 tp->Tx_skbuff[entry] = skb;
1090 desc->PSize = cpu_to_le32(len);
1091 desc->addr = cpu_to_le32(mapping);
1093 desc->size = cpu_to_le32(len);
1094 if (entry == (NUM_TX_DESC - 1))
1095 desc->size |= cpu_to_le32(RingEnd);
1099 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1105 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1107 dev->trans_start = jiffies;
1109 dirty_tx = tp->dirty_tx;
1110 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1111 netif_stop_queue(dev);
1113 if (dirty_tx != tp->dirty_tx)
1114 netif_wake_queue(dev);
1117 return NETDEV_TX_OK;
/* net_device get_stats hook.  NOTE(review): the 'return &tp->stats;'
 * line is missing from this extraction. */
1120 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1122 struct sis190_private *tp = netdev_priv(dev);
/* Undo sis190_init_board: unmap MMIO, release regions, disable device. */
1127 static void sis190_release_board(struct pci_dev *pdev)
1129 struct net_device *dev = pci_get_drvdata(pdev);
1130 struct sis190_private *tp = netdev_priv(dev);
1132 iounmap(tp->mmio_addr);
1133 pci_release_regions(pdev);
1134 pci_disable_device(pdev);
/*
 * sis190_init_board - PCI-level probe setup.
 * Allocates the net_device, enables the PCI device, validates BAR 0 as
 * MMIO of sufficient size, requests regions, sets the 32-bit DMA mask,
 * ioremaps the registers, seeds the mii_if callbacks and quiesces the
 * chip.  Error paths unwind in reverse (free_dev <- disable <- regions).
 * NOTE(review): several goto labels, the free_netdev call and the final
 * return are missing from this extraction.
 */
1138 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1140 struct sis190_private *tp;
1141 struct net_device *dev;
1142 void __iomem *ioaddr;
1145 dev = alloc_etherdev(sizeof(*tp));
1147 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1152 SET_MODULE_OWNER(dev);
1153 SET_NETDEV_DEV(dev, &pdev->dev);
1155 tp = netdev_priv(dev);
1156 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1158 rc = pci_enable_device(pdev);
1160 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1161 goto err_free_dev_1;
1166 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1167 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1169 goto err_pci_disable_2;
1171 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1172 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1174 goto err_pci_disable_2;
1177 rc = pci_request_regions(pdev, DRV_NAME);
1179 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1181 goto err_pci_disable_2;
1184 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1186 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1188 goto err_free_res_3;
1191 pci_set_master(pdev);
1193 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1195 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1198 goto err_free_res_3;
1202 tp->mmio_addr = ioaddr;
1204 tp->mii_if.dev = dev;
1205 tp->mii_if.mdio_read = __mdio_read;
1206 tp->mii_if.mdio_write = __mdio_write;
1207 // tp->mii_if.phy_id = XXX;
1208 tp->mii_if.phy_id_mask = 0x1f;
1209 tp->mii_if.reg_num_mask = 0x1f;
1211 sis190_irq_mask_and_ack(ioaddr);
1213 sis190_soft_reset(ioaddr);
1218 pci_release_regions(pdev);
1220 pci_disable_device(pdev);
/*
 * sis190_tx_timeout - watchdog handler for a stuck transmitter.
 * Disables Tx, masks interrupts, drops all pending Tx frames under the
 * lock (to fence off a concurrent shared-interrupt), then restarts the
 * hardware and wakes the queue.
 */
1228 static void sis190_tx_timeout(struct net_device *dev)
1230 struct sis190_private *tp = netdev_priv(dev);
1231 void __iomem *ioaddr = tp->mmio_addr;
1234 /* Disable Tx, if not already */
1235 tmp8 = SIS_R8(TxControl);
1236 if (tmp8 & CmdTxEnb)
1237 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1240 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1241 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1243 /* Disable interrupts by clearing the interrupt mask. */
1244 SIS_W32(IntrMask, 0x0000);
1246 /* Stop a shared interrupt from scavenging while we are. */
1247 spin_lock_irq(&tp->lock);
1248 sis190_tx_clear(tp);
1249 spin_unlock_irq(&tp->lock);
1251 /* ...and finally, reset everything. */
1252 sis190_hw_start(dev);
1254 netif_wake_queue(dev);
1257 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1258 struct net_device *dev)
1260 struct sis190_private *tp = netdev_priv(dev);
1261 void __iomem *ioaddr = tp->mmio_addr;
1265 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1268 /* Check to see if there is a sane EEPROM */
1269 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1271 if ((sig == 0xffff) || (sig == 0x0000)) {
1272 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1273 pci_name(pdev), sig);
1277 /* Get MAC address from EEPROM */
1278 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1279 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1281 ((u16 *)dev->dev_addr)[0] = le16_to_cpu(w);
1288 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1290 * @dev: network device to get address for
1292 * SiS965 model, use APC CMOS RAM to store MAC address.
1293 * APC CMOS RAM is accessed through ISA bridge.
1294 * MAC address is read into @net_dev->dev_addr.
1296 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1297 struct net_device *dev)
1299 struct sis190_private *tp = netdev_priv(dev);
1300 struct pci_dev *isa_bridge;
1304 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1307 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1309 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1314 /* Enable port 78h & 79h to access APC Registers. */
1315 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1316 reg = (tmp8 & ~0x02);
1317 pci_write_config_byte(isa_bridge, 0x48, reg);
1319 pci_read_config_byte(isa_bridge, 0x48, ®);
1321 for (i = 0; i < MAC_ADDR_LEN; i++) {
1322 outb(0x9 + i, 0x78);
1323 dev->dev_addr[i] = inb(0x79);
1329 /* Restore the value to ISA Bridge */
1330 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1331 pci_dev_put(isa_bridge);
1337 * sis190_init_rxfilter - Initialize the Rx filter
1338 * @dev: network device to initialize
1340 * Set receive filter address to our MAC address
1341 * and enable packet filtering.
1343 static inline void sis190_init_rxfilter(struct net_device *dev)
1345 struct sis190_private *tp = netdev_priv(dev);
1346 void __iomem *ioaddr = tp->mmio_addr;
1350 ctl = SIS_R16(RxMacControl);
1352 * Disable packet filtering before setting filter.
1353 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1354 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1356 SIS_W16(RxMacControl, ctl & ~0x0f00);
1358 for (i = 0; i < MAC_ADDR_LEN; i++)
1359 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1361 SIS_W16(RxMacControl, ctl);
/* Pick the MAC address source: PCI config byte 0x73 bit 0 selects APC
 * CMOS (SiS965) vs. EEPROM. */
1365 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1369 pci_read_config_byte(pdev, 0x73, &from);
1371 return (from & 0x00000001) ?
1372 sis190_get_mac_addr_from_apc(pdev, dev) :
1373 sis190_get_mac_addr_from_eeprom(pdev, dev);
/*
 * sis190_set_speed_auto - advertise 10/100 HD+FD plus 1000FD and
 * (re)start autonegotiation on the PHY.
 */
1376 static void sis190_set_speed_auto(struct net_device *dev)
1378 struct sis190_private *tp = netdev_priv(dev);
1379 void __iomem *ioaddr = tp->mmio_addr;
1382 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1384 val = mdio_read(ioaddr, MII_ADVERTISE);
1386 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1388 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1389 ADVERTISE_100FULL | ADVERTISE_10FULL |
1390 ADVERTISE_100HALF | ADVERTISE_10HALF);
1392 // Enable 1000 Full Mode.
1393 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1395 // Enable auto-negotiation and restart auto-negotiation.
1396 mdio_write(ioaddr, MII_BMCR,
1397 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
/* ethtool get_settings: delegate to the generic MII helper. */
1400 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1402 struct sis190_private *tp = netdev_priv(dev);
1404 return mii_ethtool_gset(&tp->mii_if, cmd);
/* ethtool set_settings: delegate to the generic MII helper. */
1407 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1409 struct sis190_private *tp = netdev_priv(dev);
1411 return mii_ethtool_sset(&tp->mii_if, cmd);
/* ethtool driver identification strings. */
1414 static void sis190_get_drvinfo(struct net_device *dev,
1415 struct ethtool_drvinfo *info)
1417 struct sis190_private *tp = netdev_priv(dev);
1419 strcpy(info->driver, DRV_NAME);
1420 strcpy(info->version, DRV_VERSION);
1421 strcpy(info->bus_info, pci_name(tp->pci_dev));
/* Size of the register dump exposed via ethtool. */
1424 static int sis190_get_regs_len(struct net_device *dev)
1426 return SIS190_REGS_SIZE;
/* ethtool register dump: snapshot the MMIO window under the lock. */
1429 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1432 struct sis190_private *tp = netdev_priv(dev);
1433 unsigned long flags;
1435 if (regs->len > SIS190_REGS_SIZE)
1436 regs->len = SIS190_REGS_SIZE;
1438 spin_lock_irqsave(&tp->lock, flags);
1439 memcpy_fromio(p, tp->mmio_addr, regs->len);
1440 spin_unlock_irqrestore(&tp->lock, flags);
/* ethtool nway_reset: restart autonegotiation via the MII library. */
1443 static int sis190_nway_reset(struct net_device *dev)
1445 struct sis190_private *tp = netdev_priv(dev);
1447 return mii_nway_restart(&tp->mii_if);
/* ethtool message-level get/set. */
1450 static u32 sis190_get_msglevel(struct net_device *dev)
1452 struct sis190_private *tp = netdev_priv(dev);
1454 return tp->msg_enable;
1457 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1459 struct sis190_private *tp = netdev_priv(dev);
1461 tp->msg_enable = value;
/* ethtool operations vector for the driver. */
1464 static struct ethtool_ops sis190_ethtool_ops = {
1465 .get_settings = sis190_get_settings,
1466 .set_settings = sis190_set_settings,
1467 .get_drvinfo = sis190_get_drvinfo,
1468 .get_regs_len = sis190_get_regs_len,
1469 .get_regs = sis190_get_regs,
1470 .get_link = ethtool_op_get_link,
1471 .get_msglevel = sis190_get_msglevel,
1472 .set_msglevel = sis190_set_msglevel,
1473 .nway_reset = sis190_nway_reset,
/* SIOCxMII ioctls via the generic MII layer; only valid when the
 * interface is up. */
1476 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1478 struct sis190_private *tp = netdev_priv(dev);
1480 return !netif_running(dev) ? -EINVAL :
1481 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/*
 * sis190_init_one - PCI probe entry point.
 * Builds the board (sis190_init_board), reads the MAC address, programs
 * the Rx filter, wires up all net_device hooks, registers the netdev and
 * starts autonegotiation with carrier initially off.
 */
1484 static int __devinit sis190_init_one(struct pci_dev *pdev,
1485 const struct pci_device_id *ent)
1487 static int printed_version = 0;
1488 struct sis190_private *tp;
1489 struct net_device *dev;
1490 void __iomem *ioaddr;
1493 if (!printed_version) {
1494 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1495 printed_version = 1;
1498 dev = sis190_init_board(pdev);
1504 tp = netdev_priv(dev);
1505 ioaddr = tp->mmio_addr;
1507 rc = sis190_get_mac_addr(pdev, dev);
1509 goto err_release_board;
1511 sis190_init_rxfilter(dev);
1513 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1515 dev->open = sis190_open;
1516 dev->stop = sis190_close;
1517 dev->do_ioctl = sis190_ioctl;
1518 dev->get_stats = sis190_get_stats;
1519 dev->tx_timeout = sis190_tx_timeout;
1520 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1521 dev->hard_start_xmit = sis190_start_xmit;
1522 #ifdef CONFIG_NET_POLL_CONTROLLER
1523 dev->poll_controller = sis190_netpoll;
1525 dev->set_multicast_list = sis190_set_rx_mode;
1526 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1527 dev->irq = pdev->irq;
1528 dev->base_addr = (unsigned long) 0xdead;
1530 spin_lock_init(&tp->lock);
1531 rc = register_netdev(dev);
1533 goto err_release_board;
1535 pci_set_drvdata(pdev, dev);
1537 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1538 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1539 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1541 dev->dev_addr[0], dev->dev_addr[1],
1542 dev->dev_addr[2], dev->dev_addr[3],
1543 dev->dev_addr[4], dev->dev_addr[5]);
1545 netif_carrier_off(dev);
1547 sis190_set_speed_auto(dev);
1552 sis190_release_board(pdev);
/* PCI remove: unregister the netdev and undo board init. */
1556 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1558 struct net_device *dev = pci_get_drvdata(pdev);
1560 unregister_netdev(dev);
1561 sis190_release_board(pdev);
1562 pci_set_drvdata(pdev, NULL);
/* PCI driver descriptor.  NOTE(review): the '.name = DRV_NAME,' line is
 * missing from this extraction. */
1565 static struct pci_driver sis190_pci_driver = {
1567 .id_table = sis190_pci_tbl,
1568 .probe = sis190_init_one,
1569 .remove = __devexit_p(sis190_remove_one),
/* Module entry/exit: register/unregister the PCI driver. */
1572 static int __init sis190_init_module(void)
1574 return pci_module_init(&sis190_pci_driver);
1577 static void __exit sis190_cleanup_module(void)
1579 pci_unregister_driver(&sis190_pci_driver);
1582 module_init(sis190_init_module);
1583 module_exit(sis190_cleanup_module);