2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/netdevice.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/etherdevice.h>
29 #include <linux/ethtool.h>
30 #include <linux/pci.h>
31 #include <linux/mii.h>
32 #include <linux/delay.h>
33 #include <linux/crc32.h>
34 #include <linux/dma-mapping.h>
37 #define PHY_MAX_ADDR 32
38 #define PHY_ID_ANY 0x1f
39 #define MII_REG_ANY 0x1f
41 #define DRV_VERSION "1.4"
42 #define DRV_NAME "sis190"
43 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
45 #define sis190_rx_skb netif_rx
46 #define sis190_rx_quota(count, quota) count
48 #define MAC_ADDR_LEN 6
50 #define NUM_TX_DESC 64 /* [8..1024] */
51 #define NUM_RX_DESC 64 /* [8..8192] */
52 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
53 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
54 #define RX_BUF_SIZE 1536
55 #define RX_BUF_MASK 0xfff8
57 #define SIS190_REGS_SIZE 0x80
58 #define SIS190_TX_TIMEOUT (6*HZ)
59 #define SIS190_PHY_TIMEOUT (10*HZ)
60 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
61 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
64 /* Enhanced PHY access register bit definitions */
65 #define EhnMIIread 0x0000
66 #define EhnMIIwrite 0x0020
67 #define EhnMIIdataShift 16
68 #define EhnMIIpmdShift 6 /* 7016 only */
69 #define EhnMIIregShift 11
70 #define EhnMIIreq 0x0010
71 #define EhnMIInotDone 0x0010
73 /* Write/read MMIO register */
74 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
75 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
76 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
77 #define SIS_R8(reg) readb (ioaddr + (reg))
78 #define SIS_R16(reg) readw (ioaddr + (reg))
79 #define SIS_R32(reg) readl (ioaddr + (reg))
81 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
83 enum sis190_registers {
85 TxDescStartAddr = 0x04,
86 rsv0 = 0x08, // reserved
87 TxSts = 0x0c, // unused (Control/Status)
89 RxDescStartAddr = 0x14,
90 rsv1 = 0x18, // reserved
91 RxSts = 0x1c, // unused
95 IntrTimer = 0x2c, // unused (Interupt Timer)
96 PMControl = 0x30, // unused (Power Mgmt Control/Status)
97 rsv2 = 0x34, // reserved
100 StationControl = 0x40,
102 GIoCR = 0x48, // unused (GMAC IO Compensation)
103 GIoCtrl = 0x4c, // unused (GMAC IO Control)
105 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
106 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
107 rsv3 = 0x5c, // reserved
111 // Undocumented = 0x6c,
113 RxWolData = 0x74, // unused (Rx WOL Data Access)
114 RxMPSControl = 0x78, // unused (Rx MPS Control)
115 rsv4 = 0x7c, // reserved
118 enum sis190_register_content {
120 SoftInt = 0x40000000, // unused
121 Timeup = 0x20000000, // unused
122 PauseFrame = 0x00080000, // unused
123 MagicPacket = 0x00040000, // unused
124 WakeupFrame = 0x00020000, // unused
125 LinkChange = 0x00010000,
126 RxQEmpty = 0x00000080,
128 TxQ1Empty = 0x00000020, // unused
129 TxQ1Int = 0x00000010,
130 TxQ0Empty = 0x00000008, // unused
131 TxQ0Int = 0x00000004,
137 CmdRxEnb = 0x08, // unused
139 RxBufEmpty = 0x01, // unused
142 Cfg9346_Lock = 0x00, // unused
143 Cfg9346_Unlock = 0xc0, // unused
146 AcceptErr = 0x20, // unused
147 AcceptRunt = 0x10, // unused
148 AcceptBroadcast = 0x0800,
149 AcceptMulticast = 0x0400,
150 AcceptMyPhys = 0x0200,
151 AcceptAllPhys = 0x0100,
155 RxCfgDMAShift = 8, // 0x1a in RxControl ?
158 TxInterFrameGapShift = 24,
159 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
161 LinkStatus = 0x02, // unused
162 FullDup = 0x01, // unused
165 TBILinkOK = 0x02000000, // unused
182 enum _DescStatusBit {
184 OWNbit = 0x80000000, // RXOWN/TXOWN
185 INTbit = 0x40000000, // RXINT/TXINT
186 CRCbit = 0x00020000, // CRCOFF/CRCEN
187 PADbit = 0x00010000, // PREADD/PADEN
189 RingEnd = 0x80000000,
191 LSEN = 0x08000000, // TSO ? -- FR
210 ColCountMask = 0x0000ffff,
224 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
233 RxSizeMask = 0x0000ffff
235 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
236 provide two (unused with Linux) Tx queues. No publicly
237 * available documentation alas.
241 enum sis190_eeprom_access_register_bits {
242 EECS = 0x00000001, // unused
243 EECLK = 0x00000002, // unused
244 EEDO = 0x00000008, // unused
245 EEDI = 0x00000004, // unused
248 EEWOP = 0x00000100 // unused
251 /* EEPROM Addresses */
252 enum sis190_eeprom_address {
253 EEPROMSignature = 0x00,
254 EEPROMCLK = 0x01, // unused
259 enum sis190_feature {
265 struct sis190_private {
266 void __iomem *mmio_addr;
267 struct pci_dev *pci_dev;
268 struct net_device *dev;
277 struct RxDesc *RxDescRing;
278 struct TxDesc *TxDescRing;
279 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
280 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
281 struct work_struct phy_task;
282 struct timer_list timer;
284 struct mii_if_info mii_if;
285 struct list_head first_phy;
291 struct list_head list;
298 enum sis190_phy_type {
305 static struct mii_chip_info {
310 } mii_chip_table[] = {
311 { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
312 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
313 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
314 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
315 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
316 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
317 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
321 static const struct {
323 } sis_chip_info[] = {
324 { "SiS 190 PCI Fast Ethernet adapter" },
325 { "SiS 191 PCI Gigabit Ethernet adapter" },
328 static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
329 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
330 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
334 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
336 static int rx_copybreak = 200;
342 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
343 module_param(rx_copybreak, int, 0);
344 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
345 module_param_named(debug, debug.msg_enable, int, 0);
346 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
347 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
348 MODULE_VERSION(DRV_VERSION);
349 MODULE_LICENSE("GPL");
351 static const u32 sis190_intr_mask =
352 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
355 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
356 * The chips use a 64 element hash table based on the Ethernet CRC.
358 static const int multicast_filter_limit = 32;
/*
 * Issue one command word on the chip's enhanced MII (GMII) management
 * interface and busy-poll (up to 100 iterations) for the controller to
 * clear the EhnMIInotDone bit; logs an error if the command never
 * completes.
 *
 * NOTE(review): this listing is missing interior lines of the function
 * (local declarations, the per-iteration delay and loop exit) — confirm
 * against the full source before relying on the exact polling behavior.
 */
360 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
364 SIS_W32(GMIIControl, ctl);
368 for (i = 0; i < 100; i++) {
369 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
375 pr_err("PHY command failed !\n");
/*
 * Write @val to PHY register @reg of PHY @phy_id: the request/write
 * flags and the register, PHY-address and data fields are packed into a
 * single 32-bit GMIIControl command (see the EhnMII* shift/flag
 * definitions above) and issued via __mdio_cmd().
 */
378 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
380 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
381 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
382 (((u32) val) << EhnMIIdataShift));
/*
 * Read PHY register @reg of PHY @phy_id: issue a read command through
 * __mdio_cmd(), then extract the 16-bit result from the data field of
 * the GMIIControl register (bits 31:16).
 */
385 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
387 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
388 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
390 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/*
 * net_device-based MDIO write adapter: resolves the MMIO base from the
 * device's private data and forwards to mdio_write().  Used as the
 * mii_if_info ->mdio_write callback (see sis190_mii_probe).
 */
393 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
395 struct sis190_private *tp = netdev_priv(dev);
397 mdio_write(tp->mmio_addr, phy_id, reg, val);
/*
 * net_device-based MDIO read adapter: resolves the MMIO base from the
 * device's private data and forwards to mdio_read().  Used as the
 * mii_if_info ->mdio_read callback (see sis190_mii_probe).
 */
400 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
402 struct sis190_private *tp = netdev_priv(dev);
404 return mdio_read(tp->mmio_addr, phy_id, reg);
/*
 * Read a PHY register twice and return the second value.  As the name
 * implies, this is for registers with latched bits (e.g. BMSR link
 * status): the first read flushes the stale latched state so the second
 * read reflects the current condition.
 */
407 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
409 mdio_read(ioaddr, phy_id, reg);
410 return mdio_read(ioaddr, phy_id, reg);
413 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
418 if (!(SIS_R32(ROMControl) & 0x0002))
421 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
423 for (i = 0; i < 200; i++) {
424 if (!(SIS_R32(ROMInterface) & EEREQ)) {
425 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
/*
 * Mask all chip interrupt sources (IntrMask = 0) and acknowledge any
 * pending ones by writing all-ones to IntrStatus.
 * NOTE(review): the listing drops a line after these writes — likely a
 * posting read (SIS_PCI_COMMIT); confirm against the full source.
 */
434 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
436 SIS_W32(IntrMask, 0x00);
437 SIS_W32(IntrStatus, 0xffffffff);
/*
 * Quiesce the ASIC: halt the Tx and Rx DMA engines (0x1a00 keeps the
 * configured DMA burst bits but clears the enable bits), then mask and
 * acknowledge all interrupts.
 */
441 static void sis190_asic_down(void __iomem *ioaddr)
443 /* Stop the chip's Tx and Rx DMA processes. */
445 SIS_W32(TxControl, 0x1a00);
446 SIS_W32(RxControl, 0x1a00);
448 sis190_irq_mask_and_ack(ioaddr);
/*
 * Flag @desc as the final descriptor of the Rx ring by setting the
 * RingEnd bit in its (little-endian) size field, so the hardware wraps
 * back to the ring start after it.
 */
451 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
453 desc->size |= cpu_to_le32(RingEnd);
456 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
458 u32 eor = le32_to_cpu(desc->size) & RingEnd;
461 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
463 desc->status = cpu_to_le32(OWNbit | INTbit);
466 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
469 desc->addr = cpu_to_le32(mapping);
470 sis190_give_to_asic(desc, rx_buf_sz);
473 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
476 desc->addr = cpu_to_le32(0xdeadbeef);
477 desc->size &= cpu_to_le32(RingEnd);
482 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
485 u32 rx_buf_sz = tp->rx_buf_sz;
489 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
491 goto skb_alloc_failed;
492 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
494 if (pci_dma_mapping_error(tp->pci_dev, mapping))
496 sis190_map_to_asic(desc, mapping, rx_buf_sz);
501 dev_kfree_skb_any(skb);
503 sis190_make_unusable_by_asic(desc);
507 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
512 for (cur = start; cur < end; cur++) {
513 unsigned int i = cur % NUM_RX_DESC;
515 if (tp->Rx_skbuff[i])
518 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
520 if (!tp->Rx_skbuff[i])
526 static bool sis190_try_rx_copy(struct sis190_private *tp,
527 struct sk_buff **sk_buff, int pkt_size,
533 if (pkt_size >= rx_copybreak)
536 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
540 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
542 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
549 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
551 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
553 if ((status & CRCOK) && !(status & ErrMask))
556 if (!(status & CRCOK))
557 stats->rx_crc_errors++;
558 else if (status & OVRUN)
559 stats->rx_over_errors++;
560 else if (status & (SHORT | LIMIT))
561 stats->rx_length_errors++;
562 else if (status & (MIIER | NIBON | COLON))
563 stats->rx_frame_errors++;
569 static int sis190_rx_interrupt(struct net_device *dev,
570 struct sis190_private *tp, void __iomem *ioaddr)
572 struct net_device_stats *stats = &dev->stats;
573 u32 rx_left, cur_rx = tp->cur_rx;
576 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
577 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
579 for (; rx_left > 0; rx_left--, cur_rx++) {
580 unsigned int entry = cur_rx % NUM_RX_DESC;
581 struct RxDesc *desc = tp->RxDescRing + entry;
584 if (le32_to_cpu(desc->status) & OWNbit)
587 status = le32_to_cpu(desc->PSize);
589 //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
591 if (sis190_rx_pkt_err(status, stats) < 0)
592 sis190_give_to_asic(desc, tp->rx_buf_sz);
594 struct sk_buff *skb = tp->Rx_skbuff[entry];
595 dma_addr_t addr = le32_to_cpu(desc->addr);
596 int pkt_size = (status & RxSizeMask) - 4;
597 struct pci_dev *pdev = tp->pci_dev;
599 if (unlikely(pkt_size > tp->rx_buf_sz)) {
600 netif_info(tp, intr, dev,
601 "(frag) status = %08x\n", status);
603 stats->rx_length_errors++;
604 sis190_give_to_asic(desc, tp->rx_buf_sz);
609 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
610 pci_dma_sync_single_for_device(pdev, addr,
611 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
612 sis190_give_to_asic(desc, tp->rx_buf_sz);
614 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
616 tp->Rx_skbuff[entry] = NULL;
617 sis190_make_unusable_by_asic(desc);
620 skb_put(skb, pkt_size);
621 skb->protocol = eth_type_trans(skb, dev);
626 stats->rx_bytes += pkt_size;
627 if ((status & BCAST) == MCAST)
631 count = cur_rx - tp->cur_rx;
634 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
636 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
637 tp->dirty_rx += delta;
639 if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
640 netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
645 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
650 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
652 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
654 memset(desc, 0x00, sizeof(*desc));
657 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
659 #define TxErrMask (WND | TABRT | FIFO | LINK)
661 if (!unlikely(status & TxErrMask))
665 stats->tx_window_errors++;
667 stats->tx_aborted_errors++;
669 stats->tx_fifo_errors++;
671 stats->tx_carrier_errors++;
678 static void sis190_tx_interrupt(struct net_device *dev,
679 struct sis190_private *tp, void __iomem *ioaddr)
681 struct net_device_stats *stats = &dev->stats;
682 u32 pending, dirty_tx = tp->dirty_tx;
684 * It would not be needed if queueing was allowed to be enabled
685 * again too early (hint: think preempt and unclocked smp systems).
687 unsigned int queue_stopped;
690 pending = tp->cur_tx - dirty_tx;
691 queue_stopped = (pending == NUM_TX_DESC);
693 for (; pending; pending--, dirty_tx++) {
694 unsigned int entry = dirty_tx % NUM_TX_DESC;
695 struct TxDesc *txd = tp->TxDescRing + entry;
696 u32 status = le32_to_cpu(txd->status);
702 skb = tp->Tx_skbuff[entry];
704 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
706 stats->tx_bytes += skb->len;
707 stats->collisions += ((status & ColCountMask) - 1);
710 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
711 tp->Tx_skbuff[entry] = NULL;
712 dev_kfree_skb_irq(skb);
715 if (tp->dirty_tx != dirty_tx) {
716 tp->dirty_tx = dirty_tx;
719 netif_wake_queue(dev);
724 * The interrupt handler does all of the Rx thread work and cleans up after
727 static irqreturn_t sis190_interrupt(int irq, void *__dev)
729 struct net_device *dev = __dev;
730 struct sis190_private *tp = netdev_priv(dev);
731 void __iomem *ioaddr = tp->mmio_addr;
732 unsigned int handled = 0;
735 status = SIS_R32(IntrStatus);
737 if ((status == 0xffffffff) || !status)
742 if (unlikely(!netif_running(dev))) {
743 sis190_asic_down(ioaddr);
747 SIS_W32(IntrStatus, status);
749 // netif_info(tp, intr, dev, "status = %08x\n", status);
751 if (status & LinkChange) {
752 netif_info(tp, intr, dev, "link change\n");
753 schedule_work(&tp->phy_task);
757 sis190_rx_interrupt(dev, tp, ioaddr);
759 if (status & TxQ0Int)
760 sis190_tx_interrupt(dev, tp, ioaddr);
762 return IRQ_RETVAL(handled);
765 #ifdef CONFIG_NET_POLL_CONTROLLER
766 static void sis190_netpoll(struct net_device *dev)
768 struct sis190_private *tp = netdev_priv(dev);
769 struct pci_dev *pdev = tp->pci_dev;
771 disable_irq(pdev->irq);
772 sis190_interrupt(pdev->irq, dev);
773 enable_irq(pdev->irq);
777 static void sis190_free_rx_skb(struct sis190_private *tp,
778 struct sk_buff **sk_buff, struct RxDesc *desc)
780 struct pci_dev *pdev = tp->pci_dev;
782 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
784 dev_kfree_skb(*sk_buff);
786 sis190_make_unusable_by_asic(desc);
789 static void sis190_rx_clear(struct sis190_private *tp)
793 for (i = 0; i < NUM_RX_DESC; i++) {
794 if (!tp->Rx_skbuff[i])
796 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/*
 * Reset all Tx/Rx ring bookkeeping indices (producer and consumer for
 * both rings) to zero, i.e. both rings are empty.
 */
800 static void sis190_init_ring_indexes(struct sis190_private *tp)
802 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
805 static int sis190_init_ring(struct net_device *dev)
807 struct sis190_private *tp = netdev_priv(dev);
809 sis190_init_ring_indexes(tp);
811 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
812 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
814 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
817 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
826 static void sis190_set_rx_mode(struct net_device *dev)
828 struct sis190_private *tp = netdev_priv(dev);
829 void __iomem *ioaddr = tp->mmio_addr;
831 u32 mc_filter[2]; /* Multicast hash filter */
834 if (dev->flags & IFF_PROMISC) {
836 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
838 mc_filter[1] = mc_filter[0] = 0xffffffff;
839 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
840 (dev->flags & IFF_ALLMULTI)) {
841 /* Too many to filter perfectly -- accept all multicasts. */
842 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
843 mc_filter[1] = mc_filter[0] = 0xffffffff;
845 struct dev_mc_list *mclist;
847 rx_mode = AcceptBroadcast | AcceptMyPhys;
848 mc_filter[1] = mc_filter[0] = 0;
849 netdev_for_each_mc_addr(mclist, dev) {
851 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
852 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
853 rx_mode |= AcceptMulticast;
857 spin_lock_irqsave(&tp->lock, flags);
859 SIS_W16(RxMacControl, rx_mode | 0x2);
860 SIS_W32(RxHashTable, mc_filter[0]);
861 SIS_W32(RxHashTable + 4, mc_filter[1]);
863 spin_unlock_irqrestore(&tp->lock, flags);
/*
 * Soft-reset the chip: pulse the reset bit (0x8000) in IntrControl,
 * clear it again, then bring the ASIC down (DMA halted, IRQs masked).
 * NOTE(review): the listing drops a line between the two IntrControl
 * writes — likely a posting read; confirm against the full source.
 */
866 static void sis190_soft_reset(void __iomem *ioaddr)
868 SIS_W32(IntrControl, 0x8000);
870 SIS_W32(IntrControl, 0x0);
871 sis190_asic_down(ioaddr);
874 static void sis190_hw_start(struct net_device *dev)
876 struct sis190_private *tp = netdev_priv(dev);
877 void __iomem *ioaddr = tp->mmio_addr;
879 sis190_soft_reset(ioaddr);
881 SIS_W32(TxDescStartAddr, tp->tx_dma);
882 SIS_W32(RxDescStartAddr, tp->rx_dma);
884 SIS_W32(IntrStatus, 0xffffffff);
885 SIS_W32(IntrMask, 0x0);
886 SIS_W32(GMIIControl, 0x0);
887 SIS_W32(TxMacControl, 0x60);
888 SIS_W16(RxMacControl, 0x02);
889 SIS_W32(RxHashTable, 0x0);
891 SIS_W32(RxWolCtrl, 0x0);
892 SIS_W32(RxWolData, 0x0);
896 sis190_set_rx_mode(dev);
898 /* Enable all known interrupts by setting the interrupt mask. */
899 SIS_W32(IntrMask, sis190_intr_mask);
901 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
902 SIS_W32(RxControl, 0x1a1d);
904 netif_start_queue(dev);
907 static void sis190_phy_task(struct work_struct *work)
909 struct sis190_private *tp =
910 container_of(work, struct sis190_private, phy_task);
911 struct net_device *dev = tp->dev;
912 void __iomem *ioaddr = tp->mmio_addr;
913 int phy_id = tp->mii_if.phy_id;
918 if (!netif_running(dev))
921 val = mdio_read(ioaddr, phy_id, MII_BMCR);
922 if (val & BMCR_RESET) {
923 // FIXME: needlessly high ? -- FR 02/07/2005
924 mod_timer(&tp->timer, jiffies + HZ/10);
925 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
926 BMSR_ANEGCOMPLETE)) {
927 netif_carrier_off(dev);
928 netif_warn(tp, link, dev, "auto-negotiating...\n");
929 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
937 { LPA_1000FULL, 0x07000c00 | 0x00001000,
938 "1000 Mbps Full Duplex" },
939 { LPA_1000HALF, 0x07000c00,
940 "1000 Mbps Half Duplex" },
941 { LPA_100FULL, 0x04000800 | 0x00001000,
942 "100 Mbps Full Duplex" },
943 { LPA_100HALF, 0x04000800,
944 "100 Mbps Half Duplex" },
945 { LPA_10FULL, 0x04000400 | 0x00001000,
946 "10 Mbps Full Duplex" },
947 { LPA_10HALF, 0x04000400,
948 "10 Mbps Half Duplex" },
949 { 0, 0x04000400, "unknown" }
951 u16 adv, autoexp, gigadv, gigrec;
953 val = mdio_read(ioaddr, phy_id, 0x1f);
954 netif_info(tp, link, dev, "mii ext = %04x\n", val);
956 val = mdio_read(ioaddr, phy_id, MII_LPA);
957 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
958 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
959 netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
962 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
963 /* check for gigabit speed */
964 gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
965 gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
966 val = (gigadv & (gigrec >> 2));
967 if (val & ADVERTISE_1000FULL)
969 else if (val & ADVERTISE_1000HALF)
975 for (p = reg31; p->val; p++) {
976 if ((val & p->val) == p->val)
981 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
983 if ((tp->features & F_HAS_RGMII) &&
984 (tp->features & F_PHY_BCM5461)) {
985 // Set Tx Delay in RGMII mode.
986 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
988 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
989 p->ctl |= 0x03000000;
992 SIS_W32(StationControl, p->ctl);
994 if (tp->features & F_HAS_RGMII) {
995 SIS_W32(RGDelay, 0x0441);
996 SIS_W32(RGDelay, 0x0440);
999 tp->negotiated_lpa = p->val;
1001 netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1002 netif_carrier_on(dev);
/*
 * Periodic PHY timer callback (armed by sis190_request_timer with the
 * net_device cast into @__opaque).  Defers the actual PHY work to the
 * phy_task workqueue item, and only while the interface is running.
 */
1009 static void sis190_phy_timer(unsigned long __opaque)
1011 struct net_device *dev = (struct net_device *)__opaque;
1012 struct sis190_private *tp = netdev_priv(dev);
1014 if (likely(netif_running(dev)))
1015 schedule_work(&tp->phy_task);
/*
 * Tear down the PHY poll timer, waiting for a concurrently-running
 * sis190_phy_timer() to finish (del_timer_sync).
 */
1018 static inline void sis190_delete_timer(struct net_device *dev)
1020 struct sis190_private *tp = netdev_priv(dev);
1022 del_timer_sync(&tp->timer);
1025 static inline void sis190_request_timer(struct net_device *dev)
1027 struct sis190_private *tp = netdev_priv(dev);
1028 struct timer_list *timer = &tp->timer;
1031 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1032 timer->data = (unsigned long)dev;
1033 timer->function = sis190_phy_timer;
1037 static void sis190_set_rxbufsize(struct sis190_private *tp,
1038 struct net_device *dev)
1040 unsigned int mtu = dev->mtu;
1042 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1043 /* RxDesc->size has a licence to kill the lower bits */
1044 if (tp->rx_buf_sz & 0x07) {
1046 tp->rx_buf_sz &= RX_BUF_MASK;
1050 static int sis190_open(struct net_device *dev)
1052 struct sis190_private *tp = netdev_priv(dev);
1053 struct pci_dev *pdev = tp->pci_dev;
1056 sis190_set_rxbufsize(tp, dev);
1059 * Rx and Tx descriptors need 256 bytes alignment.
1060 * pci_alloc_consistent() guarantees a stronger alignment.
1062 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1063 if (!tp->TxDescRing)
1066 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1067 if (!tp->RxDescRing)
1070 rc = sis190_init_ring(dev);
1074 sis190_request_timer(dev);
1076 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1078 goto err_release_timer_2;
1080 sis190_hw_start(dev);
1084 err_release_timer_2:
1085 sis190_delete_timer(dev);
1086 sis190_rx_clear(tp);
1088 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1091 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1096 static void sis190_tx_clear(struct sis190_private *tp)
1100 for (i = 0; i < NUM_TX_DESC; i++) {
1101 struct sk_buff *skb = tp->Tx_skbuff[i];
1106 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1107 tp->Tx_skbuff[i] = NULL;
1110 tp->dev->stats.tx_dropped++;
1112 tp->cur_tx = tp->dirty_tx = 0;
1115 static void sis190_down(struct net_device *dev)
1117 struct sis190_private *tp = netdev_priv(dev);
1118 void __iomem *ioaddr = tp->mmio_addr;
1119 unsigned int poll_locked = 0;
1121 sis190_delete_timer(dev);
1123 netif_stop_queue(dev);
1126 spin_lock_irq(&tp->lock);
1128 sis190_asic_down(ioaddr);
1130 spin_unlock_irq(&tp->lock);
1132 synchronize_irq(dev->irq);
1137 synchronize_sched();
1139 } while (SIS_R32(IntrMask));
1141 sis190_tx_clear(tp);
1142 sis190_rx_clear(tp);
1145 static int sis190_close(struct net_device *dev)
1147 struct sis190_private *tp = netdev_priv(dev);
1148 struct pci_dev *pdev = tp->pci_dev;
1152 free_irq(dev->irq, dev);
1154 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1155 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1157 tp->TxDescRing = NULL;
1158 tp->RxDescRing = NULL;
1163 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1164 struct net_device *dev)
1166 struct sis190_private *tp = netdev_priv(dev);
1167 void __iomem *ioaddr = tp->mmio_addr;
1168 u32 len, entry, dirty_tx;
1169 struct TxDesc *desc;
1172 if (unlikely(skb->len < ETH_ZLEN)) {
1173 if (skb_padto(skb, ETH_ZLEN)) {
1174 dev->stats.tx_dropped++;
1182 entry = tp->cur_tx % NUM_TX_DESC;
1183 desc = tp->TxDescRing + entry;
1185 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1186 netif_stop_queue(dev);
1187 netif_err(tp, tx_err, dev,
1188 "BUG! Tx Ring full when queue awake!\n");
1189 return NETDEV_TX_BUSY;
1192 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1193 if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
1194 netif_err(tp, tx_err, dev,
1195 "PCI mapping failed, dropping packet");
1196 return NETDEV_TX_BUSY;
1199 tp->Tx_skbuff[entry] = skb;
1201 desc->PSize = cpu_to_le32(len);
1202 desc->addr = cpu_to_le32(mapping);
1204 desc->size = cpu_to_le32(len);
1205 if (entry == (NUM_TX_DESC - 1))
1206 desc->size |= cpu_to_le32(RingEnd);
1210 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1211 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1213 desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1214 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1215 desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1222 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1224 dirty_tx = tp->dirty_tx;
1225 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1226 netif_stop_queue(dev);
1228 if (dirty_tx != tp->dirty_tx)
1229 netif_wake_queue(dev);
1232 return NETDEV_TX_OK;
1235 static void sis190_free_phy(struct list_head *first_phy)
1237 struct sis190_phy *cur, *next;
1239 list_for_each_entry_safe(cur, next, first_phy, list) {
1245 * sis190_default_phy - Select default PHY for sis190 mac.
1246 * @dev: the net device to probe for
1248 * Select first detected PHY with link as default.
1249 * If no one is link on, select PHY whose types is HOME as default.
1250 * If HOME doesn't exist, select LAN.
1252 static u16 sis190_default_phy(struct net_device *dev)
1254 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1255 struct sis190_private *tp = netdev_priv(dev);
1256 struct mii_if_info *mii_if = &tp->mii_if;
1257 void __iomem *ioaddr = tp->mmio_addr;
1260 phy_home = phy_default = phy_lan = NULL;
1262 list_for_each_entry(phy, &tp->first_phy, list) {
1263 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1265 // Link ON & Not select default PHY & not ghost PHY.
1266 if ((status & BMSR_LSTATUS) &&
1268 (phy->type != UNKNOWN)) {
1271 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1272 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1273 status | BMCR_ANENABLE | BMCR_ISOLATE);
1274 if (phy->type == HOME)
1276 else if (phy->type == LAN)
1283 phy_default = phy_home;
1285 phy_default = phy_lan;
1287 phy_default = list_first_entry(&tp->first_phy,
1288 struct sis190_phy, list);
1291 if (mii_if->phy_id != phy_default->phy_id) {
1292 mii_if->phy_id = phy_default->phy_id;
1293 if (netif_msg_probe(tp))
1294 pr_info("%s: Using transceiver at address %d as default\n",
1295 pci_name(tp->pci_dev), mii_if->phy_id);
1298 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1299 status &= (~BMCR_ISOLATE);
1301 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1302 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1307 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1308 struct sis190_phy *phy, unsigned int phy_id,
1311 void __iomem *ioaddr = tp->mmio_addr;
1312 struct mii_chip_info *p;
1314 INIT_LIST_HEAD(&phy->list);
1315 phy->status = mii_status;
1316 phy->phy_id = phy_id;
1318 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1319 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1321 for (p = mii_chip_table; p->type; p++) {
1322 if ((p->id[0] == phy->id[0]) &&
1323 (p->id[1] == (phy->id[1] & 0xfff0))) {
1329 phy->type = (p->type == MIX) ?
1330 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1331 LAN : HOME) : p->type;
1332 tp->features |= p->feature;
1333 if (netif_msg_probe(tp))
1334 pr_info("%s: %s transceiver at address %d\n",
1335 pci_name(tp->pci_dev), p->name, phy_id);
1337 phy->type = UNKNOWN;
1338 if (netif_msg_probe(tp))
1339 pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1340 pci_name(tp->pci_dev),
1341 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1345 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1347 if (tp->features & F_PHY_88E1111) {
1348 void __iomem *ioaddr = tp->mmio_addr;
1349 int phy_id = tp->mii_if.phy_id;
1355 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1357 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1359 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1365 * sis190_mii_probe - Probe MII PHY for sis190
1366 * @dev: the net device to probe for
1368 * Search for total of 32 possible mii phy addresses.
1369 * Identify and set current phy if found one,
1370 * return error if it failed to found.
1372 static int __devinit sis190_mii_probe(struct net_device *dev)
1374 struct sis190_private *tp = netdev_priv(dev);
1375 struct mii_if_info *mii_if = &tp->mii_if;
1376 void __iomem *ioaddr = tp->mmio_addr;
1380 INIT_LIST_HEAD(&tp->first_phy);
1382 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1383 struct sis190_phy *phy;
1386 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1388 // Try next mii if the current one is not accessible.
1389 if (status == 0xffff || status == 0x0000)
1392 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1394 sis190_free_phy(&tp->first_phy);
1399 sis190_init_phy(dev, tp, phy, phy_id, status);
1401 list_add(&tp->first_phy, &phy->list);
1404 if (list_empty(&tp->first_phy)) {
1405 if (netif_msg_probe(tp))
1406 pr_info("%s: No MII transceivers found!\n",
1407 pci_name(tp->pci_dev));
1412 /* Select default PHY for mac */
1413 sis190_default_phy(dev);
1415 sis190_mii_probe_88e1111_fixup(tp);
1418 mii_if->mdio_read = __mdio_read;
1419 mii_if->mdio_write = __mdio_write;
1420 mii_if->phy_id_mask = PHY_ID_ANY;
1421 mii_if->reg_num_mask = MII_REG_ANY;
1426 static void sis190_mii_remove(struct net_device *dev)
1428 struct sis190_private *tp = netdev_priv(dev);
1430 sis190_free_phy(&tp->first_phy);
1433 static void sis190_release_board(struct pci_dev *pdev)
1435 struct net_device *dev = pci_get_drvdata(pdev);
1436 struct sis190_private *tp = netdev_priv(dev);
1438 iounmap(tp->mmio_addr);
1439 pci_release_regions(pdev);
1440 pci_disable_device(pdev);
/*
 * Allocate the net_device, enable the PCI function, validate and remap
 * BAR 0 (SIS190_REGS_SIZE bytes of MMIO), set a 32-bit DMA mask and put
 * the chip into a known quiet state (IRQs masked, soft reset).
 *
 * NOTE(review): several lines of this function (the `if (!dev)` /
 * `if (rc < 0)` tests, closing braces and the error-unwind labels
 * err_free_dev_1 / err_pci_disable_2 / err_free_res_3) appear to be
 * missing from this excerpt — confirm against the full file before
 * changing control flow here.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;

	/* Private area of the net_device holds struct sis190_private. */
	dev = alloc_etherdev(sizeof(*tp));
	if (netif_msg_drv(&debug))
		pr_err("unable to alloc new ethernet\n");

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	/* Merge the module-parameter mask with the driver defaults. */
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (netif_msg_probe(tp))
		pr_err("%s: enable failure\n", pci_name(pdev));
	goto err_free_dev_1;

	/* BAR 0 must be a memory-mapped region large enough for our regs. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
	if (netif_msg_probe(tp))
		pr_err("%s: region #0 is no MMIO resource\n",
	goto err_pci_disable_2;

	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
	if (netif_msg_probe(tp))
		pr_err("%s: invalid PCI region size(s)\n",
	goto err_pci_disable_2;

	/* Claim BAR 0 under the driver's name. */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (netif_msg_probe(tp))
		pr_err("%s: could not request regions\n",
	goto err_pci_disable_2;

	/* Chip only does 32-bit DMA. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (netif_msg_probe(tp))
		pr_err("%s: DMA configuration failed\n",
	goto err_free_res_3;

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (netif_msg_probe(tp))
		pr_err("%s: cannot remap MMIO, aborting\n",
	goto err_free_res_3;

	tp->mmio_addr = ioaddr;

	/* Quiesce the chip before anything else touches it. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);

	/* error unwind (labels missing from this excerpt) */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
1534 static void sis190_tx_timeout(struct net_device *dev)
1536 struct sis190_private *tp = netdev_priv(dev);
1537 void __iomem *ioaddr = tp->mmio_addr;
1540 /* Disable Tx, if not already */
1541 tmp8 = SIS_R8(TxControl);
1542 if (tmp8 & CmdTxEnb)
1543 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1545 netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1546 SIS_R32(TxControl), SIS_R32(TxSts));
1548 /* Disable interrupts by clearing the interrupt mask. */
1549 SIS_W32(IntrMask, 0x0000);
1551 /* Stop a shared interrupt from scavenging while we are. */
1552 spin_lock_irq(&tp->lock);
1553 sis190_tx_clear(tp);
1554 spin_unlock_irq(&tp->lock);
1556 /* ...and finally, reset everything. */
1557 sis190_hw_start(dev);
1559 netif_wake_queue(dev);
1562 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1564 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
/*
 * Read the MAC address out of the on-board EEPROM into dev->dev_addr,
 * and latch the RGMII/GMII strap from the EEPROM info word.
 *
 * NOTE(review): the local declarations (sig, i) and the return
 * statements appear to be missing from this excerpt — presumably the
 * all-ones/all-zeroes signature path returns an error; confirm against
 * the full file.
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
	/* All-ones / all-zeroes means no (or blank) EEPROM. */
	if ((sig == 0xffff) || (sig == 0x0000)) {
	if (netif_msg_probe(tp))
		pr_info("%s: Error EEPROM read %x\n",
			pci_name(pdev), sig);

	/* Get MAC address from EEPROM: three 16-bit words, little-endian. */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);

	/* Info word bit 7 selects RGMII mode. */
	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 * @pdev: PCI device of the adapter (used for log messages)
 * @dev: network device to get address for
 *
 * On SiS96x models the MAC address is stored in APC CMOS RAM, which is
 * accessed through the ISA bridge.
 * The MAC address is read into @dev->dev_addr.
1609 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1610 struct net_device *dev)
1612 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1613 struct sis190_private *tp = netdev_priv(dev);
1614 struct pci_dev *isa_bridge;
1618 if (netif_msg_probe(tp))
1619 pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1621 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1622 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1628 if (netif_msg_probe(tp))
1629 pr_info("%s: Can not find ISA bridge\n",
1634 /* Enable port 78h & 79h to access APC Registers. */
1635 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1636 reg = (tmp8 & ~0x02);
1637 pci_write_config_byte(isa_bridge, 0x48, reg);
1639 pci_read_config_byte(isa_bridge, 0x48, ®);
1641 for (i = 0; i < MAC_ADDR_LEN; i++) {
1642 outb(0x9 + i, 0x78);
1643 dev->dev_addr[i] = inb(0x79);
1649 sis190_set_rgmii(tp, reg);
1651 /* Restore the value to ISA Bridge */
1652 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1653 pci_dev_put(isa_bridge);
1659 * sis190_init_rxfilter - Initialize the Rx filter
1660 * @dev: network device to initialize
1662 * Set receive filter address to our MAC address
1663 * and enable packet filtering.
1665 static inline void sis190_init_rxfilter(struct net_device *dev)
1667 struct sis190_private *tp = netdev_priv(dev);
1668 void __iomem *ioaddr = tp->mmio_addr;
1672 ctl = SIS_R16(RxMacControl);
1674 * Disable packet filtering before setting filter.
1675 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1676 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1678 SIS_W16(RxMacControl, ctl & ~0x0f00);
1680 for (i = 0; i < MAC_ADDR_LEN; i++)
1681 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1683 SIS_W16(RxMacControl, ctl);
1687 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1688 struct net_device *dev)
1692 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1696 pci_read_config_byte(pdev, 0x73, ®);
1698 if (reg & 0x00000001)
1699 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1704 static void sis190_set_speed_auto(struct net_device *dev)
1706 struct sis190_private *tp = netdev_priv(dev);
1707 void __iomem *ioaddr = tp->mmio_addr;
1708 int phy_id = tp->mii_if.phy_id;
1711 netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1713 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1715 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1717 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1718 ADVERTISE_100FULL | ADVERTISE_10FULL |
1719 ADVERTISE_100HALF | ADVERTISE_10HALF);
1721 // Enable 1000 Full Mode.
1722 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1724 // Enable auto-negotiation and restart auto-negotiation.
1725 mdio_write(ioaddr, phy_id, MII_BMCR,
1726 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1729 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1731 struct sis190_private *tp = netdev_priv(dev);
1733 return mii_ethtool_gset(&tp->mii_if, cmd);
1736 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1738 struct sis190_private *tp = netdev_priv(dev);
1740 return mii_ethtool_sset(&tp->mii_if, cmd);
1743 static void sis190_get_drvinfo(struct net_device *dev,
1744 struct ethtool_drvinfo *info)
1746 struct sis190_private *tp = netdev_priv(dev);
1748 strcpy(info->driver, DRV_NAME);
1749 strcpy(info->version, DRV_VERSION);
1750 strcpy(info->bus_info, pci_name(tp->pci_dev));
1753 static int sis190_get_regs_len(struct net_device *dev)
1755 return SIS190_REGS_SIZE;
1758 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1761 struct sis190_private *tp = netdev_priv(dev);
1762 unsigned long flags;
1764 if (regs->len > SIS190_REGS_SIZE)
1765 regs->len = SIS190_REGS_SIZE;
1767 spin_lock_irqsave(&tp->lock, flags);
1768 memcpy_fromio(p, tp->mmio_addr, regs->len);
1769 spin_unlock_irqrestore(&tp->lock, flags);
1772 static int sis190_nway_reset(struct net_device *dev)
1774 struct sis190_private *tp = netdev_priv(dev);
1776 return mii_nway_restart(&tp->mii_if);
1779 static u32 sis190_get_msglevel(struct net_device *dev)
1781 struct sis190_private *tp = netdev_priv(dev);
1783 return tp->msg_enable;
1786 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1788 struct sis190_private *tp = netdev_priv(dev);
1790 tp->msg_enable = value;
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
1805 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1807 struct sis190_private *tp = netdev_priv(dev);
1809 return !netif_running(dev) ? -EINVAL :
1810 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/* net_device callbacks wired up in sis190_init_one. */
static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_multicast_list = sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
/*
 * PCI probe: bring up the board, fetch the MAC address, wire up netdev
 * and ethtool operations, probe the MII bus, register the interface and
 * start auto-negotiation.
 *
 * NOTE(review): the `if (rc < 0)` tests, closing braces and the
 * err_remove_mii / err_release_board labels appear to be missing from
 * this excerpt — confirm against the full file before editing the
 * unwind path.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
	/* Print the banner only once, on first probe. */
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;

	if (!printed_version) {
	if (netif_msg_drv(&debug))
		pr_info(SIS190_DRIVER_NAME " loaded\n");
	printed_version = 1;

	/* Allocate and map the board; chip is quiescent afterwards. */
	dev = sis190_init_board(pdev);

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	/* MAC address from EEPROM, or APC CMOS on SiS96x. */
	rc = sis190_get_mac_addr(pdev, dev);
	goto err_release_board;

	sis190_init_rxfilter(dev);

	/* Deferred PHY handling runs from the shared workqueue. */
	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* Marker value: the chip has no I/O-port base address. */
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	goto err_release_board;

	rc = register_netdev(dev);
	goto err_remove_mii;

	if (netif_msg_probe(tp)) {
		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
			    sis_chip_info[ent->driver_data].name,
			    ioaddr, dev->irq, dev->dev_addr);
		netdev_info(dev, "%s mode.\n",
			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	/* No carrier until auto-negotiation completes. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);

	/* error unwind (labels missing from this excerpt) */
	sis190_mii_remove(dev);

	sis190_release_board(pdev);
1901 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1903 struct net_device *dev = pci_get_drvdata(pdev);
1905 sis190_mii_remove(dev);
1906 flush_scheduled_work();
1907 unregister_netdev(dev);
1908 sis190_release_board(pdev);
1909 pci_set_drvdata(pdev, NULL);
1912 static struct pci_driver sis190_pci_driver = {
1914 .id_table = sis190_pci_tbl,
1915 .probe = sis190_init_one,
1916 .remove = __devexit_p(sis190_remove_one),
1919 static int __init sis190_init_module(void)
1921 return pci_register_driver(&sis190_pci_driver);
1924 static void __exit sis190_cleanup_module(void)
1926 pci_unregister_driver(&sis190_pci_driver);
/* Bind module load/unload to the register/unregister helpers. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);