2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #define DRV_VERSION "1.4"
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
55 #define sis190_rx_skb netif_rx
56 #define sis190_rx_quota(count, quota) count
58 #define MAC_ADDR_LEN 6
60 #define NUM_TX_DESC 64 /* [8..1024] */
61 #define NUM_RX_DESC 64 /* [8..8192] */
62 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
63 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
64 #define RX_BUF_SIZE 1536
65 #define RX_BUF_MASK 0xfff8
67 #define SIS190_REGS_SIZE 0x80
68 #define SIS190_TX_TIMEOUT (6*HZ)
69 #define SIS190_PHY_TIMEOUT (10*HZ)
70 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
74 /* Enhanced PHY access register bit definitions */
75 #define EhnMIIread 0x0000
76 #define EhnMIIwrite 0x0020
77 #define EhnMIIdataShift 16
78 #define EhnMIIpmdShift 6 /* 7016 only */
79 #define EhnMIIregShift 11
80 #define EhnMIIreq 0x0010
81 #define EhnMIInotDone 0x0010
83 /* Write/read MMIO register */
84 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
85 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
86 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
87 #define SIS_R8(reg) readb (ioaddr + (reg))
88 #define SIS_R16(reg) readw (ioaddr + (reg))
89 #define SIS_R32(reg) readl (ioaddr + (reg))
91 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
93 enum sis190_registers {
95 TxDescStartAddr = 0x04,
96 rsv0 = 0x08, // reserved
97 TxSts = 0x0c, // unused (Control/Status)
99 RxDescStartAddr = 0x14,
100 rsv1 = 0x18, // reserved
101 RxSts = 0x1c, // unused
105 IntrTimer = 0x2c, // unused (Interrupt Timer)
106 PMControl = 0x30, // unused (Power Mgmt Control/Status)
107 rsv2 = 0x34, // reserved
110 StationControl = 0x40,
112 GIoCR = 0x48, // unused (GMAC IO Compensation)
113 GIoCtrl = 0x4c, // unused (GMAC IO Control)
115 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
116 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
117 rsv3 = 0x5c, // reserved
121 // Undocumented = 0x6c,
123 RxWolData = 0x74, // unused (Rx WOL Data Access)
124 RxMPSControl = 0x78, // unused (Rx MPS Control)
125 rsv4 = 0x7c, // reserved
128 enum sis190_register_content {
130 SoftInt = 0x40000000, // unused
131 Timeup = 0x20000000, // unused
132 PauseFrame = 0x00080000, // unused
133 MagicPacket = 0x00040000, // unused
134 WakeupFrame = 0x00020000, // unused
135 LinkChange = 0x00010000,
136 RxQEmpty = 0x00000080,
138 TxQ1Empty = 0x00000020, // unused
139 TxQ1Int = 0x00000010,
140 TxQ0Empty = 0x00000008, // unused
141 TxQ0Int = 0x00000004,
147 CmdRxEnb = 0x08, // unused
149 RxBufEmpty = 0x01, // unused
152 Cfg9346_Lock = 0x00, // unused
153 Cfg9346_Unlock = 0xc0, // unused
156 AcceptErr = 0x20, // unused
157 AcceptRunt = 0x10, // unused
158 AcceptBroadcast = 0x0800,
159 AcceptMulticast = 0x0400,
160 AcceptMyPhys = 0x0200,
161 AcceptAllPhys = 0x0100,
165 RxCfgDMAShift = 8, // 0x1a in RxControl ?
168 TxInterFrameGapShift = 24,
169 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
171 LinkStatus = 0x02, // unused
172 FullDup = 0x01, // unused
175 TBILinkOK = 0x02000000, // unused
192 enum _DescStatusBit {
194 OWNbit = 0x80000000, // RXOWN/TXOWN
195 INTbit = 0x40000000, // RXINT/TXINT
196 CRCbit = 0x00020000, // CRCOFF/CRCEN
197 PADbit = 0x00010000, // PREADD/PADEN
199 RingEnd = 0x80000000,
201 LSEN = 0x08000000, // TSO ? -- FR
220 ColCountMask = 0x0000ffff,
234 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
243 RxSizeMask = 0x0000ffff
245 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
246 * provide two (unused with Linux) Tx queues. No publicly
247 * available documentation alas.
251 enum sis190_eeprom_access_register_bits {
252 EECS = 0x00000001, // unused
253 EECLK = 0x00000002, // unused
254 EEDO = 0x00000008, // unused
255 EEDI = 0x00000004, // unused
258 EEWOP = 0x00000100 // unused
261 /* EEPROM Addresses */
262 enum sis190_eeprom_address {
263 EEPROMSignature = 0x00,
264 EEPROMCLK = 0x01, // unused
269 enum sis190_feature {
275 struct sis190_private {
276 void __iomem *mmio_addr;
277 struct pci_dev *pci_dev;
278 struct net_device *dev;
287 struct RxDesc *RxDescRing;
288 struct TxDesc *TxDescRing;
289 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
290 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
291 struct work_struct phy_task;
292 struct timer_list timer;
294 struct mii_if_info mii_if;
295 struct list_head first_phy;
301 struct list_head list;
308 enum sis190_phy_type {
315 static struct mii_chip_info {
320 } mii_chip_table[] = {
321 { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
322 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
323 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
324 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
325 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
326 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
327 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
331 static const struct {
333 } sis_chip_info[] = {
334 { "SiS 190 PCI Fast Ethernet adapter" },
335 { "SiS 191 PCI Gigabit Ethernet adapter" },
338 static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
340 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
344 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
346 static int rx_copybreak = 200;
352 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
353 module_param(rx_copybreak, int, 0);
354 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
355 module_param_named(debug, debug.msg_enable, int, 0);
356 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
357 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
358 MODULE_VERSION(DRV_VERSION);
359 MODULE_LICENSE("GPL");
361 static const u32 sis190_intr_mask =
362 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
365 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
366 * The chips use a 64 element hash table based on the Ethernet CRC.
368 static const int multicast_filter_limit = 32;
370 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
374 SIS_W32(GMIIControl, ctl);
378 for (i = 0; i < 100; i++) {
379 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
385 printk(KERN_ERR PFX "PHY command failed !\n");
388 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
390 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
391 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
392 (((u32) val) << EhnMIIdataShift));
395 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
397 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
398 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
400 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
403 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
405 struct sis190_private *tp = netdev_priv(dev);
407 mdio_write(tp->mmio_addr, phy_id, reg, val);
410 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
412 struct sis190_private *tp = netdev_priv(dev);
414 return mdio_read(tp->mmio_addr, phy_id, reg);
417 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
419 mdio_read(ioaddr, phy_id, reg);
420 return mdio_read(ioaddr, phy_id, reg);
423 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
428 if (!(SIS_R32(ROMControl) & 0x0002))
431 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
433 for (i = 0; i < 200; i++) {
434 if (!(SIS_R32(ROMInterface) & EEREQ)) {
435 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
444 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
446 SIS_W32(IntrMask, 0x00);
447 SIS_W32(IntrStatus, 0xffffffff);
451 static void sis190_asic_down(void __iomem *ioaddr)
453 /* Stop the chip's Tx and Rx DMA processes. */
455 SIS_W32(TxControl, 0x1a00);
456 SIS_W32(RxControl, 0x1a00);
458 sis190_irq_mask_and_ack(ioaddr);
461 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
463 desc->size |= cpu_to_le32(RingEnd);
466 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
468 u32 eor = le32_to_cpu(desc->size) & RingEnd;
471 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
473 desc->status = cpu_to_le32(OWNbit | INTbit);
476 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
479 desc->addr = cpu_to_le32(mapping);
480 sis190_give_to_asic(desc, rx_buf_sz);
483 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
486 desc->addr = cpu_to_le32(0xdeadbeef);
487 desc->size &= cpu_to_le32(RingEnd);
492 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
495 u32 rx_buf_sz = tp->rx_buf_sz;
498 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
502 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
504 sis190_map_to_asic(desc, mapping, rx_buf_sz);
506 sis190_make_unusable_by_asic(desc);
511 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
516 for (cur = start; cur < end; cur++) {
517 unsigned int i = cur % NUM_RX_DESC;
519 if (tp->Rx_skbuff[i])
522 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
524 if (!tp->Rx_skbuff[i])
530 static bool sis190_try_rx_copy(struct sis190_private *tp,
531 struct sk_buff **sk_buff, int pkt_size,
537 if (pkt_size >= rx_copybreak)
540 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
544 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
546 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
553 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 if ((status & CRCOK) && !(status & ErrMask))
560 if (!(status & CRCOK))
561 stats->rx_crc_errors++;
562 else if (status & OVRUN)
563 stats->rx_over_errors++;
564 else if (status & (SHORT | LIMIT))
565 stats->rx_length_errors++;
566 else if (status & (MIIER | NIBON | COLON))
567 stats->rx_frame_errors++;
573 static int sis190_rx_interrupt(struct net_device *dev,
574 struct sis190_private *tp, void __iomem *ioaddr)
576 struct net_device_stats *stats = &dev->stats;
577 u32 rx_left, cur_rx = tp->cur_rx;
580 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
581 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
583 for (; rx_left > 0; rx_left--, cur_rx++) {
584 unsigned int entry = cur_rx % NUM_RX_DESC;
585 struct RxDesc *desc = tp->RxDescRing + entry;
588 if (le32_to_cpu(desc->status) & OWNbit)
591 status = le32_to_cpu(desc->PSize);
593 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
596 if (sis190_rx_pkt_err(status, stats) < 0)
597 sis190_give_to_asic(desc, tp->rx_buf_sz);
599 struct sk_buff *skb = tp->Rx_skbuff[entry];
600 dma_addr_t addr = le32_to_cpu(desc->addr);
601 int pkt_size = (status & RxSizeMask) - 4;
602 struct pci_dev *pdev = tp->pci_dev;
604 if (unlikely(pkt_size > tp->rx_buf_sz)) {
605 net_intr(tp, KERN_INFO
606 "%s: (frag) status = %08x.\n",
609 stats->rx_length_errors++;
610 sis190_give_to_asic(desc, tp->rx_buf_sz);
615 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
616 pci_dma_sync_single_for_device(pdev, addr,
617 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
618 sis190_give_to_asic(desc, tp->rx_buf_sz);
620 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
622 tp->Rx_skbuff[entry] = NULL;
623 sis190_make_unusable_by_asic(desc);
626 skb_put(skb, pkt_size);
627 skb->protocol = eth_type_trans(skb, dev);
632 stats->rx_bytes += pkt_size;
633 if ((status & BCAST) == MCAST)
637 count = cur_rx - tp->cur_rx;
640 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
641 if (!delta && count && netif_msg_intr(tp))
642 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
643 tp->dirty_rx += delta;
645 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
646 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
651 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
656 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
660 memset(desc, 0x00, sizeof(*desc));
663 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
665 #define TxErrMask (WND | TABRT | FIFO | LINK)
667 if (!unlikely(status & TxErrMask))
671 stats->tx_window_errors++;
673 stats->tx_aborted_errors++;
675 stats->tx_fifo_errors++;
677 stats->tx_carrier_errors++;
684 static void sis190_tx_interrupt(struct net_device *dev,
685 struct sis190_private *tp, void __iomem *ioaddr)
687 struct net_device_stats *stats = &dev->stats;
688 u32 pending, dirty_tx = tp->dirty_tx;
690 * It would not be needed if queueing was allowed to be enabled
691 * again too early (hint: think preempt and unclocked smp systems).
693 unsigned int queue_stopped;
696 pending = tp->cur_tx - dirty_tx;
697 queue_stopped = (pending == NUM_TX_DESC);
699 for (; pending; pending--, dirty_tx++) {
700 unsigned int entry = dirty_tx % NUM_TX_DESC;
701 struct TxDesc *txd = tp->TxDescRing + entry;
702 u32 status = le32_to_cpu(txd->status);
708 skb = tp->Tx_skbuff[entry];
710 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
712 stats->tx_bytes += skb->len;
713 stats->collisions += ((status & ColCountMask) - 1);
716 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
717 tp->Tx_skbuff[entry] = NULL;
718 dev_kfree_skb_irq(skb);
721 if (tp->dirty_tx != dirty_tx) {
722 tp->dirty_tx = dirty_tx;
725 netif_wake_queue(dev);
730 * The interrupt handler does all of the Rx thread work and cleans up after
733 static irqreturn_t sis190_interrupt(int irq, void *__dev)
735 struct net_device *dev = __dev;
736 struct sis190_private *tp = netdev_priv(dev);
737 void __iomem *ioaddr = tp->mmio_addr;
738 unsigned int handled = 0;
741 status = SIS_R32(IntrStatus);
743 if ((status == 0xffffffff) || !status)
748 if (unlikely(!netif_running(dev))) {
749 sis190_asic_down(ioaddr);
753 SIS_W32(IntrStatus, status);
755 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
757 if (status & LinkChange) {
758 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
759 schedule_work(&tp->phy_task);
763 sis190_rx_interrupt(dev, tp, ioaddr);
765 if (status & TxQ0Int)
766 sis190_tx_interrupt(dev, tp, ioaddr);
768 return IRQ_RETVAL(handled);
771 #ifdef CONFIG_NET_POLL_CONTROLLER
772 static void sis190_netpoll(struct net_device *dev)
774 struct sis190_private *tp = netdev_priv(dev);
775 struct pci_dev *pdev = tp->pci_dev;
777 disable_irq(pdev->irq);
778 sis190_interrupt(pdev->irq, dev);
779 enable_irq(pdev->irq);
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784 struct sk_buff **sk_buff, struct RxDesc *desc)
786 struct pci_dev *pdev = tp->pci_dev;
788 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
790 dev_kfree_skb(*sk_buff);
792 sis190_make_unusable_by_asic(desc);
795 static void sis190_rx_clear(struct sis190_private *tp)
799 for (i = 0; i < NUM_RX_DESC; i++) {
800 if (!tp->Rx_skbuff[i])
802 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
806 static void sis190_init_ring_indexes(struct sis190_private *tp)
808 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
811 static int sis190_init_ring(struct net_device *dev)
813 struct sis190_private *tp = netdev_priv(dev);
815 sis190_init_ring_indexes(tp);
817 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
820 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
823 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
832 static void sis190_set_rx_mode(struct net_device *dev)
834 struct sis190_private *tp = netdev_priv(dev);
835 void __iomem *ioaddr = tp->mmio_addr;
837 u32 mc_filter[2]; /* Multicast hash filter */
840 if (dev->flags & IFF_PROMISC) {
842 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
844 mc_filter[1] = mc_filter[0] = 0xffffffff;
845 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
846 (dev->flags & IFF_ALLMULTI)) {
847 /* Too many to filter perfectly -- accept all multicasts. */
848 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
849 mc_filter[1] = mc_filter[0] = 0xffffffff;
851 struct dev_mc_list *mclist;
853 rx_mode = AcceptBroadcast | AcceptMyPhys;
854 mc_filter[1] = mc_filter[0] = 0;
855 netdev_for_each_mc_addr(mclist, dev) {
857 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
858 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
859 rx_mode |= AcceptMulticast;
863 spin_lock_irqsave(&tp->lock, flags);
865 SIS_W16(RxMacControl, rx_mode | 0x2);
866 SIS_W32(RxHashTable, mc_filter[0]);
867 SIS_W32(RxHashTable + 4, mc_filter[1]);
869 spin_unlock_irqrestore(&tp->lock, flags);
872 static void sis190_soft_reset(void __iomem *ioaddr)
874 SIS_W32(IntrControl, 0x8000);
876 SIS_W32(IntrControl, 0x0);
877 sis190_asic_down(ioaddr);
880 static void sis190_hw_start(struct net_device *dev)
882 struct sis190_private *tp = netdev_priv(dev);
883 void __iomem *ioaddr = tp->mmio_addr;
885 sis190_soft_reset(ioaddr);
887 SIS_W32(TxDescStartAddr, tp->tx_dma);
888 SIS_W32(RxDescStartAddr, tp->rx_dma);
890 SIS_W32(IntrStatus, 0xffffffff);
891 SIS_W32(IntrMask, 0x0);
892 SIS_W32(GMIIControl, 0x0);
893 SIS_W32(TxMacControl, 0x60);
894 SIS_W16(RxMacControl, 0x02);
895 SIS_W32(RxHashTable, 0x0);
897 SIS_W32(RxWolCtrl, 0x0);
898 SIS_W32(RxWolData, 0x0);
902 sis190_set_rx_mode(dev);
904 /* Enable all known interrupts by setting the interrupt mask. */
905 SIS_W32(IntrMask, sis190_intr_mask);
907 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
908 SIS_W32(RxControl, 0x1a1d);
910 netif_start_queue(dev);
913 static void sis190_phy_task(struct work_struct *work)
915 struct sis190_private *tp =
916 container_of(work, struct sis190_private, phy_task);
917 struct net_device *dev = tp->dev;
918 void __iomem *ioaddr = tp->mmio_addr;
919 int phy_id = tp->mii_if.phy_id;
924 if (!netif_running(dev))
927 val = mdio_read(ioaddr, phy_id, MII_BMCR);
928 if (val & BMCR_RESET) {
929 // FIXME: needlessly high ? -- FR 02/07/2005
930 mod_timer(&tp->timer, jiffies + HZ/10);
931 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
932 BMSR_ANEGCOMPLETE)) {
933 netif_carrier_off(dev);
934 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
936 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
944 { LPA_1000FULL, 0x07000c00 | 0x00001000,
945 "1000 Mbps Full Duplex" },
946 { LPA_1000HALF, 0x07000c00,
947 "1000 Mbps Half Duplex" },
948 { LPA_100FULL, 0x04000800 | 0x00001000,
949 "100 Mbps Full Duplex" },
950 { LPA_100HALF, 0x04000800,
951 "100 Mbps Half Duplex" },
952 { LPA_10FULL, 0x04000400 | 0x00001000,
953 "10 Mbps Full Duplex" },
954 { LPA_10HALF, 0x04000400,
955 "10 Mbps Half Duplex" },
956 { 0, 0x04000400, "unknown" }
958 u16 adv, autoexp, gigadv, gigrec;
960 val = mdio_read(ioaddr, phy_id, 0x1f);
961 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
963 val = mdio_read(ioaddr, phy_id, MII_LPA);
964 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
965 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
966 net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
967 dev->name, val, adv, autoexp);
969 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
970 /* check for gigabit speed */
971 gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
972 gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
973 val = (gigadv & (gigrec >> 2));
974 if (val & ADVERTISE_1000FULL)
976 else if (val & ADVERTISE_1000HALF)
982 for (p = reg31; p->val; p++) {
983 if ((val & p->val) == p->val)
988 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
990 if ((tp->features & F_HAS_RGMII) &&
991 (tp->features & F_PHY_BCM5461)) {
992 // Set Tx Delay in RGMII mode.
993 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
995 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
996 p->ctl |= 0x03000000;
999 SIS_W32(StationControl, p->ctl);
1001 if (tp->features & F_HAS_RGMII) {
1002 SIS_W32(RGDelay, 0x0441);
1003 SIS_W32(RGDelay, 0x0440);
1006 tp->negotiated_lpa = p->val;
1008 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
1010 netif_carrier_on(dev);
1017 static void sis190_phy_timer(unsigned long __opaque)
1019 struct net_device *dev = (struct net_device *)__opaque;
1020 struct sis190_private *tp = netdev_priv(dev);
1022 if (likely(netif_running(dev)))
1023 schedule_work(&tp->phy_task);
1026 static inline void sis190_delete_timer(struct net_device *dev)
1028 struct sis190_private *tp = netdev_priv(dev);
1030 del_timer_sync(&tp->timer);
1033 static inline void sis190_request_timer(struct net_device *dev)
1035 struct sis190_private *tp = netdev_priv(dev);
1036 struct timer_list *timer = &tp->timer;
1039 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1040 timer->data = (unsigned long)dev;
1041 timer->function = sis190_phy_timer;
1045 static void sis190_set_rxbufsize(struct sis190_private *tp,
1046 struct net_device *dev)
1048 unsigned int mtu = dev->mtu;
1050 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1051 /* RxDesc->size has a licence to kill the lower bits */
1052 if (tp->rx_buf_sz & 0x07) {
1054 tp->rx_buf_sz &= RX_BUF_MASK;
1058 static int sis190_open(struct net_device *dev)
1060 struct sis190_private *tp = netdev_priv(dev);
1061 struct pci_dev *pdev = tp->pci_dev;
1064 sis190_set_rxbufsize(tp, dev);
1067 * Rx and Tx descriptors need 256 bytes alignment.
1068 * pci_alloc_consistent() guarantees a stronger alignment.
1070 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1071 if (!tp->TxDescRing)
1074 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1075 if (!tp->RxDescRing)
1078 rc = sis190_init_ring(dev);
1082 sis190_request_timer(dev);
1084 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1086 goto err_release_timer_2;
1088 sis190_hw_start(dev);
1092 err_release_timer_2:
1093 sis190_delete_timer(dev);
1094 sis190_rx_clear(tp);
1096 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1099 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1104 static void sis190_tx_clear(struct sis190_private *tp)
1108 for (i = 0; i < NUM_TX_DESC; i++) {
1109 struct sk_buff *skb = tp->Tx_skbuff[i];
1114 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1115 tp->Tx_skbuff[i] = NULL;
1118 tp->dev->stats.tx_dropped++;
1120 tp->cur_tx = tp->dirty_tx = 0;
1123 static void sis190_down(struct net_device *dev)
1125 struct sis190_private *tp = netdev_priv(dev);
1126 void __iomem *ioaddr = tp->mmio_addr;
1127 unsigned int poll_locked = 0;
1129 sis190_delete_timer(dev);
1131 netif_stop_queue(dev);
1134 spin_lock_irq(&tp->lock);
1136 sis190_asic_down(ioaddr);
1138 spin_unlock_irq(&tp->lock);
1140 synchronize_irq(dev->irq);
1145 synchronize_sched();
1147 } while (SIS_R32(IntrMask));
1149 sis190_tx_clear(tp);
1150 sis190_rx_clear(tp);
1153 static int sis190_close(struct net_device *dev)
1155 struct sis190_private *tp = netdev_priv(dev);
1156 struct pci_dev *pdev = tp->pci_dev;
1160 free_irq(dev->irq, dev);
1162 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1163 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1165 tp->TxDescRing = NULL;
1166 tp->RxDescRing = NULL;
1171 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1172 struct net_device *dev)
1174 struct sis190_private *tp = netdev_priv(dev);
1175 void __iomem *ioaddr = tp->mmio_addr;
1176 u32 len, entry, dirty_tx;
1177 struct TxDesc *desc;
1180 if (unlikely(skb->len < ETH_ZLEN)) {
1181 if (skb_padto(skb, ETH_ZLEN)) {
1182 dev->stats.tx_dropped++;
1190 entry = tp->cur_tx % NUM_TX_DESC;
1191 desc = tp->TxDescRing + entry;
1193 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1194 netif_stop_queue(dev);
1195 net_tx_err(tp, KERN_ERR PFX
1196 "%s: BUG! Tx Ring full when queue awake!\n",
1198 return NETDEV_TX_BUSY;
1201 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1203 tp->Tx_skbuff[entry] = skb;
1205 desc->PSize = cpu_to_le32(len);
1206 desc->addr = cpu_to_le32(mapping);
1208 desc->size = cpu_to_le32(len);
1209 if (entry == (NUM_TX_DESC - 1))
1210 desc->size |= cpu_to_le32(RingEnd);
1214 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1215 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1217 desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1218 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1219 desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1226 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1228 dirty_tx = tp->dirty_tx;
1229 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1230 netif_stop_queue(dev);
1232 if (dirty_tx != tp->dirty_tx)
1233 netif_wake_queue(dev);
1236 return NETDEV_TX_OK;
1239 static void sis190_free_phy(struct list_head *first_phy)
1241 struct sis190_phy *cur, *next;
1243 list_for_each_entry_safe(cur, next, first_phy, list) {
1249 * sis190_default_phy - Select default PHY for sis190 mac.
1250 * @dev: the net device to probe for
1252 * Select first detected PHY with link as default.
1253 * If no one is link on, select PHY whose types is HOME as default.
1254 * If HOME doesn't exist, select LAN.
1256 static u16 sis190_default_phy(struct net_device *dev)
1258 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1259 struct sis190_private *tp = netdev_priv(dev);
1260 struct mii_if_info *mii_if = &tp->mii_if;
1261 void __iomem *ioaddr = tp->mmio_addr;
1264 phy_home = phy_default = phy_lan = NULL;
1266 list_for_each_entry(phy, &tp->first_phy, list) {
1267 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1269 // Link ON & Not select default PHY & not ghost PHY.
1270 if ((status & BMSR_LSTATUS) &&
1272 (phy->type != UNKNOWN)) {
1275 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1276 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1277 status | BMCR_ANENABLE | BMCR_ISOLATE);
1278 if (phy->type == HOME)
1280 else if (phy->type == LAN)
1287 phy_default = phy_home;
1289 phy_default = phy_lan;
1291 phy_default = list_first_entry(&tp->first_phy,
1292 struct sis190_phy, list);
1295 if (mii_if->phy_id != phy_default->phy_id) {
1296 mii_if->phy_id = phy_default->phy_id;
1297 net_probe(tp, KERN_INFO
1298 "%s: Using transceiver at address %d as default.\n",
1299 pci_name(tp->pci_dev), mii_if->phy_id);
1302 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1303 status &= (~BMCR_ISOLATE);
1305 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1306 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1311 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1312 struct sis190_phy *phy, unsigned int phy_id,
1315 void __iomem *ioaddr = tp->mmio_addr;
1316 struct mii_chip_info *p;
1318 INIT_LIST_HEAD(&phy->list);
1319 phy->status = mii_status;
1320 phy->phy_id = phy_id;
1322 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1323 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1325 for (p = mii_chip_table; p->type; p++) {
1326 if ((p->id[0] == phy->id[0]) &&
1327 (p->id[1] == (phy->id[1] & 0xfff0))) {
1333 phy->type = (p->type == MIX) ?
1334 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1335 LAN : HOME) : p->type;
1336 tp->features |= p->feature;
1337 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1338 pci_name(tp->pci_dev), p->name, phy_id);
1340 phy->type = UNKNOWN;
1341 net_probe(tp, KERN_INFO
1342 "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1343 pci_name(tp->pci_dev),
1344 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1348 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1350 if (tp->features & F_PHY_88E1111) {
1351 void __iomem *ioaddr = tp->mmio_addr;
1352 int phy_id = tp->mii_if.phy_id;
1358 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1360 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1362 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1368 * sis190_mii_probe - Probe MII PHY for sis190
1369 * @dev: the net device to probe for
1371 * Search for total of 32 possible mii phy addresses.
1372 * Identify and set current phy if found one,
1373 * return error if it failed to found.
1375 static int __devinit sis190_mii_probe(struct net_device *dev)
1377 struct sis190_private *tp = netdev_priv(dev);
1378 struct mii_if_info *mii_if = &tp->mii_if;
1379 void __iomem *ioaddr = tp->mmio_addr;
1383 INIT_LIST_HEAD(&tp->first_phy);
1385 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1386 struct sis190_phy *phy;
1389 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1391 // Try next mii if the current one is not accessible.
1392 if (status == 0xffff || status == 0x0000)
1395 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1397 sis190_free_phy(&tp->first_phy);
1402 sis190_init_phy(dev, tp, phy, phy_id, status);
1404 list_add(&tp->first_phy, &phy->list);
1407 if (list_empty(&tp->first_phy)) {
1408 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1409 pci_name(tp->pci_dev));
1414 /* Select default PHY for mac */
1415 sis190_default_phy(dev);
1417 sis190_mii_probe_88e1111_fixup(tp);
1420 mii_if->mdio_read = __mdio_read;
1421 mii_if->mdio_write = __mdio_write;
1422 mii_if->phy_id_mask = PHY_ID_ANY;
1423 mii_if->reg_num_mask = MII_REG_ANY;
1428 static void sis190_mii_remove(struct net_device *dev)
1430 struct sis190_private *tp = netdev_priv(dev);
1432 sis190_free_phy(&tp->first_phy);
1435 static void sis190_release_board(struct pci_dev *pdev)
1437 struct net_device *dev = pci_get_drvdata(pdev);
1438 struct sis190_private *tp = netdev_priv(dev);
1440 iounmap(tp->mmio_addr);
1441 pci_release_regions(pdev);
1442 pci_disable_device(pdev);
1446 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1448 struct sis190_private *tp;
1449 struct net_device *dev;
1450 void __iomem *ioaddr;
1453 dev = alloc_etherdev(sizeof(*tp));
1455 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1460 SET_NETDEV_DEV(dev, &pdev->dev);
1462 tp = netdev_priv(dev);
1464 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1466 rc = pci_enable_device(pdev);
1468 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1469 goto err_free_dev_1;
1474 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1475 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1477 goto err_pci_disable_2;
1479 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1480 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1482 goto err_pci_disable_2;
1485 rc = pci_request_regions(pdev, DRV_NAME);
1487 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1489 goto err_pci_disable_2;
1492 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1494 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1496 goto err_free_res_3;
1499 pci_set_master(pdev);
1501 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1503 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1506 goto err_free_res_3;
1510 tp->mmio_addr = ioaddr;
1512 sis190_irq_mask_and_ack(ioaddr);
1514 sis190_soft_reset(ioaddr);
1519 pci_release_regions(pdev);
1521 pci_disable_device(pdev);
1529 static void sis190_tx_timeout(struct net_device *dev)
1531 struct sis190_private *tp = netdev_priv(dev);
1532 void __iomem *ioaddr = tp->mmio_addr;
1535 /* Disable Tx, if not already */
1536 tmp8 = SIS_R8(TxControl);
1537 if (tmp8 & CmdTxEnb)
1538 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1541 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1542 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1544 /* Disable interrupts by clearing the interrupt mask. */
1545 SIS_W32(IntrMask, 0x0000);
1547 /* Stop a shared interrupt from scavenging while we are. */
1548 spin_lock_irq(&tp->lock);
1549 sis190_tx_clear(tp);
1550 spin_unlock_irq(&tp->lock);
1552 /* ...and finally, reset everything. */
1553 sis190_hw_start(dev);
1555 netif_wake_queue(dev);
1558 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1560 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1563 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1564 struct net_device *dev)
1566 struct sis190_private *tp = netdev_priv(dev);
1567 void __iomem *ioaddr = tp->mmio_addr;
1571 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1574 /* Check to see if there is a sane EEPROM */
1575 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1577 if ((sig == 0xffff) || (sig == 0x0000)) {
1578 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1579 pci_name(pdev), sig);
1583 /* Get MAC address from EEPROM */
1584 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1585 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1587 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1590 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
/**
 *	sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 *	@pdev: PCI device
 *	@dev: network device to get address for
 *
 *	SiS96x model, use APC CMOS RAM to store MAC address.
 *	APC CMOS RAM is accessed through ISA bridge.
 *	MAC address is read into @dev->dev_addr.
 */
1604 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1605 struct net_device *dev)
1607 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1608 struct sis190_private *tp = netdev_priv(dev);
1609 struct pci_dev *isa_bridge;
1613 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1616 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1617 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1623 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1628 /* Enable port 78h & 79h to access APC Registers. */
1629 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1630 reg = (tmp8 & ~0x02);
1631 pci_write_config_byte(isa_bridge, 0x48, reg);
1633 pci_read_config_byte(isa_bridge, 0x48, ®);
1635 for (i = 0; i < MAC_ADDR_LEN; i++) {
1636 outb(0x9 + i, 0x78);
1637 dev->dev_addr[i] = inb(0x79);
1643 sis190_set_rgmii(tp, reg);
1645 /* Restore the value to ISA Bridge */
1646 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1647 pci_dev_put(isa_bridge);
/**
 *	sis190_init_rxfilter - Initialize the Rx filter
 *	@dev: network device to initialize
 *
 *	Set receive filter address to our MAC address
 *	and enable packet filtering.
 */
1659 static inline void sis190_init_rxfilter(struct net_device *dev)
1661 struct sis190_private *tp = netdev_priv(dev);
1662 void __iomem *ioaddr = tp->mmio_addr;
1666 ctl = SIS_R16(RxMacControl);
1668 * Disable packet filtering before setting filter.
1669 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1670 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1672 SIS_W16(RxMacControl, ctl & ~0x0f00);
1674 for (i = 0; i < MAC_ADDR_LEN; i++)
1675 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1677 SIS_W16(RxMacControl, ctl);
1681 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1682 struct net_device *dev)
1686 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1690 pci_read_config_byte(pdev, 0x73, ®);
1692 if (reg & 0x00000001)
1693 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1698 static void sis190_set_speed_auto(struct net_device *dev)
1700 struct sis190_private *tp = netdev_priv(dev);
1701 void __iomem *ioaddr = tp->mmio_addr;
1702 int phy_id = tp->mii_if.phy_id;
1705 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1707 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1709 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1711 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1712 ADVERTISE_100FULL | ADVERTISE_10FULL |
1713 ADVERTISE_100HALF | ADVERTISE_10HALF);
1715 // Enable 1000 Full Mode.
1716 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1718 // Enable auto-negotiation and restart auto-negotiation.
1719 mdio_write(ioaddr, phy_id, MII_BMCR,
1720 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1723 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1725 struct sis190_private *tp = netdev_priv(dev);
1727 return mii_ethtool_gset(&tp->mii_if, cmd);
1730 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1732 struct sis190_private *tp = netdev_priv(dev);
1734 return mii_ethtool_sset(&tp->mii_if, cmd);
1737 static void sis190_get_drvinfo(struct net_device *dev,
1738 struct ethtool_drvinfo *info)
1740 struct sis190_private *tp = netdev_priv(dev);
1742 strcpy(info->driver, DRV_NAME);
1743 strcpy(info->version, DRV_VERSION);
1744 strcpy(info->bus_info, pci_name(tp->pci_dev));
1747 static int sis190_get_regs_len(struct net_device *dev)
1749 return SIS190_REGS_SIZE;
1752 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1755 struct sis190_private *tp = netdev_priv(dev);
1756 unsigned long flags;
1758 if (regs->len > SIS190_REGS_SIZE)
1759 regs->len = SIS190_REGS_SIZE;
1761 spin_lock_irqsave(&tp->lock, flags);
1762 memcpy_fromio(p, tp->mmio_addr, regs->len);
1763 spin_unlock_irqrestore(&tp->lock, flags);
1766 static int sis190_nway_reset(struct net_device *dev)
1768 struct sis190_private *tp = netdev_priv(dev);
1770 return mii_nway_restart(&tp->mii_if);
1773 static u32 sis190_get_msglevel(struct net_device *dev)
1775 struct sis190_private *tp = netdev_priv(dev);
1777 return tp->msg_enable;
1780 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1782 struct sis190_private *tp = netdev_priv(dev);
1784 tp->msg_enable = value;
1787 static const struct ethtool_ops sis190_ethtool_ops = {
1788 .get_settings = sis190_get_settings,
1789 .set_settings = sis190_set_settings,
1790 .get_drvinfo = sis190_get_drvinfo,
1791 .get_regs_len = sis190_get_regs_len,
1792 .get_regs = sis190_get_regs,
1793 .get_link = ethtool_op_get_link,
1794 .get_msglevel = sis190_get_msglevel,
1795 .set_msglevel = sis190_set_msglevel,
1796 .nway_reset = sis190_nway_reset,
1799 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1801 struct sis190_private *tp = netdev_priv(dev);
1803 return !netif_running(dev) ? -EINVAL :
1804 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1807 static const struct net_device_ops sis190_netdev_ops = {
1808 .ndo_open = sis190_open,
1809 .ndo_stop = sis190_close,
1810 .ndo_do_ioctl = sis190_ioctl,
1811 .ndo_start_xmit = sis190_start_xmit,
1812 .ndo_tx_timeout = sis190_tx_timeout,
1813 .ndo_set_multicast_list = sis190_set_rx_mode,
1814 .ndo_change_mtu = eth_change_mtu,
1815 .ndo_set_mac_address = eth_mac_addr,
1816 .ndo_validate_addr = eth_validate_addr,
1817 #ifdef CONFIG_NET_POLL_CONTROLLER
1818 .ndo_poll_controller = sis190_netpoll,
1822 static int __devinit sis190_init_one(struct pci_dev *pdev,
1823 const struct pci_device_id *ent)
1825 static int printed_version = 0;
1826 struct sis190_private *tp;
1827 struct net_device *dev;
1828 void __iomem *ioaddr;
1831 if (!printed_version) {
1832 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1833 printed_version = 1;
1836 dev = sis190_init_board(pdev);
1842 pci_set_drvdata(pdev, dev);
1844 tp = netdev_priv(dev);
1845 ioaddr = tp->mmio_addr;
1847 rc = sis190_get_mac_addr(pdev, dev);
1849 goto err_release_board;
1851 sis190_init_rxfilter(dev);
1853 INIT_WORK(&tp->phy_task, sis190_phy_task);
1855 dev->netdev_ops = &sis190_netdev_ops;
1857 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1858 dev->irq = pdev->irq;
1859 dev->base_addr = (unsigned long) 0xdead;
1860 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1862 spin_lock_init(&tp->lock);
1864 rc = sis190_mii_probe(dev);
1866 goto err_release_board;
1868 rc = register_netdev(dev);
1870 goto err_remove_mii;
1872 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
1873 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1874 ioaddr, dev->irq, dev->dev_addr);
1876 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1877 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1879 netif_carrier_off(dev);
1881 sis190_set_speed_auto(dev);
1886 sis190_mii_remove(dev);
1888 sis190_release_board(pdev);
1892 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1894 struct net_device *dev = pci_get_drvdata(pdev);
1896 sis190_mii_remove(dev);
1897 flush_scheduled_work();
1898 unregister_netdev(dev);
1899 sis190_release_board(pdev);
1900 pci_set_drvdata(pdev, NULL);
1903 static struct pci_driver sis190_pci_driver = {
1905 .id_table = sis190_pci_tbl,
1906 .probe = sis190_init_one,
1907 .remove = __devexit_p(sis190_remove_one),
1910 static int __init sis190_init_module(void)
1912 return pci_register_driver(&sis190_pci_driver);
1915 static void __exit sis190_cleanup_module(void)
1917 pci_unregister_driver(&sis190_pci_driver);
/* Standard module entry/exit hooks. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);