diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ca4c9ac..9c14975 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -19,14 +19,13 @@
 	See the file COPYING in this distribution for more information.
 
 	Contributors:
-	
+
 		Wake-on-LAN support - Felipe Damasio
 		PCI suspend/resume  - Felipe Damasio
 		LinkChg interrupt   - Felipe Damasio
-	
+
 	TODO:
 	* Test Tx checksumming thoroughly
-	* Implement dev->tx_timeout
 
 	Low priority TODO:
 	* Complete reset on PciErr
@@ -47,13 +46,15 @@
 
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME		"8139cp"
-#define DRV_VERSION		"1.2"
+#define DRV_VERSION		"1.3"
 #define DRV_RELDATE		"Mar 22, 2004"
 
-#include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -63,6 +64,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -79,7 +81,7 @@
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define CP_VLAN_TAG_USED 1
 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
-	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
+	do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
 #else
 #define CP_VLAN_TAG_USED 0
 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
@@ -88,29 +90,23 @@
 
 /* These identify the driver base version and may not be removed. */
 static char version[] =
-KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
 MODULE_AUTHOR("Jeff Garzik ");
 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
+MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 
 static int debug = -1;
-MODULE_PARM (debug, "i");
+module_param(debug, int, 0);
 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
 static int multicast_filter_limit = 32;
-MODULE_PARM (multicast_filter_limit, "i");
+module_param(multicast_filter_limit, int, 0);
 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
 
-#define PFX			DRV_NAME ": "
-
-#ifndef TRUE
-#define FALSE 0
-#define TRUE (!FALSE)
-#endif
-
 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
 				 NETIF_MSG_PROBE 	| \
 				 NETIF_MSG_LINK)
@@ -132,7 +128,6 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered mu
 	 (CP)->tx_tail - (CP)->tx_head - 1)
 
 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
-#define RX_OFFSET		2
 #define CP_INTERNAL_PHY		32
 
 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
@@ -187,6 +182,9 @@ enum {
 	RingEnd		= (1 << 30), /* End of descriptor ring */
 	FirstFrag	= (1 << 29), /* First segment of a packet */
 	LastFrag	= (1 << 28), /* Final segment of a packet */
+	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
+	MSSShift	= 16,	     /* MSS value position */
+	MSSMask		= 0xfff,     /* MSS value: 11 bits */
 	TxError		= (1 << 23), /* Tx error summary */
 	RxError		= (1 << 20), /* Rx error summary */
 	IPCS		= (1 << 18), /* Calculate IP checksum */
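
A minimal, self-contained sketch (not part of the patch) of how the new LargeSend/MSSShift/MSSMask fields above combine with the existing descriptor bits into a Tx opts1 word. The constant values are copied from this diff; build_tso_opts1() is an illustrative name, not a driver function.

#include <stdint.h>
#include <stdio.h>

#define DescOwn   (1u << 31)  /* Descriptor is owned by NIC */
#define FirstFrag (1u << 29)  /* First segment of a packet */
#define LastFrag  (1u << 28)  /* Final segment of a packet */
#define LargeSend (1u << 27)  /* TCP Large Send Offload (TSO) */
#define MSSShift  16          /* MSS value position */
#define MSSMask   0xfffu      /* MSS value field mask */

static uint32_t build_tso_opts1(uint32_t len, uint32_t mss)
{
	/* LargeSend replaces the per-packet IPCS/TCPCS checksum flags;
	 * the MSS is masked and shifted into the field at bit 16. */
	return DescOwn | FirstFrag | LastFrag | LargeSend |
	       ((mss & MSSMask) << MSSShift) | len;
}

int main(void)
{
	/* A full-sized Ethernet frame with the usual 1460-byte TCP MSS. */
	printf("opts1 = 0x%08x\n", (unsigned)build_tso_opts1(1514, 1460));
	return 0;
}
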
@@ -305,31 +303,25 @@ static const unsigned int cp_rx_config =
 	  (RX_DMA_BURST << RxCfgDMAShift);
 
 struct cp_desc {
-	u32		opts1;
-	u32		opts2;
-	u64		addr;
-};
-
-struct ring_info {
-	struct sk_buff		*skb;
-	dma_addr_t		mapping;
-	unsigned		frag;
+	__le32		opts1;
+	__le32		opts2;
+	__le64		addr;
 };
 
 struct cp_dma_stats {
-	u64			tx_ok;
-	u64			rx_ok;
-	u64			tx_err;
-	u32			rx_err;
-	u16			rx_fifo;
-	u16			frame_align;
-	u32			tx_ok_1col;
-	u32			tx_ok_mcol;
-	u64			rx_ok_phys;
-	u64			rx_ok_bcast;
-	u32			rx_ok_mcast;
-	u16			tx_abort;
-	u16			tx_underrun;
+	__le64			tx_ok;
+	__le64			rx_ok;
+	__le64			tx_err;
+	__le32			rx_err;
+	__le16			rx_fifo;
+	__le16			frame_align;
+	__le32			tx_ok_1col;
+	__le32			tx_ok_mcol;
+	__le64			rx_ok_phys;
+	__le64			rx_ok_bcast;
+	__le32			rx_ok_mcast;
+	__le16			tx_abort;
+	__le16			tx_underrun;
 } __attribute__((packed));
 
 struct cp_extra_stats {
@@ -342,32 +334,31 @@ struct cp_private {
 	spinlock_t		lock;
 	u32			msg_enable;
 
+	struct napi_struct	napi;
+
 	struct pci_dev		*pdev;
 	u32			rx_config;
 	u16			cpcmd;
 
-	struct net_device_stats net_stats;
 	struct cp_extra_stats	cp_stats;
-	struct cp_dma_stats	*nic_stats;
-	dma_addr_t		nic_stats_dma;
 
-	unsigned		rx_tail		____cacheline_aligned;
+	unsigned		rx_head		____cacheline_aligned;
+	unsigned		rx_tail;
 	struct cp_desc		*rx_ring;
-	struct ring_info	rx_skb[CP_RX_RING_SIZE];
-	unsigned		rx_buf_sz;
+	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
 
 	unsigned		tx_head		____cacheline_aligned;
 	unsigned		tx_tail;
-
 	struct cp_desc		*tx_ring;
-	struct ring_info	tx_skb[CP_TX_RING_SIZE];
-	dma_addr_t		ring_dma;
+	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+
+	unsigned		rx_buf_sz;
+	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 
 #if CP_VLAN_TAG_USED
 	struct vlan_group	*vlgrp;
 #endif
-
-	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+	dma_addr_t		ring_dma;
 
 	struct mii_if_info	mii_if;
 };
@@ -395,12 +386,18 @@ struct cp_private {
 static void __cp_set_rx_mode (struct net_device *dev);
 static void cp_tx (struct cp_private *cp);
 static void cp_clean_rings (struct cp_private *cp);
-
-static struct pci_device_id cp_pci_tbl[] = {
-	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
-	{ PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cp_poll_controller(struct net_device *dev);
+#endif
+static int cp_get_eeprom_len(struct net_device *dev);
+static int cp_get_eeprom(struct net_device *dev,
+			 struct ethtool_eeprom *eeprom, u8 *data);
+static int cp_set_eeprom(struct net_device *dev,
+			 struct ethtool_eeprom *eeprom, u8 *data);
+
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
 	{ },
 };
 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
@@ -433,21 +430,12 @@ static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 
 	spin_lock_irqsave(&cp->lock, flags);
 	cp->vlgrp = grp;
-	cp->cpcmd |= RxVlanOn;
-	cpw16(CpCmd, cp->cpcmd);
-	spin_unlock_irqrestore(&cp->lock, flags);
-}
-
-static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
-	struct cp_private *cp = netdev_priv(dev);
-	unsigned long flags;
+	if (grp)
+		cp->cpcmd |= RxVlanOn;
+	else
+		cp->cpcmd &= ~RxVlanOn;
 
-	spin_lock_irqsave(&cp->lock, flags);
-	cp->cpcmd &= ~RxVlanOn;
 	cpw16(CpCmd, cp->cpcmd);
-	if (cp->vlgrp)
-		cp->vlgrp->vlan_devices[vid] = NULL;
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 #endif /* CP_VLAN_TAG_USED */
@@ -455,7 +443,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 static inline void cp_set_rxbufsize (struct cp_private *cp)
 {
 	unsigned int mtu = cp->dev->mtu;
-	
+
 	if (mtu > ETH_DATA_LEN)
 		/* MTU + ethernet header + FCS + optional VLAN tag */
 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
@@ -468,14 +456,13 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 {
 	skb->protocol = eth_type_trans (skb, cp->dev);
 
-	cp->net_stats.rx_packets++;
-	cp->net_stats.rx_bytes += skb->len;
-	cp->dev->last_rx = jiffies;
+	cp->dev->stats.rx_packets++;
+	cp->dev->stats.rx_bytes += skb->len;
 
 #if CP_VLAN_TAG_USED
-	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
+	if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
 		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
-					 be16_to_cpu(desc->opts2 & 0xffff));
+					 swab16(le32_to_cpu(desc->opts2) & 0xffff));
 	} else
 #endif
 		netif_receive_skb(skb);
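
An editor's sketch (not from the patch) of why the receive path above now uses swab16(le32_to_cpu(opts2) & 0xffff) for the VLAN tag: the RTL8139C+ keeps its descriptors little-endian, but stores the 802.1Q tag in network (big-endian) byte order inside opts2, so one le32 conversion plus one explicit byte swap recovers the tag on any host.

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	/* Example: VLAN ID 5 -> on-wire TCI 0x0005, stored by the chip as
	 * bytes 00 05, which reads back as 0x0500 after le32_to_cpu(). */
	uint32_t opts2 = 0x00000500;
	uint16_t tag = swab16((uint16_t)(opts2 & 0xffff));

	printf("VLAN TCI = 0x%04x\n", tag);	/* prints 0x0005 */
	return 0;
}
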
@@ -484,27 +471,25 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
 			    u32 status, u32 len)
 {
-	if (netif_msg_rx_err (cp))
-		printk (KERN_DEBUG
-			"%s: rx err, slot %d status 0x%x len %d\n",
-			cp->dev->name, rx_tail, status, len);
-	cp->net_stats.rx_errors++;
+	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+		  rx_tail, status, len);
+	cp->dev->stats.rx_errors++;
 	if (status & RxErrFrame)
-		cp->net_stats.rx_frame_errors++;
+		cp->dev->stats.rx_frame_errors++;
 	if (status & RxErrCRC)
-		cp->net_stats.rx_crc_errors++;
+		cp->dev->stats.rx_crc_errors++;
 	if ((status & RxErrRunt) || (status & RxErrLong))
-		cp->net_stats.rx_length_errors++;
+		cp->dev->stats.rx_length_errors++;
 	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
-		cp->net_stats.rx_length_errors++;
+		cp->dev->stats.rx_length_errors++;
 	if (status & RxErrFIFO)
-		cp->net_stats.rx_fifo_errors++;
+		cp->dev->stats.rx_fifo_errors++;
 }
 
 static inline unsigned int cp_rx_csum_ok (u32 status)
 {
 	unsigned int protocol = (status >> 16) & 0x3;
-	
+
 	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
 		return 1;
 	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
@@ -514,12 +499,12 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
 	return 0;
 }
 
-static int cp_rx_poll (struct net_device *dev, int *budget)
+static int cp_rx_poll(struct napi_struct *napi, int budget)
 {
-	struct cp_private *cp = netdev_priv(dev);
-	unsigned rx_tail = cp->rx_tail;
-	unsigned rx_work = dev->quota;
-	unsigned rx;
+	struct cp_private *cp = container_of(napi, struct cp_private, napi);
+	struct net_device *dev = cp->dev;
+	unsigned int rx_tail = cp->rx_tail;
+	int rx;
 
 rx_status_loop:
 	rx = 0;
@@ -530,11 +515,10 @@ rx_status_loop:
 		dma_addr_t mapping;
 		struct sk_buff *skb, *new_skb;
 		struct cp_desc *desc;
-		unsigned buflen;
+		const unsigned buflen = cp->rx_buf_sz;
 
-		skb = cp->rx_skb[rx_tail].skb;
-		if (!skb)
-			BUG();
+		skb = cp->rx_skb[rx_tail];
+		BUG_ON(!skb);
 
 		desc = &cp->rx_ring[rx_tail];
 		status = le32_to_cpu(desc->opts1);
@@ -542,7 +526,7 @@ rx_status_loop:
 			break;
 
 		len = (status & 0x1fff) - 4;
-		mapping = cp->rx_skb[rx_tail].mapping;
+		mapping = le64_to_cpu(desc->addr);
 
 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
 			/* we don't support incoming fragmented frames.
@@ -551,7 +535,7 @@ rx_status_loop:
 			 * that RX fragments are never encountered
 			 */
 			cp_rx_err_acct(cp, rx_tail, status, len);
-			cp->net_stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			cp->cp_stats.rx_frags++;
 			goto rx_next;
 		}
@@ -561,21 +545,16 @@ rx_status_loop:
 			goto rx_next;
 		}
 
-		if (netif_msg_rx_status(cp))
-			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
-			       cp->dev->name, rx_tail, status, len);
+		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+			  rx_tail, status, len);
 
-		buflen = cp->rx_buf_sz + RX_OFFSET;
-		new_skb = dev_alloc_skb (buflen);
+		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
 		if (!new_skb) {
-			cp->net_stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			goto rx_next;
 		}
 
-		skb_reserve(new_skb, RX_OFFSET);
-		new_skb->dev = cp->dev;
-
-		pci_unmap_single(cp->pdev, mapping,
+		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
 		/* Handle checksum offloading for incoming packets. */
@@ -586,11 +565,9 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping =
-		cp->rx_skb[rx_tail].mapping =
-			pci_map_single(cp->pdev, new_skb->tail,
-				       buflen, PCI_DMA_FROMDEVICE);
-		cp->rx_skb[rx_tail].skb = new_skb;
+		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+					 PCI_DMA_FROMDEVICE);
+		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
@@ -605,35 +582,31 @@ rx_next:
 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
 		rx_tail = NEXT_RX(rx_tail);
 
-		if (!rx_work--)
+		if (rx >= budget)
 			break;
 	}
 
 	cp->rx_tail = rx_tail;
 
-	dev->quota -= rx;
-	*budget -= rx;
-
 	/* if we did not reach work limit, then we're done with
 	 * this round of polling
 	 */
-	if (rx_work) {
+	if (rx < budget) {
+		unsigned long flags;
+
 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
 			goto rx_status_loop;
 
-		local_irq_disable();
+		spin_lock_irqsave(&cp->lock, flags);
 		cpw16_f(IntrMask, cp_intr_mask);
-		__netif_rx_complete(dev);
-		local_irq_enable();
-
-		return 0;	/* done */
+		__napi_complete(napi);
+		spin_unlock_irqrestore(&cp->lock, flags);
 	}
 
-	return 1;		/* not done */
+	return rx;
 }
 
-static irqreturn_t
-cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct cp_private *cp;
@@ -647,9 +620,8 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 	if (!status || (status == 0xFFFF))
 		return IRQ_NONE;
 
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
-		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+		  status, cpr8(Cmd), cpr16(CpCmd));
 
 	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
 
@@ -663,15 +635,15 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 	}
 
 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
-		if (netif_rx_schedule_prep(dev)) {
+		if (napi_schedule_prep(&cp->napi)) {
 			cpw16_f(IntrMask, cp_norx_intr_mask);
-			__netif_rx_schedule(dev);
+			__napi_schedule(&cp->napi);
 		}
 
 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
 		cp_tx(cp);
 	if (status & LinkChg)
-		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
 
 	spin_unlock(&cp->lock);
 
@@ -680,8 +652,8 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
 		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
 
-		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
-		       dev->name, status, pci_status);
+		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+			   status, pci_status);
 
 		/* TODO: reset hardware */
 	}
@@ -689,53 +661,66 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void cp_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	cp_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
 static void cp_tx (struct cp_private *cp)
 {
 	unsigned tx_head = cp->tx_head;
 	unsigned tx_tail = cp->tx_tail;
 
 	while (tx_tail != tx_head) {
+		struct cp_desc *txd = cp->tx_ring + tx_tail;
 		struct sk_buff *skb;
 		u32 status;
 
 		rmb();
-		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+		status = le32_to_cpu(txd->opts1);
 		if (status & DescOwn)
 			break;
 
-		skb = cp->tx_skb[tx_tail].skb;
-		if (!skb)
-			BUG();
+		skb = cp->tx_skb[tx_tail];
+		BUG_ON(!skb);
 
-		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 skb->len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+				 le32_to_cpu(txd->opts1) & 0xffff,
+				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
-				if (netif_msg_tx_err(cp))
-					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
-					       cp->dev->name, status);
-				cp->net_stats.tx_errors++;
+				netif_dbg(cp, tx_err, cp->dev,
+					  "tx err, status 0x%x\n", status);
+				cp->dev->stats.tx_errors++;
 				if (status & TxOWC)
-					cp->net_stats.tx_window_errors++;
+					cp->dev->stats.tx_window_errors++;
 				if (status & TxMaxCol)
-					cp->net_stats.tx_aborted_errors++;
+					cp->dev->stats.tx_aborted_errors++;
 				if (status & TxLinkFail)
-					cp->net_stats.tx_carrier_errors++;
+					cp->dev->stats.tx_carrier_errors++;
 				if (status & TxFIFOUnder)
-					cp->net_stats.tx_fifo_errors++;
+					cp->dev->stats.tx_fifo_errors++;
 			} else {
-				cp->net_stats.collisions +=
+				cp->dev->stats.collisions +=
 					((status >> TxColCntShift) & TxColCntMask);
-				cp->net_stats.tx_packets++;
-				cp->net_stats.tx_bytes += skb->len;
-				if (netif_msg_tx_done(cp))
-					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+				cp->dev->stats.tx_packets++;
+				cp->dev->stats.tx_bytes += skb->len;
+				netif_dbg(cp, tx_done, cp->dev,
+					  "tx done, slot %d\n", tx_tail);
 			}
 			dev_kfree_skb_irq(skb);
 		}
 
-		cp->tx_skb[tx_tail].skb = NULL;
+		cp->tx_skb[tx_tail] = NULL;
 
 		tx_tail = NEXT_TX(tx_tail);
 	}
@@ -746,82 +731,83 @@ static void cp_tx (struct cp_private *cp)
 		netif_wake_queue(cp->dev);
 }
 
-static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+				  struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor;
+	u32 eor, flags;
+	unsigned long intr_flags;
 #if CP_VLAN_TAG_USED
 	u32 vlan_tag = 0;
 #endif
+	int mss = 0;
 
-	spin_lock_irq(&cp->lock);
+	spin_lock_irqsave(&cp->lock, intr_flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&cp->lock);
-		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
-		       dev->name);
-		return 1;
+		spin_unlock_irqrestore(&cp->lock, intr_flags);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+		return NETDEV_TX_BUSY;
 	}
 
 #if CP_VLAN_TAG_USED
 	if (cp->vlgrp && vlan_tx_tag_present(skb))
-		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
+		vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
 #endif
 
 	entry = cp->tx_head;
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+	if (dev->features & NETIF_F_TSO)
+		mss = skb_shinfo(skb)->gso_size;
+
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
 		u32 len;
 		dma_addr_t mapping;
 
 		len = skb->len;
-		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
 		CP_VLAN_TX_TAG(txd, vlan_tag);
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_HW) {
-			const struct iphdr *ip = skb->nh.iph;
+		flags = eor | len | DescOwn | FirstFrag | LastFrag;
+
+		if (mss)
+			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
+		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			const struct iphdr *ip = ip_hdr(skb);
 			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-							 FirstFrag | LastFrag |
-							 IPCS | TCPCS);
+				flags |= IPCS | TCPCS;
 			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-							 FirstFrag | LastFrag |
-							 IPCS | UDPCS);
+				flags |= IPCS | UDPCS;
 			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-						 FirstFrag | LastFrag);
+				WARN_ON(1);	/* we need a WARN() */
+		}
+
+		txd->opts1 = cpu_to_le32(flags);
 		wmb();
 
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].frag = 0;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
 		u32 first_len, first_eor;
 		dma_addr_t first_mapping;
 		int frag, first_entry = entry;
-		const struct iphdr *ip = skb->nh.iph;
+		const struct iphdr *ip = ip_hdr(skb);
 
 		/* We must give this initial chunk to the device last.
 		 * Otherwise we could race with the device.
 		 */
 		first_eor = eor;
 		first_len = skb_headlen(skb);
-		first_mapping = pci_map_single(cp->pdev, skb->data,
+		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].frag = 1;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -831,22 +817,25 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			dma_addr_t mapping;
 
 			len = this_frag->size;
-			mapping = pci_map_single(cp->pdev,
+			mapping = dma_map_single(&cp->pdev->dev,
 						 ((void *) page_address(this_frag->page) +
 						  this_frag->page_offset),
 						 len, PCI_DMA_TODEVICE);
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			if (skb->ip_summed == CHECKSUM_HW) {
-				ctrl = eor | len | DescOwn | IPCS;
+			ctrl = eor | len | DescOwn;
+
+			if (mss)
+				ctrl |= LargeSend |
+					((mss & MSSMask) << MSSShift);
+			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= TCPCS;
+					ctrl |= IPCS | TCPCS;
 				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= UDPCS;
+					ctrl |= IPCS | UDPCS;
 				else
 					BUG();
-			} else
-				ctrl = eor | len | DescOwn;
+			}
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -859,9 +848,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
-			cp->tx_skb[entry].skb = skb;
-			cp->tx_skb[entry].mapping = mapping;
-			cp->tx_skb[entry].frag = frag + 2;
+			cp->tx_skb[entry] = skb;
 			entry = NEXT_TX(entry);
 		}
 
@@ -870,7 +857,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_HW) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			if (ip->protocol == IPPROTO_TCP)
 				txd->opts1 = cpu_to_le32(first_eor | first_len |
 							 FirstFrag | DescOwn |
@@ -887,18 +874,16 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		wmb();
 	}
 	cp->tx_head = entry;
-	if (netif_msg_tx_queued(cp))
-		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
-		       dev->name, entry, skb->len);
+	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&cp->lock);
+	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
-	dev->trans_start = jiffies;
 
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -908,30 +893,27 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 static void __cp_set_rx_mode (struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
 	u32 mc_filter[2];	/* Multicast hash filter */
-	int i, rx_mode;
+	int rx_mode;
 	u32 tmp;
 
 	/* Note: do not reorder, GCC is clever about common statements. */
 	if (dev->flags & IFF_PROMISC) {
 		/* Unconditionally log net taps. */
-		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			dev->name);
 		rx_mode =
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to filter perfectly -- accept all multicasts. */
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
 	} else {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 		rx_mode = AcceptBroadcast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0;
-		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-		     i++, mclist = mclist->next) {
-			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+		netdev_for_each_mc_addr(ha, dev) {
+			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 
 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 			rx_mode |= AcceptMulticast;
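
A standalone sketch (not part of the patch) of the multicast hash used above: the top 6 bits of the big-endian Ethernet CRC of the MAC address select one of 64 bits across the two MAR filter registers. The ether_crc() below mirrors the kernel's lib implementation (polynomial 0x04c11db7, data bits fed LSB-first); the example address is illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			uint32_t feedback = ((crc >> 31) ^ octet) & 1;
			crc = (crc << 1) ^ (feedback ? 0x04c11db7 : 0);
		}
	}
	return crc;
}

int main(void)
{
	/* 01:00:5e:00:00:fb - an example IPv4 multicast MAC address */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc(6, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("bit %d -> filter word %d, mask 0x%08x\n",
	       bit_nr, bit_nr >> 5, mc_filter[bit_nr >> 5]);
	return 0;
}
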
@@ -961,7 +943,7 @@ static void cp_set_rx_mode (struct net_device *dev)
 static void __cp_get_stats(struct cp_private *cp)
 {
 	/* only lower 24 bits valid; write any value to clear */
-	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
+	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
 	cpw32 (RxMissed, 0);
 }
 
@@ -976,7 +958,7 @@ static struct net_device_stats *cp_get_stats(struct net_device *dev)
 		__cp_get_stats(cp);
 	spin_unlock_irqrestore(&cp->lock, flags);
 
-	return &cp->net_stats;
+	return &dev->stats;
 }
 
 static void cp_stop_hw (struct cp_private *cp)
@@ -1001,11 +983,10 @@ static void cp_reset_hw (struct cp_private *cp)
 		if (!(cpr8(Cmd) & CmdReset))
 			return;
 
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(10);
+		schedule_timeout_uninterruptible(10);
 	}
 
-	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
+	netdev_err(cp->dev, "hardware reset timeout\n");
 }
 
 static inline void cp_start_hw (struct cp_private *cp)
@@ -1024,8 +1005,8 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw8_f (Cfg9346, Cfg9346_Unlock);
 
 	/* Restore our idea of the MAC address. */
-	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
-	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
 	cp_start_hw(cp);
 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
@@ -1038,7 +1019,7 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw8(Config3, PARMEnable);
 	cp->wol_enabled = 0;
 
-	cpw8(Config5, cpr8(Config5) & PMEStatus); 
+	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
 	cpw32_f(HiTxRingAddr, 0);
 	cpw32_f(HiTxRingAddr + 4, 0);
@@ -1058,27 +1039,25 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw8_f(Cfg9346, Cfg9346_Lock);
 }
 
-static int cp_refill_rx (struct cp_private *cp)
+static int cp_refill_rx(struct cp_private *cp)
 {
+	struct net_device *dev = cp->dev;
 	unsigned i;
 
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
+		dma_addr_t mapping;
 
-		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
+		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
 		if (!skb)
 			goto err_out;
 
-		skb->dev = cp->dev;
-		skb_reserve(skb, RX_OFFSET);
-
-		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
-			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		cp->rx_skb[i].skb = skb;
-		cp->rx_skb[i].frag = 0;
+		mapping = dma_map_single(&cp->pdev->dev, skb->data,
+					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
-		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+		cp->rx_ring[i].addr = cpu_to_le64(mapping);
 		if (i == (CP_RX_RING_SIZE - 1))
 			cp->rx_ring[i].opts1 =
 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
@@ -1094,13 +1073,18 @@ err_out:
 	return -ENOMEM;
 }
 
+static void cp_init_rings_index (struct cp_private *cp)
+{
+	cp->rx_tail = 0;
+	cp->tx_head = cp->tx_tail = 0;
+}
+
 static int cp_init_rings (struct cp_private *cp)
 {
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
 
-	cp->rx_tail = 0;
-	cp->tx_head = cp->tx_tail = 0;
+	cp_init_rings_index(cp);
 
 	return cp_refill_rx (cp);
 }
@@ -1109,56 +1093,59 @@ static int cp_alloc_rings (struct cp_private *cp)
 {
 	void *mem;
 
-	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
+	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
+				 &cp->ring_dma, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	cp->rx_ring = mem;
 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
 
-	mem += (CP_RING_BYTES - CP_STATS_SIZE);
-	cp->nic_stats = mem;
-	cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
-
 	return cp_init_rings(cp);
 }
 
 static void cp_clean_rings (struct cp_private *cp)
 {
+	struct cp_desc *desc;
 	unsigned i;
 
-	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
-	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
-
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
-		if (cp->rx_skb[i].skb) {
-			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+		if (cp->rx_skb[i]) {
+			desc = cp->rx_ring + i;
+			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i].skb);
+			dev_kfree_skb(cp->rx_skb[i]);
 		}
 	}
 
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
-		if (cp->tx_skb[i].skb) {
-			struct sk_buff *skb = cp->tx_skb[i].skb;
-			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb(skb);
-			cp->net_stats.tx_dropped++;
+		if (cp->tx_skb[i]) {
+			struct sk_buff *skb = cp->tx_skb[i];
+
+			desc = cp->tx_ring + i;
+			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
+					 le32_to_cpu(desc->opts1) & 0xffff,
+					 PCI_DMA_TODEVICE);
+			if (le32_to_cpu(desc->opts1) & LastFrag)
+				dev_kfree_skb(skb);
+			cp->dev->stats.tx_dropped++;
 		}
 	}
 
-	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
-	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
+	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
+	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
 }
 
 static void cp_free_rings (struct cp_private *cp)
 {
 	cp_clean_rings(cp);
-	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
+			  cp->ring_dma);
 	cp->rx_ring = NULL;
 	cp->tx_ring = NULL;
-	cp->nic_stats = NULL;
 }
 
 static int cp_open (struct net_device *dev)
@@ -1166,26 +1153,28 @@ static int cp_open (struct net_device *dev)
 	struct cp_private *cp = netdev_priv(dev);
 	int rc;
 
-	if (netif_msg_ifup(cp))
-		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+	netif_dbg(cp, ifup, dev, "enabling interface\n");
 
 	rc = cp_alloc_rings(cp);
 	if (rc)
 		return rc;
 
+	napi_enable(&cp->napi);
+
 	cp_init_hw(cp);
 
-	rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
+	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		goto err_out_hw;
 
 	netif_carrier_off(dev);
-	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
+	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
 	netif_start_queue(dev);
 
 	return 0;
 
 err_out_hw:
+	napi_disable(&cp->napi);
 	cp_stop_hw(cp);
 	cp_free_rings(cp);
 	return rc;
@@ -1196,8 +1185,9 @@ static int cp_close (struct net_device *dev)
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
 
-	if (netif_msg_ifdown(cp))
-		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+	napi_disable(&cp->napi);
+
+	netif_dbg(cp, ifdown, dev, "disabling interface\n");
 
 	spin_lock_irqsave(&cp->lock, flags);
 
@@ -1208,13 +1198,34 @@ static int cp_close (struct net_device *dev)
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 
-	synchronize_irq(dev->irq);
 	free_irq(dev->irq, dev);
 
 	cp_free_rings(cp);
 	return 0;
 }
 
+static void cp_tx_timeout(struct net_device *dev)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+		    cpr8(Cmd), cpr16(CpCmd),
+		    cpr16(IntrStatus), cpr16(IntrMask));
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cp_stop_hw(cp);
+	cp_clean_rings(cp);
+	rc = cp_init_rings(cp);
+	cp_start_hw(cp);
+
+	netif_wake_queue(dev);
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+}
+
 #ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -1250,7 +1261,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
 }
 #endif /* BROKEN */
 
-static char mii_2_8139_map[8] = {
+static const char mii_2_8139_map[8] = {
 	BasicModeCtrl,
 	BasicModeStatus,
 	0,
@@ -1327,7 +1338,7 @@ static void netdev_get_wol (struct cp_private *cp,
 	                 WAKE_MCAST | WAKE_UCAST;
 	/* We don't need to go on if WOL is disabled */
 	if (!cp->wol_enabled) return;
-	
+
 	options        = cpr8 (Config3);
 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
@@ -1353,9 +1364,14 @@ static int cp_get_regs_len(struct net_device *dev)
 	return CP_REGS_SIZE;
 }
 
-static int cp_get_stats_count (struct net_device *dev)
+static int cp_get_sset_count (struct net_device *dev, int sset)
 {
-	return CP_NUM_STATS;
+	switch (sset) {
+	case ETH_SS_STATS:
+		return CP_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
 }
 
 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1487,46 +1503,53 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 				  struct ethtool_stats *estats, u64 *tmp_stats)
 {
 	struct cp_private *cp = netdev_priv(dev);
-	unsigned int work = 100;
+	struct cp_dma_stats *nic_stats;
+	dma_addr_t dma;
 	int i;
 
+	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
+				       &dma, GFP_KERNEL);
+	if (!nic_stats)
+		return;
+
 	/* begin NIC statistics dump */
-	cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
-	cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
+	cpw32(StatsAddr + 4, (u64)dma >> 32);
+	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
 	cpr32(StatsAddr);
 
-	while (work-- > 0) {
+	for (i = 0; i < 1000; i++) {
 		if ((cpr32(StatsAddr) & DumpStats) == 0)
 			break;
-		cpu_relax();
+		udelay(10);
 	}
-
-	if (cpr32(StatsAddr) & DumpStats)
-		return /* -EIO */;
+	cpw32(StatsAddr, 0);
+	cpw32(StatsAddr + 4, 0);
+	cpr32(StatsAddr);
 
 	i = 0;
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
 	tmp_stats[i++] = cp->cp_stats.rx_frags;
-	if (i != CP_NUM_STATS)
-		BUG();
+	BUG_ON(i != CP_NUM_STATS);
+
+	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
 }
 
-static struct ethtool_ops cp_ethtool_ops = {
+static const struct ethtool_ops cp_ethtool_ops = {
 	.get_drvinfo		= cp_get_drvinfo,
 	.get_regs_len		= cp_get_regs_len,
-	.get_stats_count	= cp_get_stats_count,
+	.get_sset_count		= cp_get_sset_count,
 	.get_settings		= cp_get_settings,
 	.set_settings		= cp_set_settings,
 	.nway_reset		= cp_nway_reset,
@@ -1535,15 +1558,17 @@ static struct ethtool_ops cp_ethtool_ops = {
 	.set_msglevel		= cp_set_msglevel,
 	.get_rx_csum		= cp_get_rx_csum,
 	.set_rx_csum		= cp_set_rx_csum,
-	.get_tx_csum		= ethtool_op_get_tx_csum,
 	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
-	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
+	.set_tso		= ethtool_op_set_tso,
 	.get_regs		= cp_get_regs,
 	.get_wol		= cp_get_wol,
 	.set_wol		= cp_set_wol,
 	.get_strings		= cp_get_strings,
 	.get_ethtool_stats	= cp_get_ethtool_stats,
+	.get_eeprom_len		= cp_get_eeprom_len,
+	.get_eeprom		= cp_get_eeprom,
+	.set_eeprom		= cp_set_eeprom,
 };
 
 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1561,6 +1586,28 @@ static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
 	return rc;
 }
 
+static int cp_set_mac_address(struct net_device *dev, void *p)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	spin_lock_irq(&cp->lock);
+
+	cpw8_f(Cfg9346, Cfg9346_Unlock);
+	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+	cpw8_f(Cfg9346, Cfg9346_Lock);
+
+	spin_unlock_irq(&cp->lock);
+
+	return 0;
+}
+
 /* Serial EEPROM section. */
 
 /* EEPROM_Ctrl bits. */
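
An editor's sketch (not from the patch) of the MAC0 register writes in cp_set_mac_address() above: the six address bytes are written as two 32-bit accesses, and reading the byte array as a little-endian u32 preserves the byte order the chip expects regardless of host endianness. The shifts below make that conversion explicit; the address is a locally administered example.

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* 6 MAC bytes plus 2 bytes of padding, mirroring how the driver
	 * reads 4 bytes starting at dev_addr + 4. */
	const uint8_t mac[8] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0, 0 };

	printf("MAC0+0 <- 0x%08x\n", get_le32(mac));
	printf("MAC0+4 <- 0x%08x\n", get_le32(mac + 4));
	return 0;
}
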
@@ -1579,24 +1626,32 @@ static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
 #define eeprom_delay()	readl(ee_addr)
 
 /* The EEPROM commands include the alway-set leading bit. */
+#define EE_EXTEND_CMD	(4)
 #define EE_WRITE_CMD	(5)
 #define EE_READ_CMD	(6)
 #define EE_ERASE_CMD	(7)
 
-static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
-{
-	int i;
-	unsigned retval = 0;
-	void __iomem *ee_addr = ioaddr + Cfg9346;
-	int read_cmd = location | (EE_READ_CMD << addr_len);
+#define EE_EWDS_ADDR	(0)
+#define EE_WRAL_ADDR	(1)
+#define EE_ERAL_ADDR	(2)
+#define EE_EWEN_ADDR	(3)
+
+#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
 
+static void eeprom_cmd_start(void __iomem *ee_addr)
+{
 	writeb (EE_ENB & ~EE_CS, ee_addr);
 	writeb (EE_ENB, ee_addr);
 	eeprom_delay ();
+}
+
+static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
+{
+	int i;
 
-	/* Shift the read command bits out. */
-	for (i = 4 + addr_len; i >= 0; i--) {
-		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+	/* Shift the command bits out. */
+	for (i = cmd_len - 1; i >= 0; i--) {
+		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
 		writeb (EE_ENB | dataval, ee_addr);
 		eeprom_delay ();
 		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
@@ -1604,6 +1659,33 @@ static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
 	}
 	writeb (EE_ENB, ee_addr);
 	eeprom_delay ();
+}
+
+static void eeprom_cmd_end(void __iomem *ee_addr)
+{
+	writeb (~EE_CS, ee_addr);
+	eeprom_delay ();
+}
+
+static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
+			      int addr_len)
+{
+	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
+
+	eeprom_cmd_start(ee_addr);
+	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
+	eeprom_cmd_end(ee_addr);
+}
+
+static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
+{
+	int i;
+	u16 retval = 0;
+	void __iomem *ee_addr = ioaddr + Cfg9346;
+	int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	eeprom_cmd_start(ee_addr);
+	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
 
 	for (i = 16; i > 0; i--) {
 		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
@@ -1615,13 +1697,125 @@ static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
 		eeprom_delay ();
 	}
 
-	/* Terminate the EEPROM access. */
-	writeb (~EE_CS, ee_addr);
-	eeprom_delay ();
+	eeprom_cmd_end(ee_addr);
 
 	return retval;
 }
 
+static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
+			 int addr_len)
+{
+	int i;
+	void __iomem *ee_addr = ioaddr + Cfg9346;
+	int write_cmd = location | (EE_WRITE_CMD << addr_len);
+
+	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
+
+	eeprom_cmd_start(ee_addr);
+	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
+	eeprom_cmd(ee_addr, val, 16);
+	eeprom_cmd_end(ee_addr);
+
+	eeprom_cmd_start(ee_addr);
+	for (i = 0; i < 20000; i++)
+		if (readb(ee_addr) & EE_DATA_READ)
+			break;
+	eeprom_cmd_end(ee_addr);
+
+	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
+}
+
+static int cp_get_eeprom_len(struct net_device *dev)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	int size;
+
+	spin_lock_irq(&cp->lock);
+	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
+	spin_unlock_irq(&cp->lock);
+
+	return size;
+}
+
+static int cp_get_eeprom(struct net_device *dev,
+			 struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	unsigned int addr_len;
+	u16 val;
+	u32 offset = eeprom->offset >> 1;
+	u32 len = eeprom->len;
+	u32 i = 0;
+
+	eeprom->magic = CP_EEPROM_MAGIC;
+
+	spin_lock_irq(&cp->lock);
+
+	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
+
+	if (eeprom->offset & 1) {
+		val = read_eeprom(cp->regs, offset, addr_len);
+		data[i++] = (u8)(val >> 8);
+		offset++;
+	}
+
+	while (i < len - 1) {
+		val = read_eeprom(cp->regs, offset, addr_len);
+		data[i++] = (u8)val;
+		data[i++] = (u8)(val >> 8);
+		offset++;
+	}
+
+	if (i < len) {
+		val = read_eeprom(cp->regs, offset, addr_len);
+		data[i] = (u8)val;
+	}
+
+	spin_unlock_irq(&cp->lock);
+	return 0;
+}
+
+static int cp_set_eeprom(struct net_device *dev,
+			 struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	unsigned int addr_len;
+	u16 val;
+	u32 offset = eeprom->offset >> 1;
+	u32 len = eeprom->len;
+	u32 i = 0;
+
+	if (eeprom->magic != CP_EEPROM_MAGIC)
+		return -EINVAL;
+
+	spin_lock_irq(&cp->lock);
+
+	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
+
+	if (eeprom->offset & 1) {
+		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
+		val |= (u16)data[i++] << 8;
+		write_eeprom(cp->regs, offset, val, addr_len);
+		offset++;
+	}
+
+	while (i < len - 1) {
+		val = (u16)data[i++];
+		val |= (u16)data[i++] << 8;
+		write_eeprom(cp->regs, offset, val, addr_len);
+		offset++;
+	}
+
+	if (i < len) {
+		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
+		val |= (u16)data[i];
+		write_eeprom(cp->regs, offset, val, addr_len);
+	}
+
+	spin_unlock_irq(&cp->lock);
+	return 0;
+}
+
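
A standalone sketch (not part of the patch) of how the refactored helpers above frame a 93C46/93C56-style serial EEPROM command: the opcode sits above addr_len address bits, and the whole word is clocked out MSB-first over 3 + addr_len clocks. Here the bit stream is printed instead of being written to the Cfg9346 register.

#include <stdio.h>

#define EE_READ_CMD 6	/* includes the always-set leading start bit */

int main(void)
{
	int addr_len = 6;	/* 6 address bits for a 93C46-class part */
	int location = 0x07;	/* word address to read */
	int cmd = location | (EE_READ_CMD << addr_len);
	int i;

	/* Shift the command bits out, highest bit first. */
	for (i = 3 + addr_len - 1; i >= 0; i--)
		printf("%d", (cmd >> i) & 1);	/* prints 110000111 */
	printf("\n");
	return 0;
}
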
 /* Put the board into D3cold state and wait for WakeUp signal */
 static void cp_set_d3_state (struct cp_private *cp)
 {
@@ -1629,36 +1823,54 @@ static void cp_set_d3_state (struct cp_private *cp)
 	pci_set_power_state (cp->pdev, PCI_D3hot);
 }
 
+static const struct net_device_ops cp_netdev_ops = {
+	.ndo_open		= cp_open,
+	.ndo_stop		= cp_close,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= cp_set_mac_address,
+	.ndo_set_multicast_list	= cp_set_rx_mode,
+	.ndo_get_stats		= cp_get_stats,
+	.ndo_do_ioctl		= cp_ioctl,
+	.ndo_start_xmit		= cp_start_xmit,
+	.ndo_tx_timeout		= cp_tx_timeout,
+#if CP_VLAN_TAG_USED
+	.ndo_vlan_rx_register	= cp_vlan_rx_register,
+#endif
+#ifdef BROKEN
+	.ndo_change_mtu		= cp_change_mtu,
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= cp_poll_controller,
+#endif
+};
+
 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *dev;
 	struct cp_private *cp;
 	int rc;
 	void __iomem *regs;
-	long pciaddr;
+	resource_size_t pciaddr;
 	unsigned int addr_len, i, pci_using_dac;
-	u8 pci_rev;
 
 #ifndef MODULE
 	static int version_printed;
 	if (version_printed++ == 0)
-		printk("%s", version);
+		pr_info("%s", version);
 #endif
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
-	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
-		printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
-		       pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
-		printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
+	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
+		dev_info(&pdev->dev,
+			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+			 pdev->vendor, pdev->device, pdev->revision);
 		return -ENODEV;
 	}
 
 	dev = alloc_etherdev(sizeof(struct cp_private));
 	if (!dev)
 		return -ENOMEM;
-	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	cp = netdev_priv(dev);
@@ -1689,35 +1901,34 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	pciaddr = pci_resource_start(pdev, 1);
 	if (!pciaddr) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "no MMIO resource\n");
 		goto err_out_res;
 	}
 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
-		       pci_resource_len(pdev, 1), pci_name(pdev));
+		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
+			(unsigned long long)pci_resource_len(pdev, 1));
 		goto err_out_res;
 	}
 
 	/* Configure DMA attributes. */
 	if ((sizeof(dma_addr_t) > 4) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
-	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
 		pci_using_dac = 0;
 
-		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
 			goto err_out_res;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc) {
-			printk(KERN_ERR PFX "No usable consistent DMA configuration, "
-			       "aborting.\n");
+			dev_err(&pdev->dev,
+				"No usable consistent DMA configuration, aborting\n");
 			goto err_out_res;
 		}
 	}
@@ -1728,8 +1939,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	regs = ioremap(pciaddr, CP_REGS_SIZE);
 	if (!regs) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
-		       pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
+		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
+			(unsigned long long)pci_resource_len(pdev, 1),
+			(unsigned long long)pciaddr);
 		goto err_out_res;
 	}
 	dev->base_addr = (unsigned long) regs;
@@ -1740,57 +1952,42 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* read MAC address from EEPROM */
 	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
 	for (i = 0; i < 3; i++)
-		((u16 *) (dev->dev_addr))[i] =
-		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
-
-	dev->open = cp_open;
-	dev->stop = cp_close;
-	dev->set_multicast_list = cp_set_rx_mode;
-	dev->hard_start_xmit = cp_start_xmit;
-	dev->get_stats = cp_get_stats;
-	dev->do_ioctl = cp_ioctl;
-	dev->poll = cp_rx_poll;
-	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
-#ifdef BROKEN
-	dev->change_mtu = cp_change_mtu;
-#endif
+		((__le16 *) (dev->dev_addr))[i] =
+		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+	dev->netdev_ops = &cp_netdev_ops;
+	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
 	dev->ethtool_ops = &cp_ethtool_ops;
-#if 0
-	dev->tx_timeout = cp_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
-#endif
 
 #if CP_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	dev->vlan_rx_register = cp_vlan_rx_register;
-	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
 #endif
 
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
+#if 0 /* disabled by default until verified */
+	dev->features |= NETIF_F_TSO;
+#endif
+
 	dev->irq = pdev->irq;
 
 	rc = register_netdev(dev);
 	if (rc)
 		goto err_out_iomap;
 
-	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
-		"%02x:%02x:%02x:%02x:%02x:%02x, "
-		"IRQ %d\n",
-		dev->name,
-		dev->base_addr,
-		dev->dev_addr[0], dev->dev_addr[1],
-		dev->dev_addr[2], dev->dev_addr[3],
-		dev->dev_addr[4], dev->dev_addr[5],
-		dev->irq);
+	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+		    dev->base_addr, dev->dev_addr, dev->irq);
 
 	pci_set_drvdata(pdev, dev);
 
 	/* enable busmastering and memory-write-invalidate */
 	pci_set_master(pdev);
 
-	if (cp->wol_enabled) cp_set_d3_state (cp);
+	if (cp->wol_enabled)
+		cp_set_d3_state (cp);
 
 	return 0;
 
@@ -1812,11 +2009,10 @@ static void cp_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct cp_private *cp = netdev_priv(dev);
 
-	if (!dev)
-		BUG();
 	unregister_netdev(dev);
 	iounmap(cp->regs);
-	if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
+	if (cp->wol_enabled)
+		pci_set_power_state (pdev, PCI_D0);
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
@@ -1827,14 +2023,12 @@ static void cp_remove_one (struct pci_dev *pdev)
 #ifdef CONFIG_PM
 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
 {
-	struct net_device *dev;
-	struct cp_private *cp;
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
 
-	dev = pci_get_drvdata (pdev);
-	cp = netdev_priv(dev);
-
-	if (!dev || !netif_running (dev)) return 0;
+	if (!netif_running(dev))
+		return 0;
 
 	netif_device_detach (dev);
 	netif_stop_queue (dev);
@@ -1847,32 +2041,39 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
 
 	spin_unlock_irqrestore (&cp->lock, flags);
 
-	if (cp->pdev && cp->wol_enabled) {
-		pci_save_state (cp->pdev);
-		cp_set_d3_state (cp);
-	}
+	pci_save_state(pdev);
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
 
 static int cp_resume (struct pci_dev *pdev)
 {
-	struct net_device *dev;
-	struct cp_private *cp;
+	struct net_device *dev = pci_get_drvdata (pdev);
+	struct cp_private *cp = netdev_priv(dev);
+	unsigned long flags;
 
-	dev = pci_get_drvdata (pdev);
-	cp = netdev_priv(dev);
+	if (!netif_running(dev))
+		return 0;
 
 	netif_device_attach (dev);
-
-	if (cp->pdev && cp->wol_enabled) {
-		pci_set_power_state (cp->pdev, PCI_D0);
-		pci_restore_state (cp->pdev);
-	}
-
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D0, 0);
+
+	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
+	cp_init_rings_index (cp);
 	cp_init_hw (cp);
 	netif_start_queue (dev);
-
+
+	spin_lock_irqsave (&cp->lock, flags);
+
+	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
+
+	spin_unlock_irqrestore (&cp->lock, flags);
+
 	return 0;
 }
 #endif /* CONFIG_PM */
@@ -1891,9 +2092,9 @@ static struct pci_driver cp_driver = {
 static int __init cp_init (void)
 {
 #ifdef MODULE
-	printk("%s", version);
+	pr_info("%s", version);
 #endif
-	return pci_module_init (&cp_driver);
+	return pci_register_driver(&cp_driver);
 }
 
 static void __exit cp_exit (void)