TODO:
* Test Tx checksumming thoroughly
- * Implement dev->tx_timeout
Low priority TODO:
* Complete reset on PciErr
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "8139cp"
#define DRV_VERSION "1.3"
#define DRV_RELDATE "Mar 22, 2004"
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
+#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
- do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
+ do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
/* These identify the driver base version and may not be removed. */
static char version[] =
-KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
-#define PFX DRV_NAME ": "
-
-#ifndef TRUE
-#define FALSE 0
-#define TRUE (!FALSE)
-#endif
-
#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK)
(CP)->tx_tail - (CP)->tx_head - 1)
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
-#define RX_OFFSET 2
#define CP_INTERNAL_PHY 32
/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
(RX_DMA_BURST << RxCfgDMAShift);
struct cp_desc {
- u32 opts1;
- u32 opts2;
- u64 addr;
-};
-
-struct ring_info {
- struct sk_buff *skb;
- dma_addr_t mapping;
- u32 len;
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
};
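
/* The C+ descriptors are little-endian in host memory, so the fields
 * above carry sparse __le32/__le64 annotations and every access goes
 * through a byte-order helper.  A minimal sketch of the pattern
 * (status and mapping being host-endian locals):
 *
 *	u32 status = le32_to_cpu(desc->opts1);
 *	desc->addr = cpu_to_le64(mapping);
 *
 * On little-endian hosts these compile to plain loads and stores; on
 * big-endian hosts they byte-swap, which the old plain u32/u64 fields
 * silently failed to do.
 */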
struct cp_dma_stats {
- u64 tx_ok;
- u64 rx_ok;
- u64 tx_err;
- u32 rx_err;
- u16 rx_fifo;
- u16 frame_align;
- u32 tx_ok_1col;
- u32 tx_ok_mcol;
- u64 rx_ok_phys;
- u64 rx_ok_bcast;
- u32 rx_ok_mcast;
- u16 tx_abort;
- u16 tx_underrun;
+ __le64 tx_ok;
+ __le64 rx_ok;
+ __le64 tx_err;
+ __le32 rx_err;
+ __le16 rx_fifo;
+ __le16 frame_align;
+ __le32 tx_ok_1col;
+ __le32 tx_ok_mcol;
+ __le64 rx_ok_phys;
+ __le64 rx_ok_bcast;
+ __le32 rx_ok_mcast;
+ __le16 tx_abort;
+ __le16 tx_underrun;
} __attribute__((packed));
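
/* struct cp_dma_stats mirrors, byte for byte, the block the NIC DMAs
 * into host memory when DumpStats is set in StatsAddr, hence the
 * packed attribute and the fixed little-endian field types.  Reading
 * a counter out of the dump therefore looks roughly like:
 *
 *	u64 tx_ok = le64_to_cpu(nic_stats->tx_ok);
 */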
struct cp_extra_stats {
spinlock_t lock;
u32 msg_enable;
+ struct napi_struct napi;
+
struct pci_dev *pdev;
u32 rx_config;
u16 cpcmd;
- struct net_device_stats net_stats;
struct cp_extra_stats cp_stats;
- unsigned rx_tail ____cacheline_aligned;
+ unsigned rx_head ____cacheline_aligned;
+ unsigned rx_tail;
struct cp_desc *rx_ring;
- struct ring_info rx_skb[CP_RX_RING_SIZE];
- unsigned rx_buf_sz;
+ struct sk_buff *rx_skb[CP_RX_RING_SIZE];
unsigned tx_head ____cacheline_aligned;
unsigned tx_tail;
-
struct cp_desc *tx_ring;
- struct ring_info tx_skb[CP_TX_RING_SIZE];
- dma_addr_t ring_dma;
+ struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+
+ unsigned rx_buf_sz;
+ unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
#if CP_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
-
- unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+ dma_addr_t ring_dma;
struct mii_if_info mii_if;
};
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
-static struct pci_device_id cp_pci_tbl[] = {
- { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
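
/* PCI_DEVICE() fills in the vendor/device pair and wildcards the
 * subsystem IDs, so each entry above is shorthand for the older
 * open-coded form it replaces:
 *
 *	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
 *	  PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
 */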
spin_lock_irqsave(&cp->lock, flags);
cp->vlgrp = grp;
- cp->cpcmd |= RxVlanOn;
- cpw16(CpCmd, cp->cpcmd);
- spin_unlock_irqrestore(&cp->lock, flags);
-}
-
-static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
- struct cp_private *cp = netdev_priv(dev);
- unsigned long flags;
+ if (grp)
+ cp->cpcmd |= RxVlanOn;
+ else
+ cp->cpcmd &= ~RxVlanOn;
- spin_lock_irqsave(&cp->lock, flags);
- cp->cpcmd &= ~RxVlanOn;
cpw16(CpCmd, cp->cpcmd);
- if (cp->vlgrp)
- cp->vlgrp->vlan_devices[vid] = NULL;
spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */
{
skb->protocol = eth_type_trans (skb, cp->dev);
- cp->net_stats.rx_packets++;
- cp->net_stats.rx_bytes += skb->len;
- cp->dev->last_rx = jiffies;
+ cp->dev->stats.rx_packets++;
+ cp->dev->stats.rx_bytes += skb->len;
#if CP_VLAN_TAG_USED
- if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
+ if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
vlan_hwaccel_receive_skb(skb, cp->vlgrp,
- be16_to_cpu(desc->opts2 & 0xffff));
+ swab16(le32_to_cpu(desc->opts2) & 0xffff));
} else
#endif
netif_receive_skb(skb);
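
/* The chip stores the 802.1Q TCI byte-swapped relative to the rest of
 * the little-endian descriptor word, so the tag cannot be pulled out
 * with a single le16_to_cpu(): the whole word is converted first and
 * the low 16 bits swabbed afterwards,
 *
 *	swab16(le32_to_cpu(desc->opts2) & 0xffff)
 *
 * The transmit side mirrors this in CP_VLAN_TX_TAG(), swab16()ing the
 * tag before the cpu_to_le32() store.
 */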
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
u32 status, u32 len)
{
- if (netif_msg_rx_err (cp))
- printk (KERN_DEBUG
- "%s: rx err, slot %d status 0x%x len %d\n",
- cp->dev->name, rx_tail, status, len);
- cp->net_stats.rx_errors++;
+ netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
+ cp->dev->stats.rx_errors++;
if (status & RxErrFrame)
- cp->net_stats.rx_frame_errors++;
+ cp->dev->stats.rx_frame_errors++;
if (status & RxErrCRC)
- cp->net_stats.rx_crc_errors++;
+ cp->dev->stats.rx_crc_errors++;
if ((status & RxErrRunt) || (status & RxErrLong))
- cp->net_stats.rx_length_errors++;
+ cp->dev->stats.rx_length_errors++;
if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
- cp->net_stats.rx_length_errors++;
+ cp->dev->stats.rx_length_errors++;
if (status & RxErrFIFO)
- cp->net_stats.rx_fifo_errors++;
+ cp->dev->stats.rx_fifo_errors++;
}
static inline unsigned int cp_rx_csum_ok (u32 status)
return 0;
}
-static int cp_rx_poll (struct net_device *dev, int *budget)
+static int cp_rx_poll(struct napi_struct *napi, int budget)
{
- struct cp_private *cp = netdev_priv(dev);
- unsigned rx_tail = cp->rx_tail;
- unsigned rx_work = dev->quota;
- unsigned rx;
+ struct cp_private *cp = container_of(napi, struct cp_private, napi);
+ struct net_device *dev = cp->dev;
+ unsigned int rx_tail = cp->rx_tail;
+ int rx;
rx_status_loop:
rx = 0;
dma_addr_t mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
- unsigned buflen;
+ const unsigned buflen = cp->rx_buf_sz;
- skb = cp->rx_skb[rx_tail].skb;
+ skb = cp->rx_skb[rx_tail];
BUG_ON(!skb);
desc = &cp->rx_ring[rx_tail];
break;
len = (status & 0x1fff) - 4;
- mapping = cp->rx_skb[rx_tail].mapping;
+ mapping = le64_to_cpu(desc->addr);
if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
/* we don't support incoming fragmented frames.
 * instead, we attempt to ensure that the
 * pre-allocated RX skbs are properly sized such
 * that RX fragments are never encountered
 */
cp_rx_err_acct(cp, rx_tail, status, len);
- cp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
cp->cp_stats.rx_frags++;
goto rx_next;
}
goto rx_next;
}
- if (netif_msg_rx_status(cp))
- printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
- cp->dev->name, rx_tail, status, len);
+ netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
- buflen = cp->rx_buf_sz + RX_OFFSET;
- new_skb = dev_alloc_skb (buflen);
+ new_skb = netdev_alloc_skb_ip_align(dev, buflen);
if (!new_skb) {
- cp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
goto rx_next;
}
- skb_reserve(new_skb, RX_OFFSET);
- new_skb->dev = cp->dev;
-
- pci_unmap_single(cp->pdev, mapping,
+ dma_unmap_single(&cp->pdev->dev, mapping,
buflen, PCI_DMA_FROMDEVICE);
/* Handle checksum offloading for incoming packets. */
skb_put(skb, len);
- mapping =
- cp->rx_skb[rx_tail].mapping =
- pci_map_single(cp->pdev, new_skb->data,
- buflen, PCI_DMA_FROMDEVICE);
- cp->rx_skb[rx_tail].skb = new_skb;
+ mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
+ cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
rx_tail = NEXT_RX(rx_tail);
- if (!rx_work--)
+ if (rx >= budget)
break;
}
cp->rx_tail = rx_tail;
- dev->quota -= rx;
- *budget -= rx;
-
/* if we did not reach work limit, then we're done with
* this round of polling
*/
- if (rx_work) {
+ if (rx < budget) {
+ unsigned long flags;
+
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
- local_irq_disable();
+ spin_lock_irqsave(&cp->lock, flags);
cpw16_f(IntrMask, cp_intr_mask);
- __netif_rx_complete(dev);
- local_irq_enable();
-
- return 0; /* done */
+ __napi_complete(napi);
+ spin_unlock_irqrestore(&cp->lock, flags);
}
- return 1; /* not done */
+ return rx;
}
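
/* Under the NAPI interface, a poll callback returns the number of
 * packets it consumed.  Coming in under budget signals that the
 * device is idle, and the driver itself must then complete the napi
 * context and unmask rx interrupts -- done above under cp->lock so it
 * cannot race cp_interrupt().  The old ->poll() instead decremented
 * *budget and dev->quota and returned 0 (done) or 1 (keep polling).
 */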
-static irqreturn_t
-cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct cp_private *cp;
if (!status || (status == 0xFFFF))
return IRQ_NONE;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
- dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+ netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+ status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
}
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
- if (netif_rx_schedule_prep(dev)) {
+ if (napi_schedule_prep(&cp->napi)) {
cpw16_f(IntrMask, cp_norx_intr_mask);
- __netif_rx_schedule(dev);
+ __napi_schedule(&cp->napi);
}
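/* napi_schedule_prep() atomically claims NAPI_STATE_SCHED, so rx
 * interrupts are masked and __napi_schedule() called only when no
 * poll is already pending; if the bit was taken, the scheduled poll
 * will pick this work up anyway.
 */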
if (status & (TxOK | TxErr | TxEmpty | SWInt))
cp_tx(cp);
if (status & LinkChg)
- mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
spin_unlock(&cp->lock);
pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
- printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+ status, pci_status);
/* TODO: reset hardware */
}
static void cp_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
- cp_interrupt(dev->irq, dev, NULL);
+ cp_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
unsigned tx_tail = cp->tx_tail;
while (tx_tail != tx_head) {
+ struct cp_desc *txd = cp->tx_ring + tx_tail;
struct sk_buff *skb;
u32 status;
rmb();
- status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+ status = le32_to_cpu(txd->opts1);
if (status & DescOwn)
break;
- skb = cp->tx_skb[tx_tail].skb;
+ skb = cp->tx_skb[tx_tail];
BUG_ON(!skb);
- pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
- cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+ le32_to_cpu(txd->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
- cp->dev->name, status);
- cp->net_stats.tx_errors++;
+ netif_dbg(cp, tx_err, cp->dev,
+ "tx err, status 0x%x\n", status);
+ cp->dev->stats.tx_errors++;
if (status & TxOWC)
- cp->net_stats.tx_window_errors++;
+ cp->dev->stats.tx_window_errors++;
if (status & TxMaxCol)
- cp->net_stats.tx_aborted_errors++;
+ cp->dev->stats.tx_aborted_errors++;
if (status & TxLinkFail)
- cp->net_stats.tx_carrier_errors++;
+ cp->dev->stats.tx_carrier_errors++;
if (status & TxFIFOUnder)
- cp->net_stats.tx_fifo_errors++;
+ cp->dev->stats.tx_fifo_errors++;
} else {
- cp->net_stats.collisions +=
+ cp->dev->stats.collisions +=
((status >> TxColCntShift) & TxColCntMask);
- cp->net_stats.tx_packets++;
- cp->net_stats.tx_bytes += skb->len;
- if (netif_msg_tx_done(cp))
- printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+ cp->dev->stats.tx_packets++;
+ cp->dev->stats.tx_bytes += skb->len;
+ netif_dbg(cp, tx_done, cp->dev,
+ "tx done, slot %d\n", tx_tail);
}
dev_kfree_skb_irq(skb);
}
- cp->tx_skb[tx_tail].skb = NULL;
+ cp->tx_skb[tx_tail] = NULL;
tx_tail = NEXT_TX(tx_tail);
}
netif_wake_queue(cp->dev);
}
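
/* With struct ring_info gone, the unmap parameters are recovered from
 * the descriptor itself: the DMA address from txd->addr and the buffer
 * length from the low 16 bits of txd->opts1.  This relies on the chip
 * updating only the status bits of opts1 on completion, preserving the
 * length bits and leaving the address untouched.
 */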
-static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned entry;
u32 eor, flags;
+ unsigned long intr_flags;
#if CP_VLAN_TAG_USED
u32 vlan_tag = 0;
#endif
int mss = 0;
- spin_lock_irq(&cp->lock);
+ spin_lock_irqsave(&cp->lock, intr_flags);
/* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
- spin_unlock_irq(&cp->lock);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
- return 1;
+ spin_unlock_irqrestore(&cp->lock, intr_flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
}
#if CP_VLAN_TAG_USED
if (cp->vlgrp && vlan_tx_tag_present(skb))
- vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
+ vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
#endif
entry = cp->tx_head;
dma_addr_t mapping;
len = skb->len;
- mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
CP_VLAN_TX_TAG(txd, vlan_tag);
txd->addr = cpu_to_le64(mapping);
wmb();
if (mss)
flags |= LargeSend | ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_HW) {
- const struct iphdr *ip = skb->nh.iph;
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
flags |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
txd->opts1 = cpu_to_le32(flags);
wmb();
- cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = mapping;
- cp->tx_skb[entry].len = len;
+ cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
} else {
struct cp_desc *txd;
u32 first_len, first_eor;
dma_addr_t first_mapping;
int frag, first_entry = entry;
- const struct iphdr *ip = skb->nh.iph;
+ const struct iphdr *ip = ip_hdr(skb);
/* We must give this initial chunk to the device last.
* Otherwise we could race with the device.
*/
first_eor = eor;
first_len = skb_headlen(skb);
- first_mapping = pci_map_single(cp->pdev, skb->data,
+ first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
first_len, PCI_DMA_TODEVICE);
- cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = first_mapping;
- cp->tx_skb[entry].len = first_len;
+ cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
dma_addr_t mapping;
len = this_frag->size;
- mapping = pci_map_single(cp->pdev,
+ mapping = dma_map_single(&cp->pdev->dev,
((void *) page_address(this_frag->page) +
this_frag->page_offset),
len, PCI_DMA_TODEVICE);
if (mss)
ctrl |= LargeSend |
((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_HW) {
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip->protocol == IPPROTO_TCP)
ctrl |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
txd->opts1 = cpu_to_le32(ctrl);
wmb();
- cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = mapping;
- cp->tx_skb[entry].len = len;
+ cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
}
txd->addr = cpu_to_le64(first_mapping);
wmb();
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip->protocol == IPPROTO_TCP)
txd->opts1 = cpu_to_le32(first_eor | first_len |
FirstFrag | DescOwn |
wmb();
}
cp->tx_head = entry;
- if (netif_msg_tx_queued(cp))
- printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
- dev->name, entry, skb->len);
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
- spin_unlock_irq(&cp->lock);
+ spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
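
/* NETDEV_TX_BUSY makes the core requeue the skb and retry later; it is
 * acceptable here only because the ring-full case above can never
 * happen in normal operation (the queue is stopped while fewer than
 * MAX_SKB_FRAGS + 1 descriptors remain).  Every frame actually queued
 * returns NETDEV_TX_OK.
 */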
/* Set or clear the multicast filter for this adaptor.
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit)
- || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
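
/* The 64-bit hash filter is indexed by the top six bits of the CRC-32
 * of the address: bit_nr >> 5 selects one of the two 32-bit filter
 * words and bit_nr & 31 the bit within it.  For example, a CRC of
 * 0xd4000000 yields bit_nr = 53, i.e. bit 21 of mc_filter[1].
 */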
static void __cp_get_stats(struct cp_private *cp)
{
/* only lower 24 bits valid; write any value to clear */
- cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
+ cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
cpw32 (RxMissed, 0);
}
__cp_get_stats(cp);
spin_unlock_irqrestore(&cp->lock, flags);
- return &cp->net_stats;
+ return &dev->stats;
}
static void cp_stop_hw (struct cp_private *cp)
schedule_timeout_uninterruptible(10);
}
- printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
+ netdev_err(cp->dev, "hardware reset timeout\n");
}
static inline void cp_start_hw (struct cp_private *cp)
cpw8_f (Cfg9346, Cfg9346_Unlock);
/* Restore our idea of the MAC address. */
- cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
- cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+ cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+ cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
cpw8_f(Cfg9346, Cfg9346_Lock);
}
-static int cp_refill_rx (struct cp_private *cp)
+static int cp_refill_rx(struct cp_private *cp)
{
+ struct net_device *dev = cp->dev;
unsigned i;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ dma_addr_t mapping;
- skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
+ skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
if (!skb)
goto err_out;
- skb->dev = cp->dev;
- skb_reserve(skb, RX_OFFSET);
-
- cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
- skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- cp->rx_skb[i].skb = skb;
+ mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
- cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+ cp->rx_ring[i].addr = cpu_to_le64(mapping);
if (i == (CP_RX_RING_SIZE - 1))
cp->rx_ring[i].opts1 =
cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
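
/* netdev_alloc_skb_ip_align() folds in the old RX_OFFSET handling: it
 * reserves NET_IP_ALIGN bytes (2 on most architectures) so the IP
 * header lands word-aligned behind the 14-byte Ethernet header, and it
 * sets skb->dev, making the removed skb_reserve() and skb->dev
 * assignment redundant.
 */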
{
void *mem;
- mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
+ mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
+ &cp->ring_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
static void cp_clean_rings (struct cp_private *cp)
{
+ struct cp_desc *desc;
unsigned i;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
- if (cp->rx_skb[i].skb) {
- pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+ if (cp->rx_skb[i]) {
+ desc = cp->rx_ring + i;
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(cp->rx_skb[i].skb);
+ dev_kfree_skb(cp->rx_skb[i]);
}
}
for (i = 0; i < CP_TX_RING_SIZE; i++) {
- if (cp->tx_skb[i].skb) {
- struct sk_buff *skb = cp->tx_skb[i].skb;
-
- pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
- cp->tx_skb[i].len, PCI_DMA_TODEVICE);
- if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+ if (cp->tx_skb[i]) {
+ struct sk_buff *skb = cp->tx_skb[i];
+
+ desc = cp->tx_ring + i;
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
+ if (le32_to_cpu(desc->opts1) & LastFrag)
dev_kfree_skb(skb);
- cp->net_stats.tx_dropped++;
+ cp->dev->stats.tx_dropped++;
}
}
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
- memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
- memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+ memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
+ memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}
static void cp_free_rings (struct cp_private *cp)
{
cp_clean_rings(cp);
- pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+ dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
+ cp->ring_dma);
cp->rx_ring = NULL;
cp->tx_ring = NULL;
}
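
/* The rings live in coherent DMA memory, so CPU and NIC agree on the
 * DescOwn handoff without explicit sync operations.  Note that
 * dma_alloc_coherent() takes an explicit gfp argument where the old
 * pci_alloc_consistent() wrapper hard-coded GFP_ATOMIC; GFP_KERNEL is
 * fine here since allocation happens in cp_open() context.
 */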
struct cp_private *cp = netdev_priv(dev);
int rc;
- if (netif_msg_ifup(cp))
- printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+ netif_dbg(cp, ifup, dev, "enabling interface\n");
rc = cp_alloc_rings(cp);
if (rc)
return rc;
+ napi_enable(&cp->napi);
+
cp_init_hw(cp);
rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
goto err_out_hw;
netif_carrier_off(dev);
- mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
netif_start_queue(dev);
return 0;
err_out_hw:
+ napi_disable(&cp->napi);
cp_stop_hw(cp);
cp_free_rings(cp);
return rc;
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- if (netif_msg_ifdown(cp))
- printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+ napi_disable(&cp->napi);
+
+ netif_dbg(cp, ifdown, dev, "disabling interface\n");
spin_lock_irqsave(&cp->lock, flags);
spin_unlock_irqrestore(&cp->lock, flags);
- synchronize_irq(dev->irq);
free_irq(dev->irq, dev);
cp_free_rings(cp);
return 0;
}
+static void cp_tx_timeout(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+ cpr8(Cmd), cpr16(CpCmd),
+ cpr16(IntrStatus), cpr16(IntrMask));
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ cp_stop_hw(cp);
+ cp_clean_rings(cp);
+ /* best-effort recovery: a ring refill failure is not reported here */
+ cp_init_rings(cp);
+ cp_start_hw(cp);
+
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
return CP_REGS_SIZE;
}
-static int cp_get_stats_count (struct net_device *dev)
+static int cp_get_sset_count (struct net_device *dev, int sset)
{
- return CP_NUM_STATS;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CP_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
}
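
/* get_sset_count() multiplexes what used to be separate callbacks
 * (get_stats_count, self_test_count, ...) behind one string-set id;
 * sets the driver does not implement must return -EOPNOTSUPP so
 * ethtool can probe for support.
 */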
static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
dma_addr_t dma;
int i;
- nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
+ nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
+ &dma, GFP_KERNEL);
if (!nic_stats)
return;
/* begin NIC statistics dump */
cpw32(StatsAddr + 4, (u64)dma >> 32);
- cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
+ cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
cpr32(StatsAddr);
for (i = 0; i < 1000; i++) {
tmp_stats[i++] = cp->cp_stats.rx_frags;
BUG_ON(i != CP_NUM_STATS);
- pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
+ dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}
-static struct ethtool_ops cp_ethtool_ops = {
+static const struct ethtool_ops cp_ethtool_ops = {
.get_drvinfo = cp_get_drvinfo,
.get_regs_len = cp_get_regs_len,
- .get_stats_count = cp_get_stats_count,
+ .get_sset_count = cp_get_sset_count,
.get_settings = cp_get_settings,
.set_settings = cp_set_settings,
.nway_reset = cp_nway_reset,
.set_msglevel = cp_set_msglevel,
.get_rx_csum = cp_get_rx_csum,
.set_rx_csum = cp_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum, /* local! */
- .get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = cp_get_regs,
.get_wol = cp_get_wol,
.set_wol = cp_set_wol,
.get_strings = cp_get_strings,
.get_ethtool_stats = cp_get_ethtool_stats,
- .get_perm_addr = ethtool_op_get_perm_addr,
.get_eeprom_len = cp_get_eeprom_len,
.get_eeprom = cp_get_eeprom,
.set_eeprom = cp_set_eeprom,
return rc;
}
+static int cp_set_mac_address(struct net_device *dev, void *p)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ spin_lock_irq(&cp->lock);
+
+ cpw8_f(Cfg9346, Cfg9346_Unlock);
+ cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+ cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+ cpw8_f(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&cp->lock);
+
+ return 0;
+}
+
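/* As in cp_init_hw(), the MAC0 writes only take effect between the
 * Cfg9346 unlock/lock pair.  The address arrives already validated
 * from userspace, e.g. via "ip link set dev ethX address ...".
 */
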
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits. */
pci_set_power_state (cp->pdev, PCI_D3hot);
}
+static const struct net_device_ops cp_netdev_ops = {
+ .ndo_open = cp_open,
+ .ndo_stop = cp_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = cp_set_mac_address,
+ .ndo_set_multicast_list = cp_set_rx_mode,
+ .ndo_get_stats = cp_get_stats,
+ .ndo_do_ioctl = cp_ioctl,
+ .ndo_start_xmit = cp_start_xmit,
+ .ndo_tx_timeout = cp_tx_timeout,
+#if CP_VLAN_TAG_USED
+ .ndo_vlan_rx_register = cp_vlan_rx_register,
+#endif
+#ifdef BROKEN
+ .ndo_change_mtu = cp_change_mtu,
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cp_poll_controller,
+#endif
+};
+
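/* All of the per-netdev function pointers that probe used to assign
 * one by one now live in this single const structure; cp_init_one()
 * only needs:
 *
 *	dev->netdev_ops = &cp_netdev_ops;
 */
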
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
void __iomem *regs;
resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
- u8 pci_rev;
#ifndef MODULE
static int version_printed;
if (version_printed++ == 0)
- printk("%s", version);
+ pr_info("%s", version);
#endif
- pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
- pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
- dev_err(&pdev->dev,
- "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
- pdev->vendor, pdev->device, pci_rev);
- dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
+ dev_info(&pdev->dev,
+ "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+ pdev->vendor, pdev->device, pdev->revision);
return -ENODEV;
}
dev = alloc_etherdev(sizeof(struct cp_private));
if (!dev)
return -ENOMEM;
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
cp = netdev_priv(dev);
/* Configure DMA attributes. */
if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
- !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
pci_using_dac = 0;
- rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
- "No usable DMA configuration, aborting.\n");
+ "No usable DMA configuration, aborting\n");
goto err_out_res;
}
- rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
- "No usable consistent DMA configuration, "
- "aborting.\n");
+ "No usable consistent DMA configuration, aborting\n");
goto err_out_res;
}
}
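
/* DMA_BIT_MASK(n) expands to the low n bits set (~0ULL when n == 64),
 * replacing the fixed DMA_nnBIT_MASK constants.  The fallback path
 * above caps both the streaming and coherent masks at 32 bits when
 * 64-bit addressing is not usable.
 */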
if (!regs) {
rc = -EIO;
dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
- (unsigned long long)pci_resource_len(pdev, 1),
+ (unsigned long long)pci_resource_len(pdev, 1),
(unsigned long long)pciaddr);
goto err_out_res;
}
/* read MAC address from EEPROM */
addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((u16 *) (dev->dev_addr))[i] =
- le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
+ ((__le16 *) (dev->dev_addr))[i] =
+ cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- dev->open = cp_open;
- dev->stop = cp_close;
- dev->set_multicast_list = cp_set_rx_mode;
- dev->hard_start_xmit = cp_start_xmit;
- dev->get_stats = cp_get_stats;
- dev->do_ioctl = cp_ioctl;
- dev->poll = cp_rx_poll;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = cp_poll_controller;
-#endif
- dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
-#ifdef BROKEN
- dev->change_mtu = cp_change_mtu;
-#endif
+ dev->netdev_ops = &cp_netdev_ops;
+ netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
dev->ethtool_ops = &cp_ethtool_ops;
-#if 0
- dev->tx_timeout = cp_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
-#endif
#if CP_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_rx_register = cp_vlan_rx_register;
- dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif
if (pci_using_dac)
if (rc)
goto err_out_iomap;
- printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
- "%02x:%02x:%02x:%02x:%02x:%02x, "
- "IRQ %d\n",
- dev->name,
- dev->base_addr,
- dev->dev_addr[0], dev->dev_addr[1],
- dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5],
- dev->irq);
+ netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
pci_set_drvdata(pdev, dev);
struct net_device *dev = pci_get_drvdata(pdev);
struct cp_private *cp = netdev_priv(dev);
- BUG_ON(!dev);
unregister_netdev(dev);
iounmap(cp->regs);
if (cp->wol_enabled)
#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
- struct net_device *dev;
- struct cp_private *cp;
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- dev = pci_get_drvdata (pdev);
- cp = netdev_priv(dev);
-
- if (!dev || !netif_running (dev)) return 0;
+ if (!netif_running(dev))
+ return 0;
netif_device_detach (dev);
netif_stop_queue (dev);
spin_lock_irqsave (&cp->lock, flags);
- mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
spin_unlock_irqrestore (&cp->lock, flags);
static int __init cp_init (void)
{
#ifdef MODULE
- printk("%s", version);
+ pr_info("%s", version);
#endif
- return pci_module_init (&cp_driver);
+ return pci_register_driver(&cp_driver);
}
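
/* pci_module_init() was a deprecated alias of pci_register_driver()
 * and has since been removed from the kernel; the return value
 * remains 0 on success or a negative errno.
 */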
static void __exit cp_exit (void)