static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
-static int start_xmit (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static const struct ethtool_ops ethtool_ops;
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = rio_open,
+ .ndo_start_xmit = start_xmit,
+ .ndo_stop = rio_close,
+ .ndo_get_stats = get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_multicast_list = set_multicast,
+ .ndo_do_ioctl = rio_ioctl,
+ .ndo_tx_timeout = rio_tx_timeout,
+ .ndo_change_mtu = change_mtu,
+};
+
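With net_device_ops, the per-device callbacks that rio_probe1() used to
assign one at a time are gathered into this single const table, and the
probe path shrinks to one dev->netdev_ops assignment (see below).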
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
void *ring_space;
dma_addr_t ring_dma;
- DECLARE_MAC_BUF(mac);
if (!version_printed++)
printk ("%s", version);
strcmp (media[card_idx], "4") == 0) {
np->speed = 100;
np->full_duplex = 1;
- } else if (strcmp (media[card_idx], "100mbps_hd") == 0
- || strcmp (media[card_idx], "3") == 0) {
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+ strcmp (media[card_idx], "3") == 0) {
np->speed = 100;
np->full_duplex = 0;
} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
else if (tx_coalesce > TX_RING_SIZE-1)
tx_coalesce = TX_RING_SIZE - 1;
}
- dev->open = &rio_open;
- dev->hard_start_xmit = &start_xmit;
- dev->stop = &rio_close;
- dev->get_stats = &get_stats;
- dev->set_multicast_list = &set_multicast;
- dev->do_ioctl = &rio_ioctl;
- dev->tx_timeout = &rio_tx_timeout;
+ dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- dev->change_mtu = &change_mtu;
SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
dev->features = NETIF_F_IP_CSUM;
card_idx++;
- printk (KERN_INFO "%s: %s, %s, IRQ %d\n",
- dev->name, np->name, print_mac(mac, dev->dev_addr), irq);
+ printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
+ dev->name, np->name, dev->dev_addr, irq);
if (tx_coalesce > 1)
printk(KERN_INFO "tx_coalesce:\t%d packets\n",
tx_coalesce);
if (np->coalesce)
- printk(KERN_INFO "rx_coalesce:\t%d packets\n"
- KERN_INFO "rx_timeout: \t%d ns\n",
+ printk(KERN_INFO
+ "rx_coalesce:\t%d packets\n"
+ "rx_timeout: \t%d ns\n",
np->rx_coalesce, np->rx_timeout*640);
if (np->vlan)
printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
int i;
u16 macctrl;
- i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
+ i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
if (i)
return i;
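request_irq() takes a plain function pointer, and a bare function name
already decays to one, so the leading '&' is redundant.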
entry = np->old_rx % RX_RING_SIZE;
/* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb (np->rx_buf_sz);
+ skb = netdev_alloc_skb_ip_align(dev,
+ np->rx_buf_sz);
if (skb == NULL) {
np->rx_ring[entry].fraginfo = 0;
printk (KERN_INFO
break;
}
np->rx_skbuff[entry] = skb;
- /* 16 byte align the IP header */
- skb_reserve (skb, 2);
np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single
(np->pdev, skb->data, np->rx_buf_sz,
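For reference, netdev_alloc_skb_ip_align() bundles the allocation and the
IP-header alignment reserve into one call; in effect it does:

	skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);

which is why the explicit skb_reserve(skb, 2) calls here and in
alloc_list() go away (NET_IP_ALIGN is 2 on most architectures).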
dev->name, readl (ioaddr + TxStatus));
rio_free_tx(dev, 0);
dev->if_port = 0;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/* allocate and initialize Tx and Rx descriptors */
/* Allocate the rx buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
- struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
if (skb == NULL) {
printk (KERN_ERR
dev->name);
break;
}
- skb_reserve (skb, 2); /* 16 byte align the IP header. */
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
/* Set RFDListPtr */
writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
writel (0, dev->base_addr + RFDListPtr1);
-
- return;
}
-static int
+static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
if (np->link_status == 0) { /* Link Down */
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
ioaddr = dev->base_addr;
entry = np->cur_tx % TX_RING_SIZE;
writel (0, dev->base_addr + TFDListPtr1);
}
- /* NETDEV WATCHDOG timer */
- dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
static irqreturn_t
static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
- return le64_to_cpu(desc->fraginfo) & DMA_48BIT_MASK;
+ return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}
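DMA_BIT_MASK() derives the mask from the bit count, replacing the old
family of fixed DMA_nnBIT_MASK constants; the generic definition is:

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

so DMA_BIT_MASK(48) yields 0x0000ffffffffffff, the same value
DMA_48BIT_MASK carried.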
static void
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
+ } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
pci_dma_sync_single_for_cpu(np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- /* 16 byte align the IP header */
- skb_reserve (skb, 2);
skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
pkt_len);
}
#endif
netif_rx (skb);
- dev->last_rx = jiffies;
}
entry = (entry + 1) % RX_RING_SIZE;
}
struct sk_buff *skb;
/* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb (np->rx_buf_sz);
+ skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
if (skb == NULL) {
np->rx_ring[entry].fraginfo = 0;
printk (KERN_INFO
break;
}
np->rx_skbuff[entry] = skb;
- /* 16 byte align the IP header */
- skb_reserve (skb, 2);
np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single
(np->pdev, skb->data, np->rx_buf_sz,
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > multicast_filter_limit)) {
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if (dev->mc_count > 0) {
- int i;
- struct dev_mc_list *mclist;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist=mclist->next)
- {
+ netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
- int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
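The new multicast API: netdev_mc_count() and netdev_mc_empty() replace
direct reads of dev->mc_count, and netdev_for_each_mc_addr() iterates
struct netdev_hw_addr entries (address in ha->addr) instead of the old
hand-walked dev->mc_list chain of struct dev_mc_list.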
("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
i,
(u32) (np->tx_ring_dma + i * sizeof (*desc)),
- (u32) desc->next_desc,
- (u32) desc->status, (u32) (desc->fraginfo >> 32),
- (u32) desc->fraginfo);
+ (u32)le64_to_cpu(desc->next_desc),
+ (u32)le64_to_cpu(desc->status),
+ (u32)(le64_to_cpu(desc->fraginfo) >> 32),
+ (u32)le64_to_cpu(desc->fraginfo));
printk ("\n");
}
printk ("\n");
static int
mii_wait_link (struct net_device *dev, int wait)
{
- BMSR_t bmsr;
+ __u16 bmsr;
int phy_addr;
struct netdev_private *np;
phy_addr = np->phy_addr;
do {
- bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
- if (bmsr.bits.link_status)
+ bmsr = mii_read (dev, phy_addr, MII_BMSR);
+ if (bmsr & MII_BMSR_LINK_STATUS)
return 0;
mdelay (1);
} while (--wait > 0);
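Plain __u16 register values tested against named mask bits are the
portable idiom here: the old BMSR_t-style unions depended on compiler-
and endianness-specific bitfield layout, whereas a mask test like
(bmsr & MII_BMSR_LINK_STATUS) reads the same bits on every arch.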
mii_get_media (struct net_device *dev)
{
__u16 negotiate;
- BMSR_t bmsr;
- MSCR_t mscr;
- MSSR_t mssr;
+ __u16 bmsr;
+ __u16 mscr;
+ __u16 mssr;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
- bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
+ bmsr = mii_read (dev, phy_addr, MII_BMSR);
if (np->an_enable) {
- if (!bmsr.bits.an_complete) {
+ if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
/* Auto-Negotiation not completed */
return -1;
}
negotiate = mii_read (dev, phy_addr, MII_ANAR) &
mii_read (dev, phy_addr, MII_ANLPAR);
- mscr.image = mii_read (dev, phy_addr, MII_MSCR);
- mssr.image = mii_read (dev, phy_addr, MII_MSSR);
- if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
+ mscr = mii_read (dev, phy_addr, MII_MSCR);
+ mssr = mii_read (dev, phy_addr, MII_MSSR);
+ if (mscr & MII_MSCR_1000BT_FD && mssr & MII_MSSR_LP_1000BT_FD) {
np->speed = 1000;
np->full_duplex = 1;
printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
- } else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
+ } else if (mscr & MII_MSCR_1000BT_HD && mssr & MII_MSSR_LP_1000BT_HD) {
np->speed = 1000;
np->full_duplex = 0;
printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
printk (KERN_INFO "Operating at 10 Mbps, ");
}
if (bmcr & MII_BMCR_DUPLEX_MODE) {
- printk ("Full duplex\n");
+ printk (KERN_CONT "Full duplex\n");
} else {
- printk ("Half duplex\n");
+ printk (KERN_CONT "Half duplex\n");
}
}
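KERN_CONT marks these as continuation lines; without it each printk()
fragment is treated as a new message with the default log level rather
than completing the "Operating at ..." line started just above.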
if (np->tx_flow)
static int
mii_set_media (struct net_device *dev)
{
- PHY_SCR_t pscr;
+ __u16 pscr;
__u16 bmcr;
- BMSR_t bmsr;
+ __u16 bmsr;
__u16 anar;
int phy_addr;
struct netdev_private *np;
/* Does user set speed? */
if (np->an_enable) {
/* Advertise capabilities */
- bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
+ bmsr = mii_read (dev, phy_addr, MII_BMSR);
anar = mii_read (dev, phy_addr, MII_ANAR) &
~MII_ANAR_100BX_FD &
~MII_ANAR_100BX_HD &
~MII_ANAR_100BT4 &
~MII_ANAR_10BT_FD &
~MII_ANAR_10BT_HD;
- if (bmsr.bits.media_100BX_FD)
+ if (bmsr & MII_BMSR_100BX_FD)
anar |= MII_ANAR_100BX_FD;
- if (bmsr.bits.media_100BX_HD)
+ if (bmsr & MII_BMSR_100BX_HD)
anar |= MII_ANAR_100BX_HD;
- if (bmsr.bits.media_100BT4)
+ if (bmsr & MII_BMSR_100BT4)
anar |= MII_ANAR_100BT4;
- if (bmsr.bits.media_10BT_FD)
+ if (bmsr & MII_BMSR_10BT_FD)
anar |= MII_ANAR_10BT_FD;
- if (bmsr.bits.media_10BT_HD)
+ if (bmsr & MII_BMSR_10BT_HD)
anar |= MII_ANAR_10BT_HD;
anar |= MII_ANAR_PAUSE | MII_ANAR_ASYMMETRIC;
mii_write (dev, phy_addr, MII_ANAR, anar);
/* Enable Auto crossover */
- pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
- pscr.bits.mdi_crossover_mode = 3; /* 11'b */
- mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
+ pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
+ pscr |= 3 << 5; /* 11'b */
+ mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
/* Soft reset PHY */
mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
} else {
/* Force speed setting */
/* 1) Disable Auto crossover */
- pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
- pscr.bits.mdi_crossover_mode = 0;
- mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
+ pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
+ pscr &= ~(3 << 5);
+ mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
/* 2) PHY Reset */
bmcr = mii_read (dev, phy_addr, MII_BMCR);
}
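Bits 6:5 of the PHY Specific Control register select the MDI crossover
mode on the Marvell-style PHY this driver assumes, so pscr |= 3 << 5
enables automatic crossover (11b) while pscr &= ~(3 << 5) in the forced-
speed path turns it off.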
if (np->full_duplex) {
bmcr |= MII_BMCR_DUPLEX_MODE;
- printk ("Full duplex\n");
+ printk (KERN_CONT "Full duplex\n");
} else {
- printk ("Half duplex\n");
+ printk (KERN_CONT "Half duplex\n");
}
#if 0
/* Set 1000BaseT Master/Slave setting */
- mscr.image = mii_read (dev, phy_addr, MII_MSCR);
- mscr.bits.cfg_enable = 1;
- mscr.bits.cfg_value = 0;
+ mscr = mii_read (dev, phy_addr, MII_MSCR);
+ mscr |= MII_MSCR_CFG_ENABLE;
+ mscr &= ~MII_MSCR_CFG_VALUE;
#endif
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(10);
mii_get_media_pcs (struct net_device *dev)
{
__u16 negotiate;
- BMSR_t bmsr;
+ __u16 bmsr;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
- bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
+ bmsr = mii_read (dev, phy_addr, PCS_BMSR);
if (np->an_enable) {
- if (!bmsr.bits.an_complete) {
+ if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
/* Auto-Negotiation not completed */
return -1;
}
__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
printk (KERN_INFO "Operating at 1000 Mbps, ");
if (bmcr & MII_BMCR_DUPLEX_MODE) {
- printk ("Full duplex\n");
+ printk (KERN_CONT "Full duplex\n");
} else {
- printk ("Half duplex\n");
+ printk (KERN_CONT "Half duplex\n");
}
}
if (np->tx_flow)
mii_set_media_pcs (struct net_device *dev)
{
__u16 bmcr;
- ESR_t esr;
+ __u16 esr;
__u16 anar;
int phy_addr;
struct netdev_private *np;
/* Auto-Negotiation? */
if (np->an_enable) {
/* Advertise capabilities */
- esr.image = mii_read (dev, phy_addr, PCS_ESR);
+ esr = mii_read (dev, phy_addr, PCS_ESR);
anar = mii_read (dev, phy_addr, MII_ANAR) &
~PCS_ANAR_HALF_DUPLEX &
~PCS_ANAR_FULL_DUPLEX;
- if (esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD)
+ if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
anar |= PCS_ANAR_HALF_DUPLEX;
- if (esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD)
+ if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
anar |= PCS_ANAR_FULL_DUPLEX;
anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
mii_write (dev, phy_addr, MII_ANAR, anar);
/* Stop Tx and Rx logics */
writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
- synchronize_irq (dev->irq);
+
free_irq (dev->irq, dev);
del_timer_sync (&np->timer);
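free_irq() does not return while the handler is still running on that
line, so a separate synchronize_irq() right before it adds nothing.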