#include <linux/eisa.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
-#include <asm/irq.h> /* For NR_IRQS only. */
+#include <asm/irq.h> /* For nr_irqs only. */
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
-static char version[] __devinitdata =
-DRV_NAME ": Donald Becker and others.\n";
+static const char version[] __devinitconst =
+ DRV_NAME ": Donald Becker and others.\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
{"3c980 Cyclone",
- PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c980C Python-T",
PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
#define DN_COMPLETE 0x00010000 /* This packet has been downloaded */
struct boom_rx_desc {
- u32 next; /* Last entry points to 0. */
- s32 status;
- u32 addr; /* Up to 63 addr/len pairs possible. */
- s32 length; /* Set LAST_FRAG to indicate last pair. */
+ __le32 next; /* Last entry points to 0. */
+ __le32 status;
+ __le32 addr; /* Up to 63 addr/len pairs possible. */
+ __le32 length; /* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
#endif
struct boom_tx_desc {
- u32 next; /* Last entry points to 0. */
- s32 status; /* bits 0:12 length, others see below. */
+ __le32 next; /* Last entry points to 0. */
+ __le32 status; /* bits 0:12 length, others see below. */
#if DO_ZEROCOPY
struct {
- u32 addr;
- s32 length;
+ __le32 addr;
+ __le32 length;
} frag[1+MAX_SKB_FRAGS];
#else
- u32 addr;
- s32 length;
+ __le32 addr;
+ __le32 length;
#endif
};
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
- struct net_device_stats stats; /* Generic stats */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
int chip_idx, int card_idx);
-static void vortex_up(struct net_device *dev);
+static int vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
static void mdio_sync(void __iomem *ioaddr, int bits);
{
struct net_device *dev = pci_get_drvdata(pdev);
- if (dev && dev->priv) {
+ if (dev && netdev_priv(dev)) {
if (netif_running(dev)) {
netif_device_detach(dev);
vortex_down(dev, 1);
return -EBUSY;
}
if (netif_running(dev)) {
- vortex_up(dev);
- netif_device_attach(dev);
+ err = vortex_up(dev);
+ if (err)
+ return err;
+ netif_device_attach(dev);
}
}
return 0;
return rc;
}
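+/* The bus-master ("boomerang"/cyclone/tornado) and PIO-only ("vortex")
+ * chips share every net_device_ops entry except the transmit handler,
+ * so two ops tables are provided and vortex_probe1() installs whichever
+ * one matches the detected chip.
+ */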
+static const struct net_device_ops boomerang_netdev_ops = {
+ .ndo_open = vortex_open,
+ .ndo_stop = vortex_close,
+ .ndo_start_xmit = boomerang_start_xmit,
+ .ndo_tx_timeout = vortex_tx_timeout,
+ .ndo_get_stats = vortex_get_stats,
+#ifdef CONFIG_PCI
+ .ndo_do_ioctl = vortex_ioctl,
+#endif
+ .ndo_set_multicast_list = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_vortex,
+#endif
+};
+
+static const struct net_device_ops vortex_netdev_ops = {
+ .ndo_open = vortex_open,
+ .ndo_stop = vortex_close,
+ .ndo_start_xmit = vortex_start_xmit,
+ .ndo_tx_timeout = vortex_tx_timeout,
+ .ndo_get_stats = vortex_get_stats,
+#ifdef CONFIG_PCI
+ .ndo_do_ioctl = vortex_ioctl,
+#endif
+ .ndo_set_multicast_list = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_vortex,
+#endif
+};
+
/*
* Start up the PCI/EISA device which is described by *gendev.
* Return 0 on success.
static int printed_version;
int retval, print_info;
struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
- char *print_name = "3c59x";
+ const char *print_name = "3c59x";
struct pci_dev *pdev = NULL;
struct eisa_device *edev = NULL;
}
if ((edev = DEVICE_EISA(gendev))) {
- print_name = edev->dev.bus_id;
+ print_name = dev_name(&edev->dev);
}
}
printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
goto out;
}
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, gendev);
vp = netdev_priv(dev);
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
&vp->rx_ring_dma);
retval = -ENOMEM;
- if (vp->rx_ring == 0)
+ if (!vp->rx_ring)
goto free_region;
vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
- ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (print_info) {
- for (i = 0; i < 6; i++)
- printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
- }
+ if (print_info)
+ printk(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
gets found in the wild in failure cases. Crypto is hard 8) */
if (!is_valid_ether_addr(dev->dev_addr)) {
if (print_info)
printk(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
- if (dev->irq <= 0 || dev->irq >= NR_IRQS)
+ if (dev->irq <= 0 || dev->irq >= nr_irqs)
printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);
}
/* The 3c59x-specific entries in the device structure. */
- dev->open = vortex_open;
if (vp->full_bus_master_tx) {
- dev->hard_start_xmit = boomerang_start_xmit;
+ dev->netdev_ops = &boomerang_netdev_ops;
/* Actually, it still should work with iommu. */
if (card_idx < MAX_UNITS &&
((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
hw_checksums[card_idx] == 1)) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
}
- } else {
- dev->hard_start_xmit = vortex_start_xmit;
- }
+ } else
+ dev->netdev_ops = &vortex_netdev_ops;
if (print_info) {
printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
}
- dev->stop = vortex_close;
- dev->get_stats = vortex_get_stats;
-#ifdef CONFIG_PCI
- dev->do_ioctl = vortex_ioctl;
-#endif
dev->ethtool_ops = &vortex_ethtool_ops;
- dev->set_multicast_list = set_rx_mode;
- dev->tx_timeout = vortex_tx_timeout;
dev->watchdog_timeo = (watchdog * HZ) / 1000;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = poll_vortex;
-#endif
+
if (pdev) {
vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
}
}
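+/* vortex_up() returns 0 on success, or a negative errno if
+ * pci_enable_device() fails while bringing the interface back up,
+ * so vortex_open() and the resume path can propagate the error.
+ */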
-static void
+static int
vortex_up(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned int config;
- int i, mii_reg1, mii_reg5;
+ int i, mii_reg1, mii_reg5, err = 0;
if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
if (vp->pm_state_valid)
pci_restore_state(VORTEX_PCI(vp));
- pci_enable_device(VORTEX_PCI(vp));
+ err = pci_enable_device(VORTEX_PCI(vp));
+ if (err) {
+ printk(KERN_WARNING "%s: Could not enable device\n",
+ dev->name);
+ goto err_out;
+ }
}
/* Before initializing select the active media port. */
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
netif_start_queue (dev);
+err_out:
+ return err;
}
static int
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
- goto out;
+ goto err;
}
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
vp->rx_ring[i].status = 0; /* Clear complete bit. */
vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
- skb = dev_alloc_skb(PKT_BUF_SZ);
+
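+ /* Reserve NET_IP_ALIGN extra bytes so the skb_reserve() below can
+  * offset skb->data and keep the IP header aligned past the 14-byte
+  * Ethernet header.
+  */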
+ skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
+ GFP_KERNEL);
vp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* Bad news! */
- skb->dev = dev; /* Mark as being used by this device. */
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+
+ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
}
}
retval = -ENOMEM;
- goto out_free_irq;
+ goto err_free_irq;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
}
- vortex_up(dev);
- return 0;
+ retval = vortex_up(dev);
+ if (!retval)
+ goto out;
-out_free_irq:
+err_free_irq:
free_irq(dev->irq, dev);
-out:
+err:
if (vortex_debug > 1)
printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval);
+out:
return retval;
}
case XCVR_MII: case XCVR_NWAY:
{
ok = 1;
- spin_lock_bh(&vp->lock);
+ /* Interrupts are already disabled */
+ spin_lock(&vp->lock);
vortex_check_media(dev, 0);
- spin_unlock_bh(&vp->lock);
+ spin_unlock(&vp->lock);
}
break;
default: /* Other media types handled by Tx timeouts. */
issue_and_wait(dev, TxReset);
- vp->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (vp->full_bus_master_tx) {
printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
} else {
- vp->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
netif_wake_queue(dev);
}
}
dump_tx_ring(dev);
}
- if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
- if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x14) dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
iowrite8(0, ioaddr + TxStatus);
if (tx_status & 0x30) { /* txJabber or txUnderrun */
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
dev->name, tx_status);
- if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
- if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x30) {
issue_and_wait(dev, TxReset);
}
} else {
printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
}
- /* vp->stats.tx_packets++; Counted below. */
+ /* dev->stats.tx_packets++; Counted below. */
dirty_tx++;
}
vp->dirty_tx = dirty_tx;
unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
- vp->stats.rx_errors++;
- if (rx_error & 0x01) vp->stats.rx_over_errors++;
- if (rx_error & 0x02) vp->stats.rx_length_errors++;
- if (rx_error & 0x04) vp->stats.rx_frame_errors++;
- if (rx_error & 0x08) vp->stats.rx_crc_errors++;
- if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01) dev->stats.rx_over_errors++;
+ if (rx_error & 0x02) dev->stats.rx_length_errors++;
+ if (rx_error & 0x04) dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08) dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10) dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
- vp->stats.rx_packets++;
+ dev->stats.rx_packets++;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
} else if (vortex_debug > 0)
printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
"size %d.\n", dev->name, pkt_len);
- vp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
issue_and_wait(dev, RxDiscard);
}
unsigned char rx_error = rx_status >> 16;
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
- vp->stats.rx_errors++;
- if (rx_error & 0x01) vp->stats.rx_over_errors++;
- if (rx_error & 0x02) vp->stats.rx_length_errors++;
- if (rx_error & 0x04) vp->stats.rx_frame_errors++;
- if (rx_error & 0x08) vp->stats.rx_crc_errors++;
- if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01) dev->stats.rx_over_errors++;
+ if (rx_error & 0x02) dev->stats.rx_length_errors++;
+ if (rx_error & 0x04) dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08) dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10) dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
- if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
}
}
netif_rx(skb);
- dev->last_rx = jiffies;
- vp->stats.rx_packets++;
+ dev->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}
- skb->dev = dev; /* Mark as being used by this device. */
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+
+ skb_reserve(skb, NET_IP_ALIGN);
vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
- /* Turn off statistics ASAP. We update vp->stats below. */
+ /* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
/* Disable the receiver and transmitter. */
update_stats(ioaddr, dev);
spin_unlock_irqrestore (&vp->lock, flags);
}
- return &vp->stats;
+ return &dev->stats;
}
/* Update statistics.
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
- vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
- vp->stats.tx_window_errors += ioread8(ioaddr + 4);
- vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
- vp->stats.tx_packets += ioread8(ioaddr + 6);
- vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
+ dev->stats.tx_carrier_errors += ioread8(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
+ dev->stats.tx_window_errors += ioread8(ioaddr + 4);
+ dev->stats.rx_fifo_errors += ioread8(ioaddr + 5);
+ dev->stats.tx_packets += ioread8(ioaddr + 6);
+ dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
/* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
- vp->stats.rx_bytes += ioread16(ioaddr + 10);
- vp->stats.tx_bytes += ioread16(ioaddr + 12);
+ dev->stats.rx_bytes += ioread16(ioaddr + 10);
+ dev->stats.tx_bytes += ioread16(ioaddr + 12);
/* Extra stats for get_ethtool_stats() */
vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
EL3WINDOW(4);
vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
- vp->stats.collisions = vp->xstats.tx_multiple_collisions
+ dev->stats.collisions = vp->xstats.tx_multiple_collisions
+ vp->xstats.tx_single_collisions
+ vp->xstats.tx_max_collisions;
{
u8 up = ioread8(ioaddr + 13);
- vp->stats.rx_bytes += (up & 0x0f) << 16;
- vp->stats.tx_bytes += (up & 0xf0) << 12;
+ dev->stats.rx_bytes += (up & 0x0f) << 16;
+ dev->stats.tx_bytes += (up & 0xf0) << 12;
}
EL3WINDOW(old_window >> 13);
vortex_debug = dbg;
}
-static int vortex_get_stats_count(struct net_device *dev)
+static int vortex_get_sset_count(struct net_device *dev, int sset)
{
- return VORTEX_NUM_STATS;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return VORTEX_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
}
static void vortex_get_ethtool_stats(struct net_device *dev,
strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
} else {
if (VORTEX_EISA(vp))
- sprintf(info->bus_info, vp->gendev->bus_id);
+ strcpy(info->bus_info, dev_name(vp->gendev));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
.get_msglevel = vortex_get_msglevel,
.set_msglevel = vortex_set_msglevel,
.get_ethtool_stats = vortex_get_ethtool_stats,
- .get_stats_count = vortex_get_stats_count,
+ .get_sset_count = vortex_get_sset_count,
.get_settings = vortex_get_settings,
.set_settings = vortex_set_settings,
.get_link = ethtool_op_get_link,
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
- int state = 0;
+ pci_power_t state = 0;
if(VORTEX_PCI(vp))
state = VORTEX_PCI(vp)->current_state;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
+ device_set_wakeup_enable(vp->gendev, vp->enable_wol);
+
if (vp->enable_wol) {
/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
EL3WINDOW(7);
iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
iowrite16(RxEnable, ioaddr + EL3_CMD);
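+ /* Arm PME# for wake-up from D3hot; if the card cannot do that,
+  * report it, clear enable_wol and skip the power-state change.
+  */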
- pci_enable_wake(VORTEX_PCI(vp), 0, 1);
+ if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
+ printk(KERN_INFO "%s: WOL not supported.\n",
+ pci_name(VORTEX_PCI(vp)));
+
+ vp->enable_wol = 0;
+ return;
+ }
/* Change the power state to D3; RxEnable doesn't take effect. */
pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
#endif
if (compaq_net_device) {
- vp = compaq_net_device->priv;
+ vp = netdev_priv(compaq_net_device);
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);