X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Fpcnet32.c;h=9c171a7390e2b4ab09e5a77f3fee8c19732e05d3;hb=2a6f7ea1a0562abe6afbd2bbe01f6d300de12228;hp=5fcd4d94871d019812544afd0115f2f1296cb602;hpb=6dcd60c2c78ca87163730472ddea6aa1a7754b61;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 5fcd4d9..9c171a7 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -22,8 +22,12 @@ *************************************************************************/ #define DRV_NAME "pcnet32" -#define DRV_VERSION "1.32" -#define DRV_RELDATE "18.Mar.2006" +#ifdef CONFIG_PCNET32_NAPI +#define DRV_VERSION "1.33-NAPI" +#else +#define DRV_VERSION "1.33" +#endif +#define DRV_RELDATE "27.Jun.2006" #define PFX DRV_NAME ": " static const char *const version = @@ -185,10 +189,29 @@ static int homepna[MAX_UNITS]; #define PCNET32_TOTAL_SIZE 0x20 +#define CSR0 0 +#define CSR0_INIT 0x1 +#define CSR0_START 0x2 +#define CSR0_STOP 0x4 +#define CSR0_TXPOLL 0x8 +#define CSR0_INTEN 0x40 +#define CSR0_IDON 0x0100 +#define CSR0_NORMAL (CSR0_START | CSR0_INTEN) +#define PCNET32_INIT_LOW 1 +#define PCNET32_INIT_HIGH 2 +#define CSR3 3 +#define CSR4 4 +#define CSR5 5 +#define CSR5_SUSPEND 0x0001 +#define CSR15 15 +#define PCNET32_MC_FILTER 8 + +#define PCNET32_79C970A 0x2621 + /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { u32 base; - s16 buf_length; + s16 buf_length; /* two`s complement of length */ s16 status; u32 msg_length; u32 reserved; @@ -196,7 +219,7 @@ struct pcnet32_rx_head { struct pcnet32_tx_head { u32 base; - s16 length; + s16 length; /* two`s complement of length */ s16 status; u32 misc; u32 reserved; @@ -230,12 +253,12 @@ struct pcnet32_access { * so the structure should be allocated using pci_alloc_consistent(). */ struct pcnet32_private { - struct pcnet32_init_block init_block; + struct pcnet32_init_block *init_block; /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ struct pcnet32_rx_head *rx_ring; struct pcnet32_tx_head *tx_ring; - dma_addr_t dma_addr;/* DMA address of beginning of this - object, returned by pci_alloc_consistent */ + dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, + returned by pci_alloc_consistent */ struct pci_dev *pci_dev; const char *name; /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ @@ -272,6 +295,7 @@ struct pcnet32_private { /* each bit indicates an available PHY */ u32 phymask; + unsigned short chip_version; /* which variant this is */ }; static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); @@ -279,9 +303,8 @@ static int pcnet32_probe1(unsigned long, int, struct pci_dev *); static int pcnet32_open(struct net_device *); static int pcnet32_init_ring(struct net_device *); static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); -static int pcnet32_rx(struct net_device *); static void pcnet32_tx_timeout(struct net_device *dev); -static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); +static irqreturn_t pcnet32_interrupt(int, void *); static int pcnet32_close(struct net_device *); static struct net_device_stats *pcnet32_get_stats(struct net_device *); static void pcnet32_load_multicast(struct net_device *dev); @@ -415,18 +438,250 @@ static struct pcnet32_access pcnet32_dwio = { .reset = pcnet32_dwio_reset }; +static void pcnet32_netif_stop(struct net_device *dev) +{ + dev->trans_start = jiffies; + netif_poll_disable(dev); + netif_tx_disable(dev); +} + +static void pcnet32_netif_start(struct net_device *dev) +{ + netif_wake_queue(dev); + netif_poll_enable(dev); +} + +/* + * Allocate space for the new sized tx ring. + * Free old resources + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. + */ +static void pcnet32_realloc_tx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_tx_head *new_tx_ring; + struct sk_buff **new_skb_list; + + pcnet32_purge_tx_ring(dev); + + new_tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + (1 << size), + &new_ring_dma_addr); + if (new_tx_ring == NULL) { + if (netif_msg_drv(lp)) + printk("\n" KERN_ERR + "%s: Consistent memory allocation failed.\n", + dev->name); + return; + } + memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); + + new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) { + if (netif_msg_drv(lp)) + printk("\n" KERN_ERR + "%s: Memory allocation failed.\n", dev->name); + goto free_new_tx_ring; + } + + new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) { + if (netif_msg_drv(lp)) + printk("\n" KERN_ERR + "%s: Memory allocation failed.\n", dev->name); + goto free_new_lists; + } + + kfree(lp->tx_skbuff); + kfree(lp->tx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + + lp->tx_ring_size = (1 << size); + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->tx_len_bits = (size << 12); + lp->tx_ring = new_tx_ring; + lp->tx_ring_dma_addr = new_ring_dma_addr; + lp->tx_dma_addr = new_dma_addr_list; + lp->tx_skbuff = new_skb_list; + return; + + free_new_lists: + kfree(new_dma_addr_list); + free_new_tx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + (1 << size), + new_tx_ring, + new_ring_dma_addr); + return; +} + +/* + * Allocate space for the new sized rx ring. + * Re-use old receive buffers. + * alloc extra buffers + * free unneeded buffers + * free unneeded buffers + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. 
+ */ +static void pcnet32_realloc_rx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_rx_head *new_rx_ring; + struct sk_buff **new_skb_list; + int new, overlap; + + new_rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + (1 << size), + &new_ring_dma_addr); + if (new_rx_ring == NULL) { + if (netif_msg_drv(lp)) + printk("\n" KERN_ERR + "%s: Consistent memory allocation failed.\n", + dev->name); + return; + } + memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); + + new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) { + if (netif_msg_drv(lp)) + printk("\n" KERN_ERR + "%s: Memory allocation failed.\n", dev->name); + goto free_new_rx_ring; + } + + new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) { + if (netif_msg_drv(lp)) + printk("\n" KERN_ERR + "%s: Memory allocation failed.\n", dev->name); + goto free_new_lists; + } + + /* first copy the current receive buffers */ + overlap = min(size, lp->rx_ring_size); + for (new = 0; new < overlap; new++) { + new_rx_ring[new] = lp->rx_ring[new]; + new_dma_addr_list[new] = lp->rx_dma_addr[new]; + new_skb_list[new] = lp->rx_skbuff[new]; + } + /* now allocate any new buffers needed */ + for (; new < size; new++ ) { + struct sk_buff *rx_skbuff; + new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ); + if (!(rx_skbuff = new_skb_list[new])) { + /* keep the original lists and buffers */ + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n", + dev->name); + goto free_all_new; + } + skb_reserve(rx_skbuff, 2); + + new_dma_addr_list[new] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]); + new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + new_rx_ring[new].status = le16_to_cpu(0x8000); + } + /* and free any unneeded buffers */ + for (; new < lp->rx_ring_size; new++) { + if (lp->rx_skbuff[new]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[new]); + } + } + + kfree(lp->rx_skbuff); + kfree(lp->rx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + + lp->rx_ring_size = (1 << size); + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->rx_len_bits = (size << 4); + lp->rx_ring = new_rx_ring; + lp->rx_ring_dma_addr = new_ring_dma_addr; + lp->rx_dma_addr = new_dma_addr_list; + lp->rx_skbuff = new_skb_list; + return; + + free_all_new: + for (; --new >= lp->rx_ring_size; ) { + if (new_skb_list[new]) { + pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + dev_kfree_skb(new_skb_list[new]); + } + } + kfree(new_skb_list); + free_new_lists: + kfree(new_dma_addr_list); + free_new_rx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + (1 << size), + new_rx_ring, + new_ring_dma_addr); + return; +} + +static void pcnet32_purge_rx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + /* free all allocated skbuffs */ + for (i = 0; i < lp->rx_ring_size; i++) { + lp->rx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->rx_skbuff[i]) { + 
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(lp->rx_skbuff[i]); + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void pcnet32_poll_controller(struct net_device *dev) { disable_irq(dev->irq); - pcnet32_interrupt(0, dev, NULL); + pcnet32_interrupt(0, dev); enable_irq(dev->irq); } #endif static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; @@ -441,7 +696,7 @@ static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; @@ -456,7 +711,7 @@ static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); @@ -468,16 +723,18 @@ static void pcnet32_get_drvinfo(struct net_device *dev, static u32 pcnet32_get_link(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r; spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); - } else { + } else if (lp->chip_version >= PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; } spin_unlock_irqrestore(&lp->lock, flags); @@ -486,19 +743,19 @@ static u32 pcnet32_get_link(struct net_device *dev) static u32 pcnet32_get_msglevel(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); return lp->msg_enable; } static void pcnet32_set_msglevel(struct net_device *dev, u32 value) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); lp->msg_enable = value; } static int pcnet32_nway_reset(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; @@ -513,7 +770,7 @@ static int pcnet32_nway_reset(struct net_device *dev) static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); ering->tx_max_pending = TX_MAX_RING_SIZE; ering->tx_pending = lp->tx_ring_size; @@ -524,58 +781,55 @@ static void pcnet32_get_ringparam(struct net_device *dev, static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; + unsigned int size; + ulong ioaddr = dev->base_addr; int i; if (ering->rx_mini_pending || ering->rx_jumbo_pending) return -EINVAL; if (netif_running(dev)) - pcnet32_close(dev); + pcnet32_netif_stop(dev); spin_lock_irqsave(&lp->lock, flags); - pcnet32_free_ring(dev); - lp->tx_ring_size = - min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); - lp->rx_ring_size = - min(ering->rx_pending, (unsigned 
int)RX_MAX_RING_SIZE); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); /* set the minimum ring size to 4, to allow the loopback test to work * unchanged. */ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { - if (lp->tx_ring_size <= (1 << i)) + if (size <= (1 << i)) break; } - lp->tx_ring_size = (1 << i); - lp->tx_mod_mask = lp->tx_ring_size - 1; - lp->tx_len_bits = (i << 12); + if ((1 << i) != lp->tx_ring_size) + pcnet32_realloc_tx_ring(dev, lp, i); + size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { - if (lp->rx_ring_size <= (1 << i)) + if (size <= (1 << i)) break; } - lp->rx_ring_size = (1 << i); - lp->rx_mod_mask = lp->rx_ring_size - 1; - lp->rx_len_bits = (i << 4); + if ((1 << i) != lp->rx_ring_size) + pcnet32_realloc_rx_ring(dev, lp, i); - if (pcnet32_alloc_ring(dev, dev->name)) { - pcnet32_free_ring(dev); - spin_unlock_irqrestore(&lp->lock, flags); - return -ENOMEM; + dev->weight = lp->rx_ring_size / 2; + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); } spin_unlock_irqrestore(&lp->lock, flags); - if (pcnet32_debug & NETIF_MSG_DRV) - printk(KERN_INFO PFX + if (netif_msg_drv(lp)) + printk(KERN_INFO "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, lp->rx_ring_size, lp->tx_ring_size); - if (netif_running(dev)) - pcnet32_open(dev); - return 0; } @@ -593,7 +847,7 @@ static int pcnet32_self_test_count(struct net_device *dev) static void pcnet32_ethtool_test(struct net_device *dev, struct ethtool_test *test, u64 * data) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); int rc; if (test->flags == ETH_TEST_FL_OFFLINE) { @@ -614,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev, static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); struct pcnet32_access *a = &lp->a; /* access to registers */ ulong ioaddr = dev->base_addr; /* card base I/O address */ struct sk_buff *skb; /* sk buff */ @@ -629,29 +883,31 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) unsigned long flags; unsigned long ticks; - *data1 = 1; /* status of test, default to fail */ rc = 1; /* default to fail */ if (netif_running(dev)) +#ifdef CONFIG_PCNET32_NAPI + pcnet32_netif_stop(dev); +#else pcnet32_close(dev); +#endif spin_lock_irqsave(&lp->lock, flags); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); /* Reset the PCNET32 */ lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ /* switch pcnet32 to 32bit mode */ lp->a.write_bcr(ioaddr, 20, 2); - lp->init_block.mode = - le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); - lp->init_block.filter[0] = 0; - lp->init_block.filter[1] = 0; - /* purge & init rings but don't actually restart */ pcnet32_restart(dev, 0x0000); - lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ /* Initialize Transmit buffers. 
*/ size = data_len + 15; @@ -693,14 +949,15 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) } } - x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ - x = x | 0x0002; - a->write_bcr(ioaddr, 32, x); + x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ + a->write_bcr(ioaddr, 32, x | 0x0002); - lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ + /* set int loopback in CSR15 */ + x = a->read_csr(ioaddr, CSR15) & 0xfffc; + lp->a.write_csr(ioaddr, CSR15, x | 0x0044); teststatus = le16_to_cpu(0x8000); - lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ + lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ /* Check status of descriptors */ for (x = 0; x < numbuffs; x++) { @@ -708,7 +965,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) rmb(); while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { spin_unlock_irqrestore(&lp->lock, flags); - mdelay(1); + msleep(1); spin_lock_irqsave(&lp->lock, flags); rmb(); ticks++; @@ -721,7 +978,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) } } - lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ wmb(); if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); @@ -754,33 +1011,43 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) } x++; } - if (!rc) { - *data1 = 0; - } clean_up: + *data1 = rc; pcnet32_purge_tx_ring(dev); - x = a->read_csr(ioaddr, 15) & 0xFFFF; - a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ + + x = a->read_csr(ioaddr, CSR15); + a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ - x = x & ~0x0002; - a->write_bcr(ioaddr, 32, x); + a->write_bcr(ioaddr, 32, (x & ~0x0002)); +#ifdef CONFIG_PCNET32_NAPI + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } else { + pcnet32_purge_rx_ring(dev); + lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + } spin_unlock_irqrestore(&lp->lock, flags); - +#else if (netif_running(dev)) { + spin_unlock_irqrestore(&lp->lock, flags); pcnet32_open(dev); } else { + pcnet32_purge_rx_ring(dev); lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + spin_unlock_irqrestore(&lp->lock, flags); } +#endif return (rc); } /* end pcnet32_loopback_test */ static void pcnet32_led_blink_callback(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); struct pcnet32_access *a = &lp->a; ulong ioaddr = dev->base_addr; unsigned long flags; @@ -797,7 +1064,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev) static int pcnet32_phys_id(struct net_device *dev, u32 data) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); struct pcnet32_access *a = &lp->a; ulong ioaddr = dev->base_addr; unsigned long flags; @@ -835,11 +1102,333 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data) return 0; } +/* + * lp->lock must be held. + */ +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + int can_sleep) +{ + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + + /* really old chips have to be stopped. 
*/ + if (lp->chip_version < PCNET32_79C970A) + return 0; + + /* set SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { + spin_unlock_irqrestore(&lp->lock, *flags); + if (can_sleep) + msleep(1); + else + mdelay(1); + spin_lock_irqsave(&lp->lock, *flags); + ticks++; + if (ticks > 200) { + if (netif_msg_hw(lp)) + printk(KERN_DEBUG + "%s: Error getting into suspend!\n", + dev->name); + return 0; + } + } + return 1; +} + +/* + * process one receive descriptor entry + */ + +static void pcnet32_rx_entry(struct net_device *dev, + struct pcnet32_private *lp, + struct pcnet32_rx_head *rxp, + int entry) +{ + int status = (short)le16_to_cpu(rxp->status) >> 8; + int rx_in_place = 0; + struct sk_buff *skb; + short pkt_len; + + if (status != 0x03) { /* There was an error. */ + /* + * There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with full-sized + * buffers it's possible for a jabber packet to use two + * buffers, with only the last correctly noting the error. + */ + if (status & 0x01) /* Only count a general error at the */ + lp->stats.rx_errors++; /* end of a packet. */ + if (status & 0x20) + lp->stats.rx_frame_errors++; + if (status & 0x10) + lp->stats.rx_over_errors++; + if (status & 0x08) + lp->stats.rx_crc_errors++; + if (status & 0x04) + lp->stats.rx_fifo_errors++; + return; + } + + pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; + + /* Discard oversize frames. */ + if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { + if (netif_msg_drv(lp)) + printk(KERN_ERR "%s: Impossible packet size %d!\n", + dev->name, pkt_len); + lp->stats.rx_errors++; + return; + } + if (pkt_len < 60) { + if (netif_msg_rx_err(lp)) + printk(KERN_ERR "%s: Runt packet!\n", dev->name); + lp->stats.rx_errors++; + return; + } + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + + if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { + skb_reserve(newskb, 2); + skb = lp->rx_skbuff[entry]; + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[entry], + PKT_BUF_SZ - 2, + PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + lp->rx_dma_addr[entry] = + pci_map_single(lp->pci_dev, + newskb->data, + PKT_BUF_SZ - 2, + PCI_DMA_FROMDEVICE); + rxp->base = le32_to_cpu(lp->rx_dma_addr[entry]); + rx_in_place = 1; + } else + skb = NULL; + } else { + skb = dev_alloc_skb(pkt_len + 2); + } + + if (skb == NULL) { + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: Memory squeeze, dropping packet.\n", + dev->name); + lp->stats.rx_dropped++; + return; + } + skb->dev = dev; + if (!rx_in_place) { + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, pkt_len); /* Make room */ + pci_dma_sync_single_for_cpu(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + eth_copy_and_sum(skb, + (unsigned char *)(lp->rx_skbuff[entry]->data), + pkt_len, 0); + pci_dma_sync_single_for_device(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + } + lp->stats.rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, dev); +#ifdef CONFIG_PCNET32_NAPI + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif + dev->last_rx = jiffies; + lp->stats.rx_packets++; + return; +} + +static int pcnet32_rx(struct net_device *dev, int quota) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int entry = lp->cur_rx & lp->rx_mod_mask; + struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; + int npackets = 0; + + /* If we own 
the next entry, it's a new packet. Send it up. */ + while (quota > npackets && (short)le16_to_cpu(rxp->status) >= 0) { + pcnet32_rx_entry(dev, lp, rxp, entry); + npackets += 1; + /* + * The docs say that the buffer length isn't touched, but Andrew + * Boyd of QNX reports that some revs of the 79C965 clear it. + */ + rxp->buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + wmb(); /* Make sure owner changes after others are visible */ + rxp->status = le16_to_cpu(0x8000); + entry = (++lp->cur_rx) & lp->rx_mod_mask; + rxp = &lp->rx_ring[entry]; + } + + return npackets; +} + +static int pcnet32_tx(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned int dirty_tx = lp->dirty_tx; + int delta; + int must_restart = 0; + + while (dirty_tx != lp->cur_tx) { + int entry = dirty_tx & lp->tx_mod_mask; + int status = (short)le16_to_cpu(lp->tx_ring[entry].status); + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x4000) { + /* There was a major error, log it. */ + int err_status = le32_to_cpu(lp->tx_ring[entry].misc); + lp->stats.tx_errors++; + if (netif_msg_tx_err(lp)) + printk(KERN_ERR + "%s: Tx error status=%04x err_status=%08x\n", + dev->name, status, + err_status); + if (err_status & 0x04000000) + lp->stats.tx_aborted_errors++; + if (err_status & 0x08000000) + lp->stats.tx_carrier_errors++; + if (err_status & 0x10000000) + lp->stats.tx_window_errors++; +#ifndef DO_DXSUFLO + if (err_status & 0x40000000) { + lp->stats.tx_fifo_errors++; + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + if (netif_msg_tx_err(lp)) + printk(KERN_ERR + "%s: Tx FIFO error!\n", + dev->name); + must_restart = 1; + } +#else + if (err_status & 0x40000000) { + lp->stats.tx_fifo_errors++; + if (!lp->dxsuflo) { /* If controller doesn't recover ... */ + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + if (netif_msg_tx_err(lp)) + printk(KERN_ERR + "%s: Tx FIFO error!\n", + dev->name); + must_restart = 1; + } + } +#endif + } else { + if (status & 0x1800) + lp->stats.collisions++; + lp->stats.tx_packets++; + } + + /* We must free the original skb */ + if (lp->tx_skbuff[entry]) { + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[entry], + lp->tx_skbuff[entry]-> + len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + lp->tx_dma_addr[entry] = 0; + } + dirty_tx++; + } + + delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); + if (delta > lp->tx_ring_size) { + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", + dev->name, dirty_tx, lp->cur_tx, + lp->tx_full); + dirty_tx += lp->tx_ring_size; + delta -= lp->tx_ring_size; + } + + if (lp->tx_full && + netif_queue_stopped(dev) && + delta < lp->tx_ring_size - 2) { + /* The ring is no longer full, clear tbusy. 
*/ + lp->tx_full = 0; + netif_wake_queue(dev); + } + lp->dirty_tx = dirty_tx; + + return must_restart; +} + +#ifdef CONFIG_PCNET32_NAPI +static int pcnet32_poll(struct net_device *dev, int *budget) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int quota = min(dev->quota, *budget); + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + u16 val; + + quota = pcnet32_rx(dev, quota); + + spin_lock_irqsave(&lp->lock, flags); + if (pcnet32_tx(dev)) { + /* reset the chip to clear the error condition, then restart */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); + netif_wake_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + + *budget -= quota; + dev->quota -= quota; + + if (dev->quota == 0) { + return 1; + } + + netif_rx_complete(dev); + + spin_lock_irqsave(&lp->lock, flags); + + /* clear interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); + + /* Set interrupt enable. */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); + mmiowb(); + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; +} +#endif + #define PCNET32_REGS_PER_PHY 32 #define PCNET32_MAX_PHYS 32 static int pcnet32_get_regs_len(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); int j = lp->phycount * PCNET32_REGS_PER_PHY; return ((PCNET32_NUM_REGS + j) * sizeof(u16)); @@ -850,35 +1439,16 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, { int i, csr0; u16 *buff = ptr; - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); struct pcnet32_access *a = &lp->a; ulong ioaddr = dev->base_addr; - int ticks; unsigned long flags; spin_lock_irqsave(&lp->lock, flags); - csr0 = a->read_csr(ioaddr, 0); - if (!(csr0 & 0x0004)) { /* If not stopped */ - /* set SUSPEND (SPND) - CSR5 bit 0 */ - a->write_csr(ioaddr, 5, 0x0001); - - /* poll waiting for bit to be set */ - ticks = 0; - while (!(a->read_csr(ioaddr, 5) & 0x0001)) { - spin_unlock_irqrestore(&lp->lock, flags); - mdelay(1); - spin_lock_irqsave(&lp->lock, flags); - ticks++; - if (ticks > 200) { - if (netif_msg_hw(lp)) - printk(KERN_DEBUG - "%s: Error getting into suspend!\n", - dev->name); - break; - } - } - } + csr0 = a->read_csr(ioaddr, CSR0); + if (!(csr0 & CSR0_STOP)) /* If not stopped */ + pcnet32_suspend(dev, &flags, 1); /* read address PROM */ for (i = 0; i < 16; i += 2) @@ -915,15 +1485,18 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, } } - if (!(csr0 & 0x0004)) { /* If not stopped */ + if (!(csr0 & CSR0_STOP)) { /* If not stopped */ + int csr5; + /* clear SUSPEND (SPND) - CSR5 bit 0 */ - a->write_csr(ioaddr, 5, 0x0000); + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); } spin_unlock_irqrestore(&lp->lock, flags); } -static struct ethtool_ops pcnet32_ethtool_ops = { +static const struct ethtool_ops pcnet32_ethtool_ops = { .get_settings = pcnet32_get_settings, .set_settings = pcnet32_set_settings, .get_drvinfo = pcnet32_get_drvinfo, @@ -1019,7 +1592,6 @@ static int __devinit pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) { struct pcnet32_private *lp; - dma_addr_t lp_dma_addr; int i, media; int fdx, mii, fset, dxsuflo; int chip_version; @@ -1141,7 +1713,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dxsuflo = 1; } - dev = alloc_etherdev(0); + dev = alloc_etherdev(sizeof(*lp)); 
if (!dev) { if (pcnet32_debug & NETIF_MSG_PROBE) printk(KERN_ERR PFX "Memory allocation failed.\n"); @@ -1232,25 +1804,22 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } dev->base_addr = ioaddr; + lp = netdev_priv(dev); /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ - if ((lp = - pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { + if ((lp->init_block = + pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr)) == NULL) { if (pcnet32_debug & NETIF_MSG_PROBE) printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); ret = -ENOMEM; goto err_free_netdev; } - - memset(lp, 0, sizeof(*lp)); - lp->dma_addr = lp_dma_addr; lp->pci_dev = pdev; spin_lock_init(&lp->lock); SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); - dev->priv = lp; lp->name = chipname; lp->shared_irq = shared; lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ @@ -1264,6 +1833,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->mii_if.reg_num_mask = 0x1f; lp->dxsuflo = dxsuflo; lp->mii = mii; + lp->chip_version = chip_version; lp->msg_enable = pcnet32_debug; if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) @@ -1296,23 +1866,21 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) && dev->dev_addr[2] == 0x75) lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; - lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ - lp->init_block.tlen_rlen = + lp->init_block->mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ + lp->init_block->tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.filter[0] = 0x00000000; - lp->init_block.filter[1] = 0x00000000; - lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->filter[0] = 0x00000000; + lp->init_block->filter[1] = 0x00000000; + lp->init_block->rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); /* switch pcnet32 to 32bit mode */ a->write_bcr(ioaddr, 20, 2); - a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, - init_block)) & 0xffff); - a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, - init_block)) >> 16); + a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); + a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); if (pdev) { /* use the IRQ provided by PCI */ dev->irq = pdev->irq; @@ -1327,7 +1895,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) * boards will work. */ /* Trigger an initialization just for the interrupt. 
*/ - a->write_csr(ioaddr, 0, 0x41); + a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT); mdelay(1); dev->irq = probe_irq_off(irq_mask); @@ -1386,6 +1954,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dev->ethtool_ops = &pcnet32_ethtool_ops; dev->tx_timeout = pcnet32_tx_timeout; dev->watchdog_timeo = (5 * HZ); + dev->weight = lp->rx_ring_size / 2; +#ifdef CONFIG_PCNET32_NAPI + dev->poll = pcnet32_poll; +#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = pcnet32_poll_controller; @@ -1414,7 +1986,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) err_free_ring: pcnet32_free_ring(dev); err_free_consistent: - pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); err_free_netdev: free_netdev(dev); err_release_region: @@ -1425,14 +1998,14 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* if any allocation fails, caller must also call pcnet32_free_ring */ static int pcnet32_alloc_ring(struct net_device *dev, char *name) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, &lp->tx_ring_dma_addr); if (lp->tx_ring == NULL) { - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", name); @@ -1444,59 +2017,55 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name) lp->rx_ring_size, &lp->rx_ring_dma_addr); if (lp->rx_ring == NULL) { - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", name); return -ENOMEM; } - lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, + lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), GFP_ATOMIC); if (!lp->tx_dma_addr) { - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } - memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); - lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, + lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), GFP_ATOMIC); if (!lp->rx_dma_addr) { - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } - memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); - lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, + lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), GFP_ATOMIC); if (!lp->tx_skbuff) { - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } - memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); - lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, + lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), GFP_ATOMIC); if (!lp->rx_skbuff) { - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); return -ENOMEM; } - memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); return 0; } static void pcnet32_free_ring(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); 
kfree(lp->tx_skbuff); lp->tx_skbuff = NULL; @@ -1529,7 +2098,7 @@ static void pcnet32_free_ring(struct net_device *dev) static int pcnet32_open(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; u16 val; int i; @@ -1560,8 +2129,7 @@ static int pcnet32_open(struct net_device *dev) "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr), (u32) (lp->rx_ring_dma_addr), - (u32) (lp->dma_addr + - offsetof(struct pcnet32_private, init_block))); + (u32) (lp->init_dma_addr)); /* set/reset autoselect bit */ val = lp->a.read_bcr(ioaddr, 2) & ~2; @@ -1578,10 +2146,7 @@ static int pcnet32_open(struct net_device *dev) val |= 2; } else if (lp->options & PCNET32_PORT_ASEL) { /* workaround of xSeries250, turn on for 79C975 only */ - i = ((lp->a.read_csr(ioaddr, 88) | - (lp->a. - read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; - if (i == 0x2627) + if (lp->chip_version == 0x2627) val |= 3; } lp->a.write_bcr(ioaddr, 9, val); @@ -1697,13 +2262,13 @@ static int pcnet32_open(struct net_device *dev) #ifdef DO_DXSUFLO if (lp->dxsuflo) { /* Disable transmit stop on underflow */ - val = lp->a.read_csr(ioaddr, 3); + val = lp->a.read_csr(ioaddr, CSR3); val |= 0x40; - lp->a.write_csr(ioaddr, 3, val); + lp->a.write_csr(ioaddr, CSR3, val); } #endif - lp->init_block.mode = + lp->init_block->mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); pcnet32_load_multicast(dev); @@ -1713,39 +2278,36 @@ static int pcnet32_open(struct net_device *dev) } /* Re-initialize the PCNET32, and start it when done. */ - lp->a.write_csr(ioaddr, 1, (lp->dma_addr + - offsetof(struct pcnet32_private, - init_block)) & 0xffff); - lp->a.write_csr(ioaddr, 2, - (lp->dma_addr + - offsetof(struct pcnet32_private, init_block)) >> 16); + lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); + lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); - lp->a.write_csr(ioaddr, 4, 0x0915); - lp->a.write_csr(ioaddr, 0, 0x0001); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); netif_start_queue(dev); - /* Print the link status and start the watchdog */ - pcnet32_check_media(dev, 1); - mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + if (lp->chip_version >= PCNET32_79C970A) { + /* Print the link status and start the watchdog */ + pcnet32_check_media(dev, 1); + mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + } i = 0; while (i++ < 100) - if (lp->a.read_csr(ioaddr, 0) & 0x0100) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) break; /* * We used to clear the InitDone bit, 0x0100, here but Mark Stockton * reports that doing so triggers a bug in the '974. 
*/ - lp->a.write_csr(ioaddr, 0, 0x0042); + lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL); if (netif_msg_ifup(lp)) printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", dev->name, i, - (u32) (lp->dma_addr + - offsetof(struct pcnet32_private, init_block)), - lp->a.read_csr(ioaddr, 0)); + (u32) (lp->init_dma_addr), + lp->a.read_csr(ioaddr, CSR0)); spin_unlock_irqrestore(&lp->lock, flags); @@ -1753,16 +2315,7 @@ static int pcnet32_open(struct net_device *dev) err_free_ring: /* free any allocated skbuffs */ - for (i = 0; i < lp->rx_ring_size; i++) { - lp->rx_ring[i].status = 0; - if (lp->rx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - dev_kfree_skb(lp->rx_skbuff[i]); - } - lp->rx_skbuff[i] = NULL; - lp->rx_dma_addr[i] = 0; - } + pcnet32_purge_rx_ring(dev); /* * Switch back to 16bit mode to avoid problems with dumb @@ -1791,7 +2344,7 @@ static int pcnet32_open(struct net_device *dev) static void pcnet32_purge_tx_ring(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); int i; for (i = 0; i < lp->tx_ring_size; i++) { @@ -1811,7 +2364,7 @@ static void pcnet32_purge_tx_ring(struct net_device *dev) /* Initialize the PCNET32 Rx and Tx rings. */ static int pcnet32_init_ring(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); int i; lp->tx_full = 0; @@ -1825,7 +2378,7 @@ static int pcnet32_init_ring(struct net_device *dev) (rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ))) { /* there is not much, we can do at this point */ - if (pcnet32_debug & NETIF_MSG_DRV) + if (netif_msg_drv(lp)) printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n", dev->name); @@ -1853,12 +2406,12 @@ static int pcnet32_init_ring(struct net_device *dev) lp->tx_dma_addr[i] = 0; } - lp->init_block.tlen_rlen = + lp->init_block->tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); wmb(); /* Make sure all changes are visible */ return 0; } @@ -1869,13 +2422,13 @@ static int pcnet32_init_ring(struct net_device *dev) */ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; int i; /* wait for stop */ for (i = 0; i < 100; i++) - if (lp->a.read_csr(ioaddr, 0) & 0x0004) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) break; if (i >= 100 && netif_msg_drv(lp)) @@ -1888,18 +2441,18 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) return; /* ReInit Ring */ - lp->a.write_csr(ioaddr, 0, 1); + lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); i = 0; while (i++ < 1000) - if (lp->a.read_csr(ioaddr, 0) & 0x0100) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) break; - lp->a.write_csr(ioaddr, 0, csr0_bits); + lp->a.write_csr(ioaddr, CSR0, csr0_bits); } static void pcnet32_tx_timeout(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr, flags; 
spin_lock_irqsave(&lp->lock, flags); @@ -1907,8 +2460,8 @@ static void pcnet32_tx_timeout(struct net_device *dev) if (pcnet32_debug & NETIF_MSG_DRV) printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", - dev->name, lp->a.read_csr(ioaddr, 0)); - lp->a.write_csr(ioaddr, 0, 0x0004); + dev->name, lp->a.read_csr(ioaddr, CSR0)); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); lp->stats.tx_errors++; if (netif_msg_tx_err(lp)) { int i; @@ -1930,7 +2483,7 @@ static void pcnet32_tx_timeout(struct net_device *dev) le16_to_cpu(lp->tx_ring[i].status)); printk("\n"); } - pcnet32_restart(dev, 0x0042); + pcnet32_restart(dev, CSR0_NORMAL); dev->trans_start = jiffies; netif_wake_queue(dev); @@ -1940,7 +2493,7 @@ static void pcnet32_tx_timeout(struct net_device *dev) static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; u16 status; int entry; @@ -1951,7 +2504,7 @@ static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) if (netif_msg_tx_queued(lp)) { printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", - dev->name, lp->a.read_csr(ioaddr, 0)); + dev->name, lp->a.read_csr(ioaddr, CSR0)); } /* Default status -- will not enable Successful-TxDone @@ -1982,7 +2535,7 @@ static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->stats.tx_bytes += skb->len; /* Trigger an immediate send poll. */ - lp->a.write_csr(ioaddr, 0, 0x0048); + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); dev->trans_start = jiffies; @@ -1996,162 +2549,47 @@ static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) /* The PCNET32 interrupt handler. */ static irqreturn_t -pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs) +pcnet32_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct pcnet32_private *lp; unsigned long ioaddr; - u16 csr0, rap; + u16 csr0; int boguscnt = max_interrupt_work; - int must_restart; - - if (!dev) { - if (pcnet32_debug & NETIF_MSG_INTR) - printk(KERN_DEBUG "%s(): irq %d for unknown device\n", - __FUNCTION__, irq); - return IRQ_NONE; - } ioaddr = dev->base_addr; - lp = dev->priv; + lp = netdev_priv(dev); spin_lock(&lp->lock); - rap = lp->a.read_rap(ioaddr); - while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { + csr0 = lp->a.read_csr(ioaddr, CSR0); + while ((csr0 & 0x8f00) && --boguscnt >= 0) { if (csr0 == 0xffff) { break; /* PCMCIA remove happened */ } /* Acknowledge all of the current interrupt sources ASAP. */ - lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f); - - must_restart = 0; + lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); if (netif_msg_intr(lp)) printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", - dev->name, csr0, lp->a.read_csr(ioaddr, 0)); - - if (csr0 & 0x0400) /* Rx interrupt */ - pcnet32_rx(dev); - - if (csr0 & 0x0200) { /* Tx-done interrupt */ - unsigned int dirty_tx = lp->dirty_tx; - int delta; - - while (dirty_tx != lp->cur_tx) { - int entry = dirty_tx & lp->tx_mod_mask; - int status = - (short)le16_to_cpu(lp->tx_ring[entry]. - status); - - if (status < 0) - break; /* It still hasn't been Txed */ - - lp->tx_ring[entry].base = 0; - - if (status & 0x4000) { - /* There was an major error, log it. */ - int err_status = - le32_to_cpu(lp->tx_ring[entry]. 
- misc); - lp->stats.tx_errors++; - if (netif_msg_tx_err(lp)) - printk(KERN_ERR - "%s: Tx error status=%04x err_status=%08x\n", - dev->name, status, - err_status); - if (err_status & 0x04000000) - lp->stats.tx_aborted_errors++; - if (err_status & 0x08000000) - lp->stats.tx_carrier_errors++; - if (err_status & 0x10000000) - lp->stats.tx_window_errors++; -#ifndef DO_DXSUFLO - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - if (netif_msg_tx_err(lp)) - printk(KERN_ERR - "%s: Tx FIFO error! CSR0=%4.4x\n", - dev->name, csr0); - must_restart = 1; - } -#else - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - if (!lp->dxsuflo) { /* If controller doesn't recover ... */ - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - if (netif_msg_tx_err - (lp)) - printk(KERN_ERR - "%s: Tx FIFO error! CSR0=%4.4x\n", - dev-> - name, - csr0); - must_restart = 1; - } - } -#endif - } else { - if (status & 0x1800) - lp->stats.collisions++; - lp->stats.tx_packets++; - } - - /* We must free the original skb */ - if (lp->tx_skbuff[entry]) { - pci_unmap_single(lp->pci_dev, - lp->tx_dma_addr[entry], - lp->tx_skbuff[entry]-> - len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(lp->tx_skbuff[entry]); - lp->tx_skbuff[entry] = NULL; - lp->tx_dma_addr[entry] = 0; - } - dirty_tx++; - } - - delta = - (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + - lp->tx_ring_size); - if (delta > lp->tx_ring_size) { - if (netif_msg_drv(lp)) - printk(KERN_ERR - "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", - dev->name, dirty_tx, lp->cur_tx, - lp->tx_full); - dirty_tx += lp->tx_ring_size; - delta -= lp->tx_ring_size; - } - - if (lp->tx_full && - netif_queue_stopped(dev) && - delta < lp->tx_ring_size - 2) { - /* The ring is no longer full, clear tbusy. */ - lp->tx_full = 0; - netif_wake_queue(dev); - } - lp->dirty_tx = dirty_tx; - } + dev->name, csr0, lp->a.read_csr(ioaddr, CSR0)); /* Log misc errors. */ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */ if (csr0 & 0x1000) { /* - * this happens when our receive ring is full. This shouldn't - * be a problem as we will see normal rx interrupts for the frames - * in the receive ring. But there are some PCI chipsets (I can - * reproduce this on SP3G with Intel saturn chipset) which have - * sometimes problems and will fill up the receive ring with - * error descriptors. In this situation we don't get a rx - * interrupt, but a missed frame interrupt sooner or later. - * So we try to clean up our receive ring here. + * This happens when our receive ring is full. This + * shouldn't be a problem as we will see normal rx + * interrupts for the frames in the receive ring. But + * there are some PCI chipsets (I can reproduce this + * on SP3G with Intel saturn chipset) which have + * sometimes problems and will fill up the receive + * ring with error descriptors. In this situation we + * don't get a rx interrupt, but a missed frame + * interrupt sooner or later. */ - pcnet32_rx(dev); lp->stats.rx_errors++; /* Missed a Rx frame. 
*/ } if (csr0 & 0x0800) { @@ -2161,190 +2599,48 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs) dev->name, csr0); /* unlike for the lance, there is no restart needed */ } - - if (must_restart) { +#ifdef CONFIG_PCNET32_NAPI + if (netif_rx_schedule_prep(dev)) { + u16 val; + /* set interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val |= 0x5f00; + lp->a.write_csr(ioaddr, CSR3, val); + mmiowb(); + __netif_rx_schedule(dev); + break; + } +#else + pcnet32_rx(dev, dev->weight); + if (pcnet32_tx(dev)) { /* reset the chip to clear the error condition, then restart */ lp->a.reset(ioaddr); - lp->a.write_csr(ioaddr, 4, 0x0915); - pcnet32_restart(dev, 0x0002); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); netif_wake_queue(dev); } +#endif + csr0 = lp->a.read_csr(ioaddr, CSR0); } +#ifndef CONFIG_PCNET32_NAPI /* Set interrupt enable. */ - lp->a.write_csr(ioaddr, 0, 0x0040); - lp->a.write_rap(ioaddr, rap); + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); +#endif if (netif_msg_intr(lp)) printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", - dev->name, lp->a.read_csr(ioaddr, 0)); + dev->name, lp->a.read_csr(ioaddr, CSR0)); spin_unlock(&lp->lock); return IRQ_HANDLED; } -static int pcnet32_rx(struct net_device *dev) -{ - struct pcnet32_private *lp = dev->priv; - int entry = lp->cur_rx & lp->rx_mod_mask; - int boguscnt = lp->rx_ring_size / 2; - - /* If we own the next entry, it's a new packet. Send it up. */ - while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { - int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; - - if (status != 0x03) { /* There was an error. */ - /* - * There is a tricky error noted by John Murphy, - * to Russ Nelson: Even with full-sized - * buffers it's possible for a jabber packet to use two - * buffers, with only the last correctly noting the error. - */ - if (status & 0x01) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet. */ - if (status & 0x20) - lp->stats.rx_frame_errors++; - if (status & 0x10) - lp->stats.rx_over_errors++; - if (status & 0x08) - lp->stats.rx_crc_errors++; - if (status & 0x04) - lp->stats.rx_fifo_errors++; - lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); - } else { - /* Malloc up new buffer, compatible with net-2e. */ - short pkt_len = - (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) - - 4; - struct sk_buff *skb; - - /* Discard oversize frames. 
*/ - if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { - if (netif_msg_drv(lp)) - printk(KERN_ERR - "%s: Impossible packet size %d!\n", - dev->name, pkt_len); - lp->stats.rx_errors++; - } else if (pkt_len < 60) { - if (netif_msg_rx_err(lp)) - printk(KERN_ERR "%s: Runt packet!\n", - dev->name); - lp->stats.rx_errors++; - } else { - int rx_in_place = 0; - - if (pkt_len > rx_copybreak) { - struct sk_buff *newskb; - - if ((newskb = - dev_alloc_skb(PKT_BUF_SZ))) { - skb_reserve(newskb, 2); - skb = lp->rx_skbuff[entry]; - pci_unmap_single(lp->pci_dev, - lp-> - rx_dma_addr - [entry], - PKT_BUF_SZ - 2, - PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[entry] = newskb; - newskb->dev = dev; - lp->rx_dma_addr[entry] = - pci_map_single(lp->pci_dev, - newskb->data, - PKT_BUF_SZ - - 2, - PCI_DMA_FROMDEVICE); - lp->rx_ring[entry].base = - le32_to_cpu(lp-> - rx_dma_addr - [entry]); - rx_in_place = 1; - } else - skb = NULL; - } else { - skb = dev_alloc_skb(pkt_len + 2); - } - - if (skb == NULL) { - int i; - if (netif_msg_drv(lp)) - printk(KERN_ERR - "%s: Memory squeeze, deferring packet.\n", - dev->name); - for (i = 0; i < lp->rx_ring_size; i++) - if ((short) - le16_to_cpu(lp-> - rx_ring[(entry + - i) - & lp-> - rx_mod_mask]. - status) < 0) - break; - - if (i > lp->rx_ring_size - 2) { - lp->stats.rx_dropped++; - lp->rx_ring[entry].status |= - le16_to_cpu(0x8000); - wmb(); /* Make sure adapter sees owner change */ - lp->cur_rx++; - } - break; - } - skb->dev = dev; - if (!rx_in_place) { - skb_reserve(skb, 2); /* 16 byte align */ - skb_put(skb, pkt_len); /* Make room */ - pci_dma_sync_single_for_cpu(lp->pci_dev, - lp-> - rx_dma_addr - [entry], - PKT_BUF_SZ - - 2, - PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, - (unsigned char *)(lp-> - rx_skbuff - [entry]-> - data), - pkt_len, 0); - pci_dma_sync_single_for_device(lp-> - pci_dev, - lp-> - rx_dma_addr - [entry], - PKT_BUF_SZ - - 2, - PCI_DMA_FROMDEVICE); - } - lp->stats.rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->last_rx = jiffies; - lp->stats.rx_packets++; - } - } - /* - * The docs say that the buffer length isn't touched, but Andrew Boyd - * of QNX reports that some revs of the 79C965 clear it. - */ - lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); - wmb(); /* Make sure owner changes after all others are visible */ - lp->rx_ring[entry].status |= le16_to_cpu(0x8000); - entry = (++lp->cur_rx) & lp->rx_mod_mask; - if (--boguscnt <= 0) - break; /* don't stay in loop forever */ - } - - return 0; -} - static int pcnet32_close(struct net_device *dev) { unsigned long ioaddr = dev->base_addr; - struct pcnet32_private *lp = dev->priv; - int i; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; del_timer_sync(&lp->watchdog_timer); @@ -2358,10 +2654,10 @@ static int pcnet32_close(struct net_device *dev) if (netif_msg_ifdown(lp)) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, lp->a.read_csr(ioaddr, 0)); + dev->name, lp->a.read_csr(ioaddr, CSR0)); /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
*/ - lp->a.write_csr(ioaddr, 0, 0x0004); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* * Switch back to 16bit mode to avoid problems with dumb @@ -2375,31 +2671,8 @@ static int pcnet32_close(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); - /* free all allocated skbuffs */ - for (i = 0; i < lp->rx_ring_size; i++) { - lp->rx_ring[i].status = 0; - wmb(); /* Make sure adapter sees owner change */ - if (lp->rx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - dev_kfree_skb(lp->rx_skbuff[i]); - } - lp->rx_skbuff[i] = NULL; - lp->rx_dma_addr[i] = 0; - } - - for (i = 0; i < lp->tx_ring_size; i++) { - lp->tx_ring[i].status = 0; /* CPU owns buffer */ - wmb(); /* Make sure adapter sees owner change */ - if (lp->tx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], - lp->tx_skbuff[i]->len, - PCI_DMA_TODEVICE); - dev_kfree_skb(lp->tx_skbuff[i]); - } - lp->tx_skbuff[i] = NULL; - lp->tx_dma_addr[i] = 0; - } + pcnet32_purge_rx_ring(dev); + pcnet32_purge_tx_ring(dev); spin_unlock_irqrestore(&lp->lock, flags); @@ -2408,15 +2681,12 @@ static int pcnet32_close(struct net_device *dev) static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; - u16 saved_addr; unsigned long flags; spin_lock_irqsave(&lp->lock, flags); - saved_addr = lp->a.read_rap(ioaddr); lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); - lp->a.write_rap(ioaddr, saved_addr); spin_unlock_irqrestore(&lp->lock, flags); return &lp->stats; @@ -2425,10 +2695,11 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) /* taken from the sunlance driver, which it took from the depca driver */ static void pcnet32_load_multicast(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - volatile struct pcnet32_init_block *ib = &lp->init_block; + struct pcnet32_private *lp = netdev_priv(dev); + volatile struct pcnet32_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *) & ib->filter; struct dev_mc_list *dmi = dev->mc_list; + unsigned long ioaddr = dev->base_addr; char *addrs; int i; u32 crc; @@ -2437,6 +2708,10 @@ static void pcnet32_load_multicast(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) { ib->filter[0] = 0xffffffff; ib->filter[1] = 0xffffffff; + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); return; } /* clear the multicast filter */ @@ -2458,6 +2733,9 @@ static void pcnet32_load_multicast(struct net_device *dev) le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | (1 << (crc & 0xf))); } + for (i = 0; i < 4; i++) + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, + le16_to_cpu(mcast_table[i])); return; } @@ -2467,26 +2745,38 @@ static void pcnet32_load_multicast(struct net_device *dev) static void pcnet32_set_multicast_list(struct net_device *dev) { unsigned long ioaddr = dev->base_addr, flags; - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); + int csr15, suspended; spin_lock_irqsave(&lp->lock, flags); + suspended = pcnet32_suspend(dev, &flags, 0); + csr15 = lp->a.read_csr(ioaddr, CSR15); if (dev->flags & IFF_PROMISC) { /* Log any net taps. 
*/ if (netif_msg_hw(lp)) printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); - lp->init_block.mode = + lp->init_block->mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); } else { - lp->init_block.mode = + lp->init_block->mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); pcnet32_load_multicast(dev); } - lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ - pcnet32_restart(dev, 0x0042); /* Resume normal operation */ - netif_wake_queue(dev); + if (suspended) { + int csr5; + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = lp->a.read_csr(ioaddr, CSR5); + lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } else { + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + pcnet32_restart(dev, CSR0_NORMAL); + netif_wake_queue(dev); + } spin_unlock_irqrestore(&lp->lock, flags); } @@ -2494,7 +2784,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev) /* This routine assumes that the lp->lock is held */ static int mdio_read(struct net_device *dev, int phy_id, int reg_num) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; u16 val_out; @@ -2510,7 +2800,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num) /* This routine assumes that the lp->lock is held */ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; if (!lp->mii) @@ -2522,7 +2812,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); int rc; unsigned long flags; @@ -2540,7 +2830,7 @@ static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int pcnet32_check_otherphy(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); struct mii_if_info mii = lp->mii_if; u16 bmcr; int i; @@ -2587,7 +2877,7 @@ static int pcnet32_check_otherphy(struct net_device *dev) static void pcnet32_check_media(struct net_device *dev, int verbose) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); int curr_link; int prev_link = netif_carrier_ok(dev) ? 
1 : 0; u32 bcr9; @@ -2643,7 +2933,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) static void pcnet32_watchdog(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; /* Print the link status if it has changed */ @@ -2659,12 +2949,13 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); if (dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = netdev_priv(dev); unregister_netdev(dev); pcnet32_free_ring(dev); release_region(dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); free_netdev(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); @@ -2721,7 +3012,7 @@ static int __init pcnet32_init_module(void) tx_start = tx_start_pt; /* find the PCI devices */ - if (!pci_module_init(&pcnet32_driver)) + if (!pci_register_driver(&pcnet32_driver)) pcnet32_have_pci = 1; /* should we find any remaining VLbus devices ? */ @@ -2739,12 +3030,13 @@ static void __exit pcnet32_cleanup_module(void) struct net_device *next_dev; while (pcnet32_dev) { - struct pcnet32_private *lp = pcnet32_dev->priv; + struct pcnet32_private *lp = netdev_priv(pcnet32_dev); next_dev = lp->next; unregister_netdev(pcnet32_dev); pcnet32_free_ring(pcnet32_dev); release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); free_netdev(pcnet32_dev); pcnet32_dev = next_dev; }
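The descriptor and ring conventions this patch leans on (a buf_length stored as the two's complement of the usable buffer size, the 0x8000 OWN bit in status handing a descriptor to the chip, and power-of-two ring sizes addressed through a mod mask, rounded up from the ethtool request with a floor of 4) can be illustrated with a minimal user-space sketch. Everything below is illustrative only: PKT_BUF_SZ_SKETCH, rx_head_sketch and round_ring_size are stand-ins, not driver symbols, and the buffer size is assumed rather than taken from the hunks above.

/*
 * Stand-alone sketch of the ring/descriptor conventions referenced above:
 * - buf_length holds the two's complement (negative) of the usable length,
 * - bit 15 (0x8000) of status marks the descriptor as owned by the adapter,
 * - ring sizes are powers of two, so "index & (size - 1)" wraps cheaply.
 * None of these names are the driver's own; this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PKT_BUF_SZ_SKETCH 1544          /* assumed buffer size for the example */

struct rx_head_sketch {                 /* loosely modeled on struct pcnet32_rx_head */
	uint32_t base;
	int16_t  buf_length;            /* two's complement of length */
	int16_t  status;                /* bit 15 = owned by adapter */
};

/* Round a requested ring size up to a power of two with a floor of 4,
 * the same idea as the loop in pcnet32_set_ringparam(). */
static unsigned int round_ring_size(unsigned int requested, unsigned int log_max)
{
	unsigned int i;

	if (requested > (1u << log_max))       /* the driver clamps with min() first */
		requested = 1u << log_max;
	for (i = 2; i <= log_max; i++)
		if (requested <= (1u << i))
			break;
	return 1u << i;
}

int main(void)
{
	struct rx_head_sketch d;
	unsigned int size = round_ring_size(100, 9);   /* 9 ~ log2 of an assumed 512-entry max */
	unsigned int mask = size - 1;

	d.base = 0;
	d.buf_length = (int16_t)(2 - PKT_BUF_SZ_SKETCH); /* negative length, as in the rx setup above */
	d.status = (int16_t)0x8000;                      /* hand the buffer to the adapter */

	printf("ring size %u, mask 0x%x, next index after 127 is %u\n",
	       size, mask, (127u + 1u) & mask);
	printf("buf_length stored as %d (0x%04x)\n",
	       d.buf_length, (unsigned)(uint16_t)d.buf_length);
	return 0;
}

Printing the rounded size and the wrapped index makes the point of the mod mask visible: because the ring length is a power of two, advancing the cursor never needs a divide, which is why both the realloc paths and the rx/tx loops keep the "(1 << size)" and "size - 1" pair together.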
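The pcnet32_suspend() helper introduced above follows a simple request/acknowledge handshake: set the SPND bit in CSR5, then poll CSR5 until the chip reports it is actually suspended, giving up after roughly 200 one-millisecond waits (sleeping or busy-waiting depending on context). A rough stand-alone sketch of that shape follows; the stubbed write_csr/read_csr model a chip that acknowledges on the third read and are in no way the driver's register accessors.

/*
 * Sketch of the CSR5 suspend handshake used by pcnet32_suspend():
 * request suspend, then poll with a bounded number of waits.
 * The register model below is a stub, not driver or hardware code.
 */
#include <stdio.h>

#define CSR5               5
#define CSR5_SUSPEND       0x0001
#define SUSPEND_POLL_LIMIT 200          /* the driver gives up after ~200 polls */

static int suspend_requested;
static int reads_since_request;

static void write_csr(int index, unsigned short val)
{
	if (index == CSR5) {
		suspend_requested = (val & CSR5_SUSPEND) != 0;
		reads_since_request = 0;
	}
}

static unsigned short read_csr(int index)
{
	/* pretend the chip acknowledges suspend on the third poll */
	if (index == CSR5 && suspend_requested && ++reads_since_request >= 3)
		return CSR5_SUSPEND;
	return 0;
}

/* Same shape as pcnet32_suspend(): request, then poll with a timeout. */
static int suspend_chip(void)
{
	int ticks = 0;

	write_csr(CSR5, read_csr(CSR5) | CSR5_SUSPEND);
	while (!(read_csr(CSR5) & CSR5_SUSPEND)) {
		/* the driver sleeps or busy-waits 1 ms here, depending on can_sleep */
		if (++ticks > SUSPEND_POLL_LIMIT) {
			fprintf(stderr, "suspend timed out\n");
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	printf("suspend %s\n", suspend_chip() ? "succeeded" : "failed");
	return 0;
}

The payoff of this handshake in the patch is that pcnet32_set_multicast_list() and pcnet32_get_regs() can now pause the chip with SPND and clear it again afterwards, instead of stopping and fully restarting the controller for every filter or register access.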
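The multicast changes above mirror the 64-bit logical address filter into CSR8..CSR11 (PCNET32_MC_FILTER), updating word "hash >> 4", bit "hash & 0xf" for each group address. The hash computation itself sits in unchanged context outside these hunks; the sketch below assumes the usual LANCE/PCnet convention of taking the top six bits of the little-endian Ethernet CRC-32, so treat that detail (and the sample addresses) as assumptions rather than facts from this diff.

/*
 * Sketch of the logical-address filter layout written into CSR8..CSR11.
 * The 6-bit hash is assumed to be the top six bits of the little-endian
 * Ethernet CRC-32 of the address; ether_crc_le_sketch() is a local helper,
 * not the kernel's ether_crc_le().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc_le_sketch(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int bit;

	while (--len >= 0) {
		uint8_t byte = *data++;
		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	/* illustrative multicast addresses, not taken from the driver */
	static const uint8_t groups[2][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0xfb },
	};
	uint16_t mcast_table[4] = { 0, 0, 0, 0 };   /* images of CSR8..CSR11 */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		uint32_t hash = ether_crc_le_sketch(6, groups[i]) >> 26; /* assumed 6-bit hash */
		mcast_table[hash >> 4] |= 1u << (hash & 0xf);  /* same indexing as the diff */
	}

	for (i = 0; i < 4; i++)
		printf("CSR%u = 0x%04x\n", 8 + i, (unsigned)mcast_table[i]);
	return 0;
}

Writing the four 16-bit words straight to the CSRs is what lets the new code update the filter while the chip is merely suspended, instead of reloading the whole init block.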