X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Flib82596.c;h=443c39a3732f30ea494175793b80566423099bd5;hb=d8f492b7d9a4c1cfdac69ba18a81acbd86d1dc6e;hp=afa4638052a2dbb4786445466f4c434419d29d23;hpb=09f75cd7bf13720738e6a196cc0107ce9a5bd5a0;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index afa4638..443c39a 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -47,7 +47,7 @@
   TBD:
  * look at deferring rx frames rather than discarding (as per tulip)
  * handle tx ring full as per tulip
- * performace test to tune rx_copybreak
+ * performance test to tune rx_copybreak
 
    Most of my modifications relate to the braindead big-endian
    implementation by Intel.  When the i596 is operating in
@@ -176,8 +176,8 @@ struct i596_reg {
 struct i596_tbd {
 	unsigned short size;
 	unsigned short pad;
-	dma_addr_t next;
-	dma_addr_t data;
+	u32 next;
+	u32 data;
 	u32 cache_pad[5];		/* Total 32 bytes... */
 };
 
@@ -195,12 +195,12 @@ struct i596_cmd {
 	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
 	unsigned short status;
 	unsigned short command;
-	dma_addr_t b_next;	/* Address from i596 viewpoint */
+	u32 b_next;	/* Address from i596 viewpoint */
 };
 
 struct tx_cmd {
 	struct i596_cmd cmd;
-	dma_addr_t tbd;
+	u32 tbd;
 	unsigned short size;
 	unsigned short pad;
 	struct sk_buff *skb;	/* So we can free it after tx */
@@ -237,8 +237,8 @@ struct cf_cmd {
 struct i596_rfd {
 	unsigned short stat;
 	unsigned short cmd;
-	dma_addr_t b_next;	/* Address from i596 viewpoint */
-	dma_addr_t rbd;
+	u32 b_next;	/* Address from i596 viewpoint */
+	u32 rbd;
 	unsigned short count;
 	unsigned short size;
 	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
@@ -249,18 +249,18 @@ struct i596_rfd {
 };
 
 struct i596_rbd {
-	/* hardware data */
-	unsigned short count;
-	unsigned short zero1;
-	dma_addr_t b_next;
-	dma_addr_t b_data;	/* Address from i596 viewpoint */
-	unsigned short size;
-	unsigned short zero2;
-	/* driver data */
-	struct sk_buff *skb;
-	struct i596_rbd *v_next;
-	dma_addr_t b_addr;	/* This rbd addr from i596 view */
-	unsigned char *v_data;	/* Address from CPUs viewpoint */
+	/* hardware data */
+	unsigned short count;
+	unsigned short zero1;
+	u32 b_next;
+	u32 b_data;	/* Address from i596 viewpoint */
+	unsigned short size;
+	unsigned short zero2;
+	/* driver data */
+	struct sk_buff *skb;
+	struct i596_rbd *v_next;
+	u32 b_addr;	/* This rbd addr from i596 view */
+	unsigned char *v_data;	/* Address from CPUs viewpoint */
 				/* Total 32 bytes... */
 #ifdef __LP64__
 	u32 cache_pad[4];
 #endif
 };
@@ -275,8 +275,8 @@ struct i596_rbd {
 struct i596_scb {
 	unsigned short status;
 	unsigned short command;
-	dma_addr_t cmd;
-	dma_addr_t rfd;
+	u32 cmd;
+	u32 rfd;
 	u32 crc_err;
 	u32 align_err;
 	u32 resource_err;
@@ -288,14 +288,14 @@ struct i596_scb {
 };
 
 struct i596_iscp {
-	u32 stat;
-	dma_addr_t scb;
+	u32 stat;
+	u32 scb;
 };
 
 struct i596_scp {
-	u32 sysbus;
-	u32 pad;
-	dma_addr_t iscp;
+	u32 sysbus;
+	u32 pad;
+	u32 iscp;
 };
 
 struct i596_dma {
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev)
 
 	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 		dma_addr_t dma_addr;
-		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+		struct sk_buff *skb;
 
+		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 		if (skb == NULL)
 			return -1;
-		skb_reserve(skb, 2);
 		dma_addr = dma_map_single(dev->dev.parent, skb->data,
 					  PKT_BUF_SZ, DMA_FROM_DEVICE);
 		rbd->v_next = rbd+1;
@@ -588,7 +588,7 @@ static int init_i596_mem(struct net_device *dev)
 			     "%s: i82596 initialization successful\n",
 			     dev->name));
 
-	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
+	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
 		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
 		goto failed;
 	}
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev)
 						 (dma_addr_t)SWAP32(rbd->b_data),
 						 PKT_BUF_SZ, DMA_FROM_DEVICE);
 				/* Get fresh skbuff to replace filled one. */
-				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+				newskb = netdev_alloc_skb_ip_align(dev,
+								   PKT_BUF_SZ);
 				if (newskb == NULL) {
 					skb = NULL;	/* drop pkt */
 					goto memory_squeeze;
 				}
-				skb_reserve(newskb, 2);
 
 				/* Pass up the skb already on the Rx ring. */
 				skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev)
 				rbd->b_data = SWAP32(dma_addr);
 				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 			} else
-				skb = netdev_alloc_skb(dev, pkt_len + 2);
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 memory_squeeze:
 			if (skb == NULL) {
 				/* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@ memory_squeeze:
 				dma_sync_single_for_cpu(dev->dev.parent,
 							(dma_addr_t)SWAP32(rbd->b_data),
 							PKT_BUF_SZ, DMA_FROM_DEVICE);
-				skb_reserve(skb, 2);
 				memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
 				dma_sync_single_for_device(dev->dev.parent,
 							   (dma_addr_t)SWAP32(rbd->b_data),
@@ -739,7 +738,6 @@ memory_squeeze:
 			skb->len = pkt_len;
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
-			dev->last_rx = jiffies;
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += pkt_len;
 		}
@@ -984,7 +982,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (length < ETH_ZLEN) {
 		if (skb_padto(skb, ETH_ZLEN))
-			return 0;
+			return NETDEV_TX_OK;
 		length = ETH_ZLEN;
 	}
 
@@ -1029,21 +1027,27 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netif_start_queue(dev);
 
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static void print_eth(unsigned char *add, char *str)
 {
-	int i;
-
-	printk(KERN_DEBUG "i596 0x%p, ", add);
-	for (i = 0; i < 6; i++)
-		printk(" %02X", add[i + 6]);
-	printk(" -->");
-	for (i = 0; i < 6; i++)
-		printk(" %02X", add[i]);
-	printk(" %02X%02X, %s\n", add[12], add[13], str);
+	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
+	       add, add + 6, add, add[12], add[13], str);
 }
 
+static const struct net_device_ops i596_netdev_ops = {
+	.ndo_open = i596_open,
+	.ndo_stop = i596_close,
+	.ndo_start_xmit = i596_start_xmit,
+	.ndo_set_multicast_list = set_multicast_list,
+	.ndo_tx_timeout = i596_tx_timeout,
+	.ndo_change_mtu = eth_change_mtu,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = i596_poll_controller,
+#endif
+};
+
 static int __devinit i82596_probe(struct net_device *dev)
 {
@@ -1070,16 +1074,8 @@ static int __devinit i82596_probe(struct net_device *dev)
 		return -ENOMEM;
 	}
 
-	/* The 82596-specific entries in the device structure. */
-	dev->open = i596_open;
-	dev->stop = i596_close;
-	dev->hard_start_xmit = i596_start_xmit;
-	dev->set_multicast_list = set_multicast_list;
-	dev->tx_timeout = i596_tx_timeout;
+	dev->netdev_ops = &i596_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = i596_poll_controller;
-#endif
 
 	memset(dma, 0, sizeof(struct i596_dma));
 	lp->dma = dma;
@@ -1098,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
 		return i;
 	};
 
-	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
-			      dev->name, dev->base_addr));
-	for (i = 0; i < 6; i++)
-		DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
-	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+			      dev->name, dev->base_addr, dev->dev_addr,
+			      dev->irq));
 	DEB(DEB_INIT, printk(KERN_INFO
 			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
 			     dev->name, dma, (int)sizeof(struct i596_dma),
@@ -1127,12 +1121,6 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 	struct i596_dma *dma;
 	unsigned short status, ack_cmd = 0;
 
-	if (dev == NULL) {
-		printk(KERN_WARNING "%s: irq %d for unknown device.\n",
-		       __FUNCTION__, irq);
-		return IRQ_NONE;
-	}
-
 	lp = netdev_priv(dev);
 	dma = lp->dma;
 
@@ -1143,7 +1131,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 
 	DEB(DEB_INTS, printk(KERN_DEBUG
 			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
-			     dev->name, irq, status));
+			     dev->name, dev->irq, status));
 
 	ack_cmd = status & 0xf000;
 
@@ -1392,31 +1380,32 @@ static void set_multicast_list(struct net_device *dev)
 		}
 	}
 
-	cnt = dev->mc_count;
+	cnt = netdev_mc_count(dev);
 	if (cnt > MAX_MC_CNT) {
 		cnt = MAX_MC_CNT;
 		printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
 			dev->name, cnt);
 	}
 
-	if (dev->mc_count > 0) {
+	if (!netdev_mc_empty(dev)) {
 		struct dev_mc_list *dmi;
 		unsigned char *cp;
 		struct mc_cmd *cmd;
 
 		cmd = &dma->mc_cmd;
 		cmd->cmd.command = SWAP16(CmdMulticastList);
-		cmd->mc_cnt = SWAP16(dev->mc_count * 6);
+		cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
 		cp = cmd->mc_addrs;
-		for (dmi = dev->mc_list;
-		     cnt && dmi != NULL;
-		     dmi = dmi->next, cnt--, cp += 6) {
+		netdev_for_each_mc_addr(dmi, dev) {
+			if (!cnt--)
+				break;
 			memcpy(cp, dmi->dmi_addr, 6);
 			if (i596_debug > 1)
 				DEB(DEB_MULTI,
 				    printk(KERN_DEBUG
-					   "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
-					   dev->name, cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]));
+					   "%s: Adding address %pM\n",
+					   dev->name, cp));
+			cp += 6;
 		}
 		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
 		i596_add_cmd(dev, &cmd->cmd);
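
The final hunk above converts the multicast walk from the old dev->mc_count/dev->mc_list fields to the netdev_mc_count()/netdev_for_each_mc_addr() helpers. The fragment below is an illustrative sketch only and is not part of the patch: copy_mc_addrs(), buf and max_addrs are hypothetical names, while the dev_mc_list/dmi_addr iteration follows the same kernel-era API that set_multicast_list() uses in that hunk.

/* Illustrative sketch only -- not part of the patch above.
 * copy_mc_addrs(), buf and max_addrs are hypothetical; the loop mirrors
 * the netdev_for_each_mc_addr()/dev_mc_list usage in set_multicast_list().
 */
#include <linux/netdevice.h>
#include <linux/string.h>

static int copy_mc_addrs(struct net_device *dev, unsigned char *buf,
			 int max_addrs)
{
	struct dev_mc_list *dmi;	/* multicast list node type in this kernel era */
	int copied = 0;

	netdev_for_each_mc_addr(dmi, dev) {
		if (copied >= max_addrs)
			break;	/* clamp, as the driver does with MAX_MC_CNT */
		memcpy(buf + 6 * copied, dmi->dmi_addr, 6);	/* copy one 6-byte MAC */
		copied++;
	}

	return copied;	/* caller can compare against netdev_mc_count(dev) */
}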