X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Fvia-rhine.c;h=4535e89dfff117fabd8effbc4121a2629423393f;hb=51f2be24328957f9e2acf116b1b1d2dfd10bf41f;hp=88c30a58b4bda50d0b5fb610916365076cbdf1c5;hpb=635ecaa70e862f85f652581305fe0074810893be;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 88c30a5..4535e89 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -408,7 +408,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
-static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
+				  struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 static void rhine_tx(struct net_device *dev);
 static int rhine_rx(struct net_device *dev, int limit);
@@ -1213,11 +1214,13 @@ static void rhine_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
+				  struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
+	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1226,7 +1229,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 	entry = rp->cur_tx % TX_RING_SIZE;
 
 	if (skb_padto(skb, ETH_ZLEN))
-		return 0;
+		return NETDEV_TX_OK;
 
 	rp->tx_skbuff[entry] = skb;
 
@@ -1238,7 +1241,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 			dev_kfree_skb(skb);
 			rp->tx_skbuff[entry] = NULL;
 			dev->stats.tx_dropped++;
-			return 0;
+			return NETDEV_TX_OK;
 		}
 
 		/* Padding is not copied and so must be redone. */
@@ -1261,7 +1264,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
 	/* lock eth irq */
-	spin_lock_irq(&rp->lock);
+	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 	wmb();
@@ -1280,13 +1283,13 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_irqrestore(&rp->lock, flags);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
 		       dev->name, rp->cur_tx-1, entry);
 	}
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
@@ -1481,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit)
 				}
 			}
 		} else {
-			struct sk_buff *skb;
+			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
 
 			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
-			if (pkt_len < rx_copybreak &&
-				(skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
-				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
+			if (pkt_len < rx_copybreak)
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+			if (skb) {
 				pci_dma_sync_single_for_cpu(rp->pdev,
 							    rp->rx_skbuff_dma[entry],
 							    rp->rx_buf_sz,
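
Note on the return-type hunks: this patch is part of the tree-wide conversion of
ndo_start_xmit handlers from int to netdev_tx_t, which is why every "return 0;"
becomes "return NETDEV_TX_OK;". A minimal sketch of the resulting pattern, not
taken verbatim from via-rhine.c (the function name is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* ndo_start_xmit returns netdev_tx_t: NETDEV_TX_OK when the driver has
 * consumed the skb (including drop paths, which still count as consumed),
 * or NETDEV_TX_BUSY to make the core requeue the packet. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* skb_padto() frees the skb on failure, so the frame is consumed
	 * either way: report NETDEV_TX_OK, not an error code. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* ... hand the frame to hardware here ... */
	return NETDEV_TX_OK;
}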
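
The locking hunks are a separate fix: spin_unlock_irq() unconditionally
re-enables interrupts, which is only safe when the caller is known to run with
interrupts enabled, while the irqsave/irqrestore pair saves and restores the
previous interrupt state. A hedged sketch of the idiom (the struct and field
names are illustrative, not from via-rhine.c):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_priv {
	spinlock_t lock;
	u32 status;
};

static void example_update_status(struct example_priv *p, u32 val)
{
	unsigned long flags;

	/* Disable local IRQs and remember whether they were enabled. */
	spin_lock_irqsave(&p->lock, flags);
	p->status = val;
	/* Restore the exact IRQ state saved above, rather than
	 * unconditionally re-enabling as spin_unlock_irq() would. */
	spin_unlock_irqrestore(&p->lock, flags);
}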
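
The receive-path hunk folds the open-coded netdev_alloc_skb() plus
skb_reserve(skb, NET_IP_ALIGN) pair into the netdev_alloc_skb_ip_align()
helper, and moves the allocation out of the if-condition so skb is initialized
even when the copybreak test fails. A sketch of the rx_copybreak pattern under
that helper (everything except the kernel functions is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* For short frames, copy into a small IP-aligned skb so the large ring
 * buffer can be handed straight back to the hardware. */
static struct sk_buff *example_copybreak(struct net_device *dev,
					 const void *ring_buf, int pkt_len)
{
	/* Reserves NET_IP_ALIGN bytes internally, so the IP header that
	 * follows the 14-byte Ethernet header ends up aligned. */
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, pkt_len);

	if (skb) {
		skb_copy_to_linear_data(skb, ring_buf, pkt_len);
		skb_put(skb, pkt_len);
	}
	return skb;	/* NULL on allocation failure; caller keeps the ring skb */
}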