struct sk_buff *skb = dev->rx_info.skbs[i];
dev->rx_info.skbs[i] = NULL;
clear_rx_desc(dev, i);
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
}
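kfree_skb() already ignores a NULL pointer, so the explicit check above was redundant. A minimal sketch of the resulting pattern, using a hypothetical free_rx_slot() helper that is not part of the driver:

#include <linux/skbuff.h>

/* Hypothetical helper: clear a ring slot and free whatever skb it held. */
static void free_rx_slot(struct sk_buff **slot)
{
	struct sk_buff *skb = *slot;

	*slot = NULL;		/* drop the ring's reference first */
	kfree_skb(skb);		/* safe even when skb is NULL */
}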
if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
netif_stop_queue(ndev);
if (unlikely(dev->CFG_cache & CFG_LNKSTS))
- return 1;
+ return NETDEV_TX_BUSY;
netif_start_queue(ndev);
}
netif_start_queue(ndev);
goto again;
}
- return 1;
+ return NETDEV_TX_BUSY;
}
if (free_idx == dev->tx_intr_idx) {
if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
netif_start_queue(ndev);
- /* set the transmit start time to catch transmit timeouts */
- ndev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
static void ns83820_update_stats(struct ns83820 *dev)
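The raw 1/0 return values above give way to NETDEV_TX_BUSY and NETDEV_TX_OK, the constants an ndo_start_xmit handler is expected to return, and the manual trans_start update goes away (the core now records the per-queue transmit start time). A minimal sketch of the convention, with a hypothetical example_start_xmit() that is not taken from this driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	if (!netif_carrier_ok(ndev)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;	/* qdisc keeps the skb and retries later */
	}

	/* ... hand the skb to the hardware ring here ... */
	return NETDEV_TX_OK;		/* skb consumed; no trans_start update needed */
}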
);
#endif
- if (time_after(jiffies, ndev->trans_start + 1*HZ) &&
+ if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) &&
dev->tx_done_idx != dev->tx_free_idx) {
printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n",
ndev->name,
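Direct reads of ndev->trans_start are replaced by dev_trans_start(), which returns the most recent per-queue transmit start time. A small sketch of the staleness test, with a hypothetical tx_looks_stuck() helper:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* Hypothetical check: has no transmit started for more than a second? */
static bool tx_looks_stuck(struct net_device *ndev)
{
	return time_after(jiffies, dev_trans_start(ndev) + 1 * HZ);
}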
/* See if we can set the dma mask early on; failure is fatal. */
if (sizeof(dma_addr_t) == 8 &&
- !pci_set_dma_mask(pci_dev, DMA_64BIT_MASK)) {
+ !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
using_dac = 1;
- } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
+ } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
using_dac = 0;
} else {
dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
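The old DMA_64BIT_MASK/DMA_32BIT_MASK constants become DMA_BIT_MASK(64) and DMA_BIT_MASK(32). A sketch of the usual probe-time fallback, with a hypothetical example_set_dma_mask() that is not part of this driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev, bool *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = true;	/* 64-bit addressing available */
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		*using_dac = false;	/* fall back to 32-bit DMA */
	} else {
		dev_warn(&pdev->dev, "no usable DMA configuration\n");
		return -EIO;
	}
	return 0;
}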