#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/io.h>
struct nv_skb_map {
struct sk_buff *skb;
dma_addr_t dma;
- unsigned int dma_len;
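+ /* dma_single records whether the buffer was mapped with
+  * pci_map_single() (skb linear data) or pci_map_page() (a
+  * fragment), so the unmap call can match the map call.
+  */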
+ unsigned int dma_len:31;
+ unsigned int dma_single:1;
struct ring_desc_ex *first_tx_desc;
struct nv_skb_map *next_tx_ctx;
};
np->tx_skb[i].skb = NULL;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
+ np->tx_skb[i].dma_single = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
return nv_alloc_rx_optimized(dev);
}
-static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
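+/* undo a tx DMA mapping with the unmap variant that matches how it was made */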
+static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
- struct fe_priv *np = netdev_priv(dev);
-
if (tx_skb->dma) {
- pci_unmap_page(np->pci_dev, tx_skb->dma,
- tx_skb->dma_len,
- PCI_DMA_TODEVICE);
+ if (tx_skb->dma_single)
+ pci_unmap_single(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
+ PCI_DMA_TODEVICE);
tx_skb->dma = 0;
}
+}
+
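+/*
+ * Unmap and free a pending tx skb; returns 1 if an skb was freed so the
+ * caller can account it as dropped.
+ */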
+static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
+{
+ nv_unmap_txskb(np, tx_skb);
if (tx_skb->skb) {
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
return 1;
- } else {
- return 0;
}
+ return 0;
}
static void nv_drain_tx(struct net_device *dev)
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
- if (nv_release_txskb(dev, &np->tx_skb[i]))
+ if (nv_release_txskb(np, &np->tx_skb[i]))
dev->stats.tx_dropped++;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
+ np->tx_skb[i].dma_single = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
* nv_start_xmit: dev->hard_start_xmit function
* Called with netif_tx_lock held.
*/
-static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u32 tx_flags = 0;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
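+ /* linear data is mapped with pci_map_single(); flag it so nv_unmap_txskb() matches */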
+ np->put_tx_ctx->dma_single = 1;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
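+ /* fragment pages are mapped with pci_map_page(), not pci_map_single() */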
+ np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
return NETDEV_TX_OK;
}
-static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
+ struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u32 tx_flags = 0;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 1;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 0;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
dev->name, flags);
- pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
- np->get_tx_ctx->dma_len,
- PCI_DMA_TODEVICE);
- np->get_tx_ctx->dma = 0;
+ nv_unmap_txskb(np, np->get_tx_ctx);
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
dev->name, flags);
- pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
- np->get_tx_ctx->dma_len,
- PCI_DMA_TODEVICE);
- np->get_tx_ctx->dma = 0;
+ nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
if (!(flags & NV_TX2_ERROR))
} else {
pff |= NVREG_PFF_MYADDR;
- if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
u32 alwaysOff[2];
u32 alwaysOn[2];
} else {
struct dev_mc_list *walk;
- walk = dev->mc_list;
- while (walk != NULL) {
+ netdev_for_each_mc_addr(walk, dev) {
u32 a, b;
a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
alwaysOff[1] &= ~b;
- walk = walk->next;
}
}
addr[0] = alwaysOn[0];
nv_msi_workaround(np);
#ifdef CONFIG_FORCEDETH_NAPI
- napi_schedule(&np->napi);
-
- /* Disable furthur irq's
- (msix not enabled with napi) */
- writel(0, base + NvRegIrqMask);
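+ /* only mask irqs when this context wins the right to schedule the
+  * poll; an already-scheduled poll re-enables them on completion
+  */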
+ if (napi_schedule_prep(&np->napi)) {
+ /*
+ * Disable further irqs (msix not enabled with napi)
+ */
+ writel(0, base + NvRegIrqMask);
+ __napi_schedule(&np->napi);
+ }
#else
do
nv_msi_workaround(np);
#ifdef CONFIG_FORCEDETH_NAPI
- napi_schedule(&np->napi);
-
- /* Disable furthur irq's
- (msix not enabled with napi) */
- writel(0, base + NvRegIrqMask);
-
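+ /* as above: claim the poll via napi_schedule_prep() before masking irqs */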
+ if (napi_schedule_prep(&np->napi)) {
+ /*
+ * Disable further irqs (msix not enabled with napi)
+ */
+ writel(0, base + NvRegIrqMask);
+ __napi_schedule(&np->napi);
+ }
#else
do
{
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
- &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+ nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
/* Request irq for tx handling */
sprintf(np->name_tx, "%s-tx", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
- &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+ nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
/* Request irq for link and timer handling */
sprintf(np->name_other, "%s-other", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
- &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+ nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
}
- pci_unmap_page(np->pci_dev, test_dma_addr,
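+ /* the test buffer was mapped with pci_map_single(), so the unmap must match */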
+ pci_unmap_single(np->pci_dev, test_dma_addr,
(skb_end_pointer(tx_skb) - tx_skb->data),
PCI_DMA_TODEVICE);
dev_kfree_skb_any(tx_skb);
dev->dev_addr);
dev_printk(KERN_ERR, &pci_dev->dev,
"Please complain to your hardware vendor. Switching to a random MAC.\n");
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x6c;
- get_random_bytes(&dev->dev_addr[3], 3);
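+ /* random_ether_addr() gives a valid unicast address with the locally administered bit set */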
+ random_ether_addr(dev->dev_addr);
}
dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
- if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
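+ /* DEV_NEED_TX_LIMIT2 includes the DEV_NEED_TX_LIMIT bit, so require the
+ full mask; a plain & would also match TX_LIMIT-only devices */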
+ if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
#define nv_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,