return -1;
*skbref = skb;
- skb->dev = cp->dev;
skb_reserve(skb, swivel);
p = skb->data;
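(The dropped skb->dev assignment is redundant rather than wrong: the rx completion path hands every received skb to eth_type_trans(skb, cp->dev), and eth_type_trans() now sets skb->dev itself.)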
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ netif_rx_schedule(dev, &cp->napi);
#else
cas_rx_ringN(cp, ring, 0);
#endif
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ netif_rx_schedule(dev, &cp->napi);
#else
cas_rx_ringN(cp, 1, 0);
#endif
if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(dev);
+ netif_rx_schedule(dev, &cp->napi);
#else
cas_rx_ringN(cp, 0, 0);
#endif
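These three hunks change the interrupt handlers to pass the napi_struct that the new API schedules. The matching cassini.h hunk is not part of this excerpt; below is a minimal sketch of the pattern the calls above rely on (the example_* names are hypothetical stand-ins, not cassini code):

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct example_priv {				/* stand-in for struct cas */
	struct net_device	*dev;
	struct napi_struct	napi;		/* embedded NAPI context */
};

static void example_mask_irq(struct example_priv *p)
{
	/* hypothetical: mask further rx interrupts, as cas_mask_intr() does */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct example_priv *p = netdev_priv(dev);

	example_mask_irq(p);
	netif_rx_schedule(dev, &p->napi);	/* queue &p->napi for polling */
	return IRQ_HANDLED;
}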
#ifdef USE_NAPI
-static int cas_poll(struct net_device *dev, int *budget)
+static int cas_poll(struct napi_struct *napi, int budget)
{
- struct cas *cp = netdev_priv(dev);
+ struct cas *cp = container_of(napi, struct cas, napi);
+ struct net_device *dev = cp->dev;
- int i, enable_intr, todo, credits;
+ int i, enable_intr, credits;
u32 status = readl(cp->regs + REG_INTR_STATUS);
unsigned long flags;
/* NAPI rx packets. we spread the credits across all of the
* rxc rings
- */
- todo = min(*budget, dev->quota);
-
- /* to make sure we're fair with the work we loop through each
+ *
+ * to make sure we're fair with the work we loop through each
* ring N_RX_COMP_RING times with a request of
- * todo / N_RX_COMP_RINGS
+ * budget / N_RX_COMP_RINGS
*/
enable_intr = 1;
credits = 0;
for (i = 0; i < N_RX_COMP_RINGS; i++) {
int j;
for (j = 0; j < N_RX_COMP_RINGS; j++) {
- credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
- if (credits >= todo) {
+ credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+ if (credits >= budget) {
enable_intr = 0;
goto rx_comp;
}
}
rx_comp:
- *budget -= credits;
- dev->quota -= credits;
-
/* final rx completion */
spin_lock_irqsave(&cp->lock, flags);
if (status)
#endif
spin_unlock_irqrestore(&cp->lock, flags);
if (enable_intr) {
- netif_rx_complete(dev);
+ netif_rx_complete(dev, napi);
cas_unmask_intr(cp);
- return 0;
}
- return 1;
+ return credits;
}
#endif
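cas_poll() now follows the 2.6.24 contract: it receives the budget directly, returns the number of packets it processed, and only calls netif_rx_complete() plus interrupt unmasking when it stayed under budget. Continuing the example_priv sketch above, the generic shape is roughly:

static int example_rx(struct example_priv *p, int quota)
{
	/* hypothetical: process up to quota rx packets, return the count */
	return 0;
}

static void example_unmask_irq(struct example_priv *p)
{
	/* hypothetical: re-enable rx interrupts, as cas_unmask_intr() does */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *p = container_of(napi, struct example_priv, napi);
	int done = example_rx(p, budget);

	if (done < budget) {
		/* under budget: stop polling and let interrupts resume */
		netif_rx_complete(p->dev, napi);
		example_unmask_irq(p);
	}
	return done;
}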
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u64 csum_start_off, csum_stuff_off;
-
- csum_start_off = (u64) (skb->h.raw - skb->data);
- csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+ const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
ctrl | TX_DESC_SOF, 0);
entry = TX_DESC_NEXT(ring, entry);
- memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
- len - tabort, tabort);
+ skb_copy_from_linear_data_offset(skb, len - tabort,
+ tx_tiny_buf(cp, ring, entry), tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
(nr_frags == 0));
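The checksum hunk replaces arithmetic on the old skb->h.raw union with the accessor API, and the tx_tiny hunk is the equivalent rename of a memcpy() from the linear skb data. A worked example of the two offsets, under assumed header sizes (14-byte Ethernet, 20-byte IPv4, TCP, no VLAN):

	/*
	 *   csum_start_off = skb_transport_offset(skb)           = 14 + 20 = 34
	 *   csum_stuff_off = csum_start_off + skb->csum_offset
	 *                  = 34 + offsetof(struct tcphdr, check)  = 34 + 16 = 50
	 *
	 * i.e. the hardware starts summing at the TCP header and stores the
	 * result 16 bytes into it; the removed skb->h.raw arithmetic computed
	 * the same two values, with the stuff offset formerly carried in
	 * skb->csum for CHECKSUM_PARTIAL packets.
	 */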
static void cas_check_pci_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
- u8 rev;
cp->cas_flags = 0;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
(pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
- if (rev >= CAS_ID_REVPLUS)
+ if (pdev->revision >= CAS_ID_REVPLUS)
cp->cas_flags |= CAS_FLAG_REG_PLUS;
- if (rev < CAS_ID_REVPLUS02u)
+ if (pdev->revision < CAS_ID_REVPLUS02u)
cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
/* Original Cassini supports HW CSUM, but it's not
* enabled by default as it can trigger TX hangs.
*/
- if (rev < CAS_ID_REV2)
+ if (pdev->revision < CAS_ID_REV2)
cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
} else {
/* Only sun has original cassini chips. */
goto err_spare;
}
+#ifdef USE_NAPI
+ napi_enable(&cp->napi);
+#endif
/* init hw */
cas_lock_all_save(cp, flags);
cas_clean_rings(cp);
unsigned long flags;
struct cas *cp = netdev_priv(dev);
+#ifdef USE_NAPI
+ napi_disable(&cp->napi);
+#endif
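napi_enable() and napi_disable() bracket the interface's up time: cas_open() enables polling before the hardware is (re)initialised, and cas_close() disables it first thing, which also waits for any already-scheduled poll to finish before the rings are torn down.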
/* Make sure we don't get distracted by suspend/resume */
mutex_lock(&cp->pm_mutex);
cas_read_regs(cp, p, regs->len / sizeof(u32));
}
-static int cas_get_stats_count(struct net_device *dev)
+static int cas_get_sset_count(struct net_device *dev, int sset)
{
- return CAS_NUM_STAT_KEYS;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CAS_NUM_STAT_KEYS;
+ default:
+ return -EOPNOTSUPP;
+ }
}
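get_sset_count() subsumes the old per-set hooks such as get_stats_count(): the ethtool core now asks for the size of a specific string set, so the driver answers for ETH_SS_STATS and returns -EOPNOTSUPP for sets it does not provide (self-test strings, for example).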
static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
.set_msglevel = cas_set_msglevel,
.get_regs_len = cas_get_regs_len,
.get_regs = cas_get_regs,
- .get_stats_count = cas_get_stats_count,
+ .get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
};
int i, err, pci_using_dac;
u16 pci_cmd;
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
+ DECLARE_MAC_BUF(mac);
if (cas_version_printed++ == 0)
printk(KERN_INFO "%s", version);
err = -ENOMEM;
goto err_out_disable_pdev;
}
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_request_regions(pdev, dev->name);
pci_cmd &= ~PCI_COMMAND_SERR;
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- pci_set_mwi(pdev);
+ if (pci_try_set_mwi(pdev))
+ printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
+ pci_name(pdev));
+
/*
* On some architectures, the default cache line size set
- * by pci_set_mwi reduces perforamnce. We have to increase
+ * by pci_try_set_mwi reduces performance. We have to increase
* it for this case. To start, we'll print some configuration
* data.
*/
dev->watchdog_timeo = CAS_TX_TIMEOUT;
dev->change_mtu = cas_change_mtu;
#ifdef USE_NAPI
- dev->poll = cas_poll;
- dev->weight = 64;
+ netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
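netif_napi_add() replaces the removed dev->poll/dev->weight assignments, binding cas_poll and its weight of 64 to the napi_struct embedded in the private struct. For reference, the 2.6.24-era prototype is roughly:

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);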
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = cas_netpoll;
i = readl(cp->regs + REG_BIM_CFG);
printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
- "Ethernet[%d] ", dev->name,
+ "Ethernet[%d] %s\n", dev->name,
(cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
(i & BIM_CFG_32BIT) ? "32" : "64",
(i & BIM_CFG_66MHZ) ? "66" : "33",
- (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- i == 5 ? ' ' : ':');
- printk("\n");
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ print_mac(mac, dev->dev_addr));
pci_set_drvdata(pdev, dev);
cp->hw_running = 1;
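DECLARE_MAC_BUF(mac) declares a small stack buffer and print_mac() formats the hardware address into it as colon-separated hex, which is what lets the per-byte printk loop collapse into the single printk above. A minimal usage sketch (device name and address are made up):

	DECLARE_MAC_BUF(mac);

	/* prints e.g. "eth0: MAC 00:03:ba:12:34:56" */
	printk(KERN_INFO "%s: MAC %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));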