+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
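+ /* restart autonegotiation so the new pause settings get renegotiated */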
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ } else {
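+ /* autoneg disabled: force the requested pause settings directly */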
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+
+ if (!netif_running(dev))
+ nv_update_linkspeed(dev);
+ else
+ nv_update_pause(dev, np->pause_flags);
+ }
+
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+}
+
+static u32 nv_get_rx_csum(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ return np->rx_csum != 0;
+}
+
+static int nv_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int retcode = 0;
+
+ if (np->driver_data & DEV_HAS_CHECKSUM) {
+ if (data) {
+ np->rx_csum = 1;
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ } else {
+ np->rx_csum = 0;
+ /* vlan is dependent on rx checksum offload */
+ if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
+ }
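+ /* push the updated offload bits to the nic if it is up */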
+ if (netif_running(dev)) {
+ spin_lock_irq(&np->lock);
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
+ spin_unlock_irq(&np->lock);
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return retcode;
+}
+
+static int nv_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM)
+ return ethtool_op_set_tx_hw_csum(dev, data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int nv_set_sg(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM)
+ return ethtool_op_set_sg(dev, data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int nv_get_sset_count(struct net_device *dev, int sset)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ if (np->driver_data & DEV_HAS_TEST_EXTENDED)
+ return NV_TEST_COUNT_EXTENDED;
+ else
+ return NV_TEST_COUNT_BASE;
+ case ETH_SS_STATS:
+ if (np->driver_data & DEV_HAS_STATISTICS_V1)
+ return NV_DEV_STATISTICS_V1_COUNT;
+ else if (np->driver_data & DEV_HAS_STATISTICS_V2)
+ return NV_DEV_STATISTICS_V2_COUNT;
+ else
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ /* update stats */
+ nv_do_stats_poll((unsigned long)dev);
+
+ memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+}
+
+static int nv_link_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int mii_status;
+
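+ /* BMSR latches link failures: read twice to get the current link state */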
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+ /* check phy link status */
+ if (!(mii_status & BMSR_LSTATUS))
+ return 0;
+ else
+ return 1;
+}
+
+static int nv_register_test(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i = 0;
+ u32 orig_read, new_read;
+
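+ /* toggle the maskable bits of each test register and verify the nic reads them back */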
+ do {
+ orig_read = readl(base + nv_registers_test[i].reg);
+
+ /* xor with mask to toggle bits */
+ orig_read ^= nv_registers_test[i].mask;
+
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ new_read = readl(base + nv_registers_test[i].reg);
+
+ if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
+ return 0;
+
+ /* restore original value */
+ orig_read ^= nv_registers_test[i].mask;
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ } while (nv_registers_test[++i].reg != 0);
+
+ return 1;
+}
+
+static int nv_interrupt_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int testcnt;
+ u32 save_msi_flags, save_poll_interval = 0;
+
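+ /* swap in a single test vector, arm the timer irq and check that the ISR ran */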
+ if (netif_running(dev)) {
+ /* free current irq */
+ nv_free_irq(dev);
+ save_poll_interval = readl(base + NvRegPollingInterval);
+ }
+
+ /* flag to test interrupt handler */
+ np->intr_test = 0;
+
+ /* setup test irq */
+ save_msi_flags = np->msi_flags;
+ np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
+ np->msi_flags |= 0x001; /* setup 1 vector */
+ if (nv_request_irq(dev, 1))
+ return 0;
+
+ /* setup timer interrupt */
+ writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+
+ nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+
+ /* wait for at least one interrupt */
+ msleep(100);
+
+ spin_lock_irq(&np->lock);
+
+ /* flag should be set within ISR */
+ testcnt = np->intr_test;
+ if (!testcnt)
+ ret = 2;
+
+ nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+ spin_unlock_irq(&np->lock);
+
+ nv_free_irq(dev);
+
+ np->msi_flags = save_msi_flags;
+
+ if (netif_running(dev)) {
+ writel(save_poll_interval, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+ /* restore original irq */
+ if (nv_request_irq(dev, 0))
+ return 0;
+ }
+
+ return ret;
+}
+
+static int nv_loopback_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ struct sk_buff *tx_skb, *rx_skb;
+ dma_addr_t test_dma_addr;
+ u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+ u32 flags;
+ int len, i, pkt_len;
+ u8 *pkt_data;
+ u32 filter_flags = 0;
+ u32 misc1_flags = 0;
+ int ret = 1;
+
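+ /* place the nic in loopback, transmit one test packet and verify it returns intact */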
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ filter_flags = readl(base + NvRegPacketFilterFlags);
+ misc1_flags = readl(base + NvRegMisc1);
+ } else {
+ nv_txrx_reset(dev);
+ }
+
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ nv_init_ring(dev);
+
+ /* setup hardware for loopback */
+ writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+ writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
+
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+
+ /* setup packet for tx */
+ pkt_len = ETH_DATA_LEN;
+ tx_skb = dev_alloc_skb(pkt_len);
+ if (!tx_skb) {
+ printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
+ " of %s\n", dev->name);
+ ret = 0;
+ goto out;
+ }
+ pkt_data = skb_put(tx_skb, pkt_len);
+ for (i = 0; i < pkt_len; i++)
+ pkt_data[i] = (u8)(i & 0xff);
+ /* map for device reads (tx) only after the test pattern is written */
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ skb_end_pointer(tx_skb) - tx_skb->data,
+ PCI_DMA_TODEVICE);
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ } else {
+ /* store the dma address as little-endian high/low words */
+ np->tx_ring.ex[0].bufhigh = cpu_to_le32((u32)((u64)test_dma_addr >> 32));
+ np->tx_ring.ex[0].buflow = cpu_to_le32((u32)test_dma_addr);
+ np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ }
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+
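+ /* give the nic time to loop the packet back into the rx ring */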
+ msleep(500);
+
+ /* check for rx of the packet */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
+ len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
+
+ } else {
+ flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
+ len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
+ }
+
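+ /* NV_RX_AVAIL still set means the nic never completed the descriptor */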
+ if (flags & NV_RX_AVAIL) {
+ ret = 0;
+ } else if (np->desc_ver == DESC_VER_1) {
+ if (flags & NV_RX_ERROR)
+ ret = 0;
+ } else {
+ if (flags & NV_RX2_ERROR)
+ ret = 0;
+ }
+
+ if (ret) {
+ if (len != pkt_len) {
+ ret = 0;
+ dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
+ dev->name, len, pkt_len);
+ } else {
+ rx_skb = np->rx_skb[0].skb;
+ for (i = 0; i < pkt_len; i++) {
+ if (rx_skb->data[i] != (u8)(i & 0xff)) {
+ ret = 0;
+ dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
+ dev->name, i);
+ break;
+ }
+ }
+ }
+ } else {
+ dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
+ }
+
+ pci_unmap_single(np->pci_dev, test_dma_addr,
+ (skb_end_pointer(tx_skb) - tx_skb->data),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(tx_skb);
+ out:
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+
+ if (netif_running(dev)) {
+ writel(misc1_flags, base + NvRegMisc1);
+ writel(filter_flags, base + NvRegPacketFilterFlags);
+ nv_enable_irq(dev);
+ }
+
+ return ret;
+}
+
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int result;
+
+ memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+
+ if (!nv_link_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[0] = 1;
+ }
+
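+ /* offline tests need exclusive hardware access: quiesce the interface first */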
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+#ifdef CONFIG_FORCEDETH_NAPI
+ napi_disable(&np->napi);
+#endif
+ netif_tx_lock_bh(dev);
+ spin_lock_irq(&np->lock);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ } else {
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+ }
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ spin_unlock_irq(&np->lock);
+ netif_tx_unlock_bh(dev);
+ }
+
+ if (!nv_register_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[1] = 1;
+ }
+
+ result = nv_interrupt_test(dev);
+ if (result != 1) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[2] = 1;
+ }
+ if (result == 0) {
+ /* interrupt setup could not be restored: bail out */
+ return;
+ }