X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Fskge.c;h=952d37ffee51f8de796a20b650b946678ca5e333;hb=0a64ea57486acd9e17b80bb70b966e81d904b61c;hp=14f06aea9ca04b88ebb47f020262bf5779e0d7d1;hpb=29816d9aa55c99d463bd5507a46535b5fe79c33a;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 14f06ae..952d37f 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -44,7 +44,7 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.12" +#define DRV_VERSION "1.13" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -104,6 +104,7 @@ static void yukon_get_stats(struct skge_port *skge, u64 *data); static void yukon_init(struct skge_hw *hw, int port); static void genesis_mac_init(struct skge_hw *hw, int port); static void genesis_link_up(struct skge_port *skge); +static void skge_set_multicast(struct net_device *dev); /* Avoid conditionals by using array */ static const int txqaddr[] = { Q_XA1, Q_XA2 }; @@ -149,24 +150,6 @@ static u32 wol_supported(const struct skge_hw *hw) return WAKE_MAGIC | WAKE_PHY; } -static u32 pci_wake_enabled(struct pci_dev *dev) -{ - int pm = pci_find_capability(dev, PCI_CAP_ID_PM); - u16 value; - - /* If device doesn't support PM Capabilities, but request is to disable - * wake events, it's a nop; otherwise fail */ - if (!pm) - return 0; - - pci_read_config_word(dev, pm + PCI_PM_PMC, &value); - - value &= PCI_PM_CAP_PME_MASK; - value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ - - return value != 0; -} - static void skge_wol_init(struct skge_port *skge) { struct skge_hw *hw = skge->hw; @@ -254,10 +237,14 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; - if (wol->wolopts & ~wol_supported(hw)) + if ((wol->wolopts & ~wol_supported(hw)) + || !device_can_wakeup(&hw->pdev->dev)) return -EOPNOTSUPP; skge->wol = wol->wolopts; + + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + return 0; } @@ -319,6 +306,7 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; u32 supported = skge_supported_modes(hw); + int err = 0; if (ecmd->autoneg == AUTONEG_ENABLE) { ecmd->advertising = supported; @@ -367,8 +355,14 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) skge->autoneg = ecmd->autoneg; skge->advertising = ecmd->advertising; - if (netif_running(dev)) - skge_phy_reset(skge); + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } return (0); } @@ -494,7 +488,7 @@ static int skge_set_ring_param(struct net_device *dev, struct ethtool_ringparam *p) { struct skge_port *skge = netdev_priv(dev); - int err; + int err = 0; if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) @@ -510,7 +504,7 @@ static int skge_set_ring_param(struct net_device *dev, dev_close(dev); } - return 0; + return err; } static u32 skge_get_msglevel(struct net_device *netdev) @@ -593,6 +587,7 @@ static int skge_set_pauseparam(struct net_device *dev, { struct skge_port *skge = netdev_priv(dev); struct ethtool_pauseparam old; + int err = 0; skge_get_pauseparam(dev, &old); @@ -609,8 +604,14 @@ static int skge_set_pauseparam(struct net_device *dev, skge->flow_control = FLOW_MODE_NONE; } - if (netif_running(dev)) - skge_phy_reset(skge); + if (netif_running(dev)) 
{ + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } return 0; } @@ -1095,16 +1096,9 @@ static void xm_link_down(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); - u16 cmd = xm_read16(hw, port, XM_MMU_CMD); xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); - cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); - xm_write16(hw, port, XM_MMU_CMD, cmd); - - /* dummy read to ensure writing */ - xm_read16(hw, port, XM_MMU_CMD); - if (netif_carrier_ok(dev)) skge_link_down(skge); } @@ -1194,6 +1188,7 @@ static void genesis_init(struct skge_hw *hw) static void genesis_reset(struct skge_hw *hw, int port) { const u8 zero[8] = { 0 }; + u32 reg; skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); @@ -1209,6 +1204,11 @@ static void genesis_reset(struct skge_hw *hw, int port) xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); } @@ -1634,15 +1634,14 @@ static void genesis_mac_init(struct skge_hw *hw, int port) } xm_write16(hw, port, XM_RX_CMD, r); - /* We want short frames padded to 60 bytes. */ xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); - /* - * Bump up the transmit threshold. This helps hold off transmit - * underruns when we're blasting traffic from both ports at once. - */ - xm_write16(hw, port, XM_TX_THR, 512); + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); /* * Enable the reception of all error frames. This is is @@ -1713,7 +1712,13 @@ static void genesis_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; - u32 reg; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); genesis_reset(hw, port); @@ -1721,20 +1726,17 @@ static void genesis_stop(struct skge_port *skge) skge_write16(hw, B3_PA_CTRL, port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); - /* - * If the transfer sticks at the MAC the STOP command will not - * terminate if we don't flush the XMAC's transmit FIFO ! 
- */ - xm_write32(hw, port, XM_MODE, - xm_read32(hw, port, XM_MODE)|XM_MD_FTF); - - /* Reset the MAC */ - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); /* For external PHYs there must be special handling */ if (hw->phy_type != SK_PHY_XMAC) { - reg = skge_read32(hw, B2_GP_IO); + u32 reg = skge_read32(hw, B2_GP_IO); if (port == 0) { reg |= GP_DIR_0; reg &= ~GP_IO_0; @@ -1801,11 +1803,6 @@ static void genesis_mac_intr(struct skge_hw *hw, int port) xm_write32(hw, port, XM_MODE, XM_MD_FTF); ++dev->stats.tx_fifo_errors; } - - if (status & XM_IS_RXF_OV) { - xm_write32(hw, port, XM_MODE, XM_MD_FRF); - ++dev->stats.rx_fifo_errors; - } } static void genesis_link_up(struct skge_port *skge) @@ -1862,9 +1859,9 @@ static void genesis_link_up(struct skge_port *skge) xm_write32(hw, port, XM_MODE, mode); - /* Turn on detection of Tx underrun, Rx overrun */ + /* Turn on detection of Tx underrun */ msk = xm_read16(hw, port, XM_IMSK); - msk &= ~(XM_IS_RXF_OV | XM_IS_TXF_UR); + msk &= ~XM_IS_TXF_UR; xm_write16(hw, port, XM_IMSK, msk); xm_read16(hw, port, XM_ISRC); @@ -2194,9 +2191,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port) TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); - /* serial mode register */ - reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (hw->dev[port]->mtu > 1500) + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; gma_write16(hw, port, GM_SERIAL_MODE, reg); @@ -2464,7 +2464,7 @@ static void skge_phy_reset(struct skge_port *skge) } spin_unlock_bh(&hw->phy_lock); - dev->set_multicast_list(dev); + skge_set_multicast(dev); } /* Basic MII support */ @@ -2897,11 +2897,7 @@ static void skge_tx_timeout(struct net_device *dev) static int skge_change_mtu(struct net_device *dev, int new_mtu) { - struct skge_port *skge = netdev_priv(dev); - struct skge_hw *hw = skge->hw; - int port = skge->port; int err; - u16 ctl, reg; if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; @@ -2911,40 +2907,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu) return 0; } - skge_write32(hw, B0_IMSK, 0); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_stop_queue(dev); - napi_disable(&skge->napi); - - ctl = gma_read16(hw, port, GM_GP_CTRL); - gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); - - skge_rx_clean(skge); - skge_rx_stop(hw, port); + skge_down(dev); dev->mtu = new_mtu; - reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (new_mtu > 1500) - reg |= GM_SMOD_JUMBO_ENA; - gma_write16(hw, port, GM_SERIAL_MODE, reg); - - skge_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); - - err = skge_rx_fill(dev); - wmb(); - if (!err) - skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); - skge_write32(hw, B0_IMSK, hw->intr_mask); - + err = skge_up(dev); if (err) dev_close(dev); - else { - gma_write16(hw, port, GM_GP_CTRL, ctl); - - napi_enable(&skge->napi); - netif_wake_queue(dev); - } return err; } @@ -3063,6 +3032,18 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status) (status & GMR_FS_RX_OK) == 0; } +static void skge_set_multicast(struct net_device *dev) +{ + struct 
skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + if (hw->chip_id == CHIP_ID_GENESIS) + genesis_set_multicast(dev); + else + yukon_set_multicast(dev); + +} + /* Get receive buffer from descriptor. * Handles copy of small buffers and reallocation failures @@ -3218,7 +3199,6 @@ static int skge_poll(struct napi_struct *napi, int to_do) skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); if (likely(skb)) { - dev->last_rx = jiffies; netif_receive_skb(skb); ++work_done; @@ -3231,12 +3211,14 @@ static int skge_poll(struct napi_struct *napi, int to_do) skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); if (work_done < to_do) { - spin_lock_irq(&hw->hw_lock); - __netif_rx_complete(dev, napi); + unsigned long flags; + + spin_lock_irqsave(&hw->hw_lock, flags); + __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); - spin_unlock_irq(&hw->hw_lock); + spin_unlock_irqrestore(&hw->hw_lock, flags); } return work_done; @@ -3395,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) if (status & (IS_XA1_F|IS_R1_F)) { struct skge_port *skge = netdev_priv(hw->dev[0]); hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); - netif_rx_schedule(hw->dev[0], &skge->napi); + napi_schedule(&skge->napi); } if (status & IS_PA_TO_TX1) @@ -3415,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) if (status & (IS_XA2_F|IS_R2_F)) { hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); - netif_rx_schedule(hw->dev[1], &skge->napi); + napi_schedule(&skge->napi); } if (status & IS_PA_TO_RX2) { @@ -3746,7 +3728,7 @@ static int skge_device_event(struct notifier_block *unused, struct skge_port *skge; struct dentry *d; - if (dev->open != &skge_up || !skge_debug) + if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) goto done; skge = netdev_priv(dev); @@ -3820,6 +3802,23 @@ static __exit void skge_debug_cleanup(void) #define skge_debug_cleanup() #endif +static const struct net_device_ops skge_netdev_ops = { + .ndo_open = skge_up, + .ndo_stop = skge_down, + .ndo_start_xmit = skge_xmit_frame, + .ndo_do_ioctl = skge_ioctl, + .ndo_get_stats = skge_get_stats, + .ndo_tx_timeout = skge_tx_timeout, + .ndo_change_mtu = skge_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = skge_set_multicast, + .ndo_set_mac_address = skge_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = skge_netpoll, +#endif +}; + + /* Initialize network device */ static struct net_device *skge_devinit(struct skge_hw *hw, int port, int highmem) @@ -3833,24 +3832,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, } SET_NETDEV_DEV(dev, &hw->pdev->dev); - dev->open = skge_up; - dev->stop = skge_down; - dev->do_ioctl = skge_ioctl; - dev->hard_start_xmit = skge_xmit_frame; - dev->get_stats = skge_get_stats; - if (hw->chip_id == CHIP_ID_GENESIS) - dev->set_multicast_list = genesis_set_multicast; - else - dev->set_multicast_list = yukon_set_multicast; - - dev->set_mac_address = skge_set_mac_address; - dev->change_mtu = skge_change_mtu; - SET_ETHTOOL_OPS(dev, &skge_ethtool_ops); - dev->tx_timeout = skge_tx_timeout; + dev->netdev_ops = &skge_netdev_ops; + dev->ethtool_ops = &skge_ethtool_ops; dev->watchdog_timeo = TX_WATCHDOG; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = skge_netpoll; -#endif dev->irq = hw->pdev->irq; if (highmem) @@ -3872,7 +3856,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, skge->speed = -1; skge->advertising = 
skge_supported_modes(hw); - if (pci_wake_enabled(hw->pdev)) + if (device_may_wakeup(&hw->pdev->dev)) skge->wol = wol_supported(hw) & WAKE_MAGIC; hw->dev[port] = dev; @@ -3901,11 +3885,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, static void __devinit skge_show_addr(struct net_device *dev) { const struct skge_port *skge = netdev_priv(dev); - DECLARE_MAC_BUF(mac); if (netif_msg_probe(skge)) - printk(KERN_INFO PFX "%s: addr %s\n", - dev->name, print_mac(mac, dev->dev_addr)); + printk(KERN_INFO PFX "%s: addr %pM\n", + dev->name, dev->dev_addr); } static int __devinit skge_probe(struct pci_dev *pdev, @@ -4098,8 +4081,8 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state) } skge_write32(hw, B0_IMSK, 0); - pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + pci_prepare_to_sleep(pdev); return 0; } @@ -4112,7 +4095,7 @@ static int skge_resume(struct pci_dev *pdev) if (!hw) return 0; - err = pci_set_power_state(pdev, PCI_D0); + err = pci_back_from_sleep(pdev); if (err) goto out; @@ -4120,8 +4103,6 @@ static int skge_resume(struct pci_dev *pdev) if (err) goto out; - pci_enable_wake(pdev, PCI_D0, 0); - err = skge_reset(hw); if (err) goto out; @@ -4162,8 +4143,8 @@ static void skge_shutdown(struct pci_dev *pdev) wol |= skge->wol; } - pci_enable_wake(pdev, PCI_D3hot, wol); - pci_enable_wake(pdev, PCI_D3cold, wol); + if (pci_enable_wake(pdev, PCI_D3cold, wol)) + pci_enable_wake(pdev, PCI_D3hot, wol); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot);
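
Note (not part of the patch): the largest structural change above is the switch from the old per-field net_device callbacks (dev->open, dev->stop, dev->hard_start_xmit, ...) to a single const struct net_device_ops table assigned to dev->netdev_ops. The following is a minimal sketch of that same conversion pattern for a hypothetical driver "foo"; the foo_* names and the trivial handler bodies are placeholders, not taken from skge.c.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Placeholder handlers -- stand-ins for skge_up(), skge_down() and
 * skge_xmit_frame() in the patch above. */
static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* A real driver would hand the skb to hardware here. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* All entry points live in one const table instead of being assigned
 * to individual struct net_device function pointers. */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static struct net_device *foo_create_netdev(void)
{
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return NULL;

	/* One assignment replaces the block of dev->open = ... lines
	 * removed from skge_devinit() above. */
	dev->netdev_ops = &foo_netdev_ops;
	return dev;
}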
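
Note (not part of the patch): the patch also drops the driver's private pci_wake_enabled() helper and the explicit pci_enable_wake()/pci_set_power_state() calls in favour of the generic wakeup helpers (device_can_wakeup(), device_set_wakeup_enable(), device_may_wakeup()) and pci_prepare_to_sleep()/pci_back_from_sleep(). Below is a minimal sketch of that suspend/resume shape, again using a hypothetical "foo" driver whose foo->wol field mirrors skge->wol; none of these names come from skge.c.

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical per-adapter state. */
struct foo_priv {
	struct pci_dev *pdev;
	u32 wol;
};

/* Record the requested WoL options and tell the PM core, as
 * skge_set_wol() now does with device_set_wakeup_enable(). */
static int foo_set_wol(struct foo_priv *foo, u32 wolopts)
{
	if (!device_can_wakeup(&foo->pdev->dev))
		return -EOPNOTSUPP;

	foo->wol = wolopts;
	device_set_wakeup_enable(&foo->pdev->dev, foo->wol != 0);
	return 0;
}

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* Arms PME and chooses the low-power state according to the
	 * wakeup setting stored via device_set_wakeup_enable(). */
	return pci_prepare_to_sleep(pdev);
}

static int foo_resume(struct pci_dev *pdev)
{
	/* Returns the device to D0 and disarms wakeup. */
	return pci_back_from_sleep(pdev);
}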