static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);
+static void skge_set_multicast(struct net_device *dev);
/* Avoid conditionals by using array */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
return WAKE_MAGIC | WAKE_PHY;
}
-static u32 pci_wake_enabled(struct pci_dev *dev)
-{
- int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
- u16 value;
-
- /* If device doesn't support PM Capabilities, but request is to disable
- * wake events, it's a nop; otherwise fail */
- if (!pm)
- return 0;
-
- pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
-
- value &= PCI_PM_CAP_PME_MASK;
- value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */
-
- return value != 0;
-}
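/*
 * The open-coded PCI PM-capability probe above goes away: wake
 * capability is now queried through the generic PM helpers
 * (device_can_wakeup()/device_set_wakeup_enable()) used later in this
 * patch, since the PCI core already records PME support when it sets
 * the device up.
 */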
-
static void skge_wol_init(struct skge_port *skge)
{
struct skge_hw *hw = skge->hw;
if (skge->wol & WAKE_MAGIC)
ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
else
- ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
- if (wol->wolopts & ~wol_supported(hw))
+ if ((wol->wolopts & ~wol_supported(hw))
+ || !device_can_wakeup(&hw->pdev->dev))
return -EOPNOTSUPP;
skge->wol = wol->wolopts;
+
+ device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+
return 0;
}
struct skge_port *skge = netdev_priv(dev);
const struct skge_hw *hw = skge->hw;
u32 supported = skge_supported_modes(hw);
+ int err = 0;
if (ecmd->autoneg == AUTONEG_ENABLE) {
ecmd->advertising = supported;
skge->autoneg = ecmd->autoneg;
skge->advertising = ecmd->advertising;
- if (netif_running(dev))
- skge_phy_reset(skge);
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+ }
return (0);
}
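/*
 * Applying new link settings by cycling skge_down()/skge_up() reuses
 * the normal bring-up path instead of the bare skge_phy_reset(), and a
 * failure of skge_up() is now propagated to the caller after
 * dev_close() has left the interface in a sane stopped state.
 */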
struct ethtool_ringparam *p)
{
struct skge_port *skge = netdev_priv(dev);
- int err;
+ int err = 0;
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
dev_close(dev);
}
- return 0;
+ return err;
}
static u32 skge_get_msglevel(struct net_device *netdev)
{
struct skge_port *skge = netdev_priv(dev);
struct ethtool_pauseparam old;
+ int err = 0;
skge_get_pauseparam(dev, &old);
skge->flow_control = FLOW_MODE_NONE;
}
- if (netif_running(dev))
- skge_phy_reset(skge);
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+ }
return 0;
}
}
spin_unlock_bh(&hw->phy_lock);
- dev->set_multicast_list(dev);
+ skge_set_multicast(dev);
}
/* Basic MII support */
}
case SIOCSMIIREG:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
spin_lock_bh(&hw->phy_lock);
if (hw->chip_id == CHIP_ID_GENESIS)
err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
if (netif_msg_ifdown(skge))
printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
del_timer_sync(&skge->link_timer);
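/*
 * netif_tx_disable() stops the queue under the tx lock, so no CPU can
 * still be inside the transmit routine once skge_down() goes on to
 * tear down the rings; netif_stop_queue() alone gave no such
 * guarantee.
 */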
+ (ring->to_clean - ring->to_use) - 1;
}
-static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
+ struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
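/*
 * ndo_start_xmit handlers return netdev_tx_t (NETDEV_TX_OK or
 * NETDEV_TX_BUSY) rather than a bare int, and the manual
 * dev->trans_start update is dropped because the core stack stamps
 * the transmit time itself after a successful xmit.
 */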
}
skge->tx_ring.to_clean = e;
- netif_wake_queue(dev);
}
static void skge_tx_timeout(struct net_device *dev)
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
skge_tx_clean(dev);
+ netif_wake_queue(dev);
}
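/*
 * The queue wake-up moves out of skge_tx_clean() and into the timeout
 * handler: skge_tx_clean() is also called from skge_down(), where
 * restarting the queue right after netif_tx_disable() would be
 * counterproductive.
 */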
static int skge_change_mtu(struct net_device *dev, int new_mtu)
(status & GMR_FS_RX_OK) == 0;
}
+static void skge_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+
+ if (hw->chip_id == CHIP_ID_GENESIS)
+ genesis_set_multicast(dev);
+ else
+ yukon_set_multicast(dev);
+}
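/*
 * Small dispatcher so a single ndo_set_multicast_list entry in
 * skge_netdev_ops (below) covers both MAC families, replacing the
 * per-chip assignment of dev->set_multicast_list removed from
 * skge_devinit() further down.
 */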
+
/* Get receive buffer from descriptor.
* Handles copy of small buffers and reallocation failures
goto error;
if (len < RX_COPY_THRESHOLD) {
- skb = netdev_alloc_skb(dev, len + 2);
+ skb = netdev_alloc_skb_ip_align(dev, len);
if (!skb)
goto resubmit;
- skb_reserve(skb, 2);
pci_dma_sync_single_for_cpu(skge->hw->pdev,
pci_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
- nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
+
+ nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
if (!nskb)
goto resubmit;
- skb_reserve(nskb, NET_IP_ALIGN);
pci_unmap_single(skge->hw->pdev,
pci_unmap_addr(e, mapaddr),
pci_unmap_len(e, maplen),
skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
if (likely(skb)) {
- dev->last_rx = jiffies;
netif_receive_skb(skb);
++work_done;
if (work_done < to_do) {
unsigned long flags;
-
+
spin_lock_irqsave(&hw->hw_lock, flags);
- __netif_rx_complete(dev, napi);
+ __napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
if (status & (IS_XA1_F|IS_R1_F)) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
- netif_rx_schedule(hw->dev[0], &skge->napi);
+ napi_schedule(&skge->napi);
}
if (status & IS_PA_TO_TX1)
if (status & (IS_XA2_F|IS_R2_F)) {
hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
- netif_rx_schedule(hw->dev[1], &skge->napi);
+ napi_schedule(&skge->napi);
}
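/*
 * NAPI API update: netif_rx_schedule(dev, napi) and
 * __netif_rx_complete(dev, napi) become napi_schedule() and
 * __napi_complete(), which take only the napi_struct pointer; the
 * interrupt-mask handling around the calls is unchanged.
 */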
if (status & IS_PA_TO_RX2) {
struct skge_port *skge;
struct dentry *d;
- if (dev->open != &skge_up || !skge_debug)
+ if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
goto done;
skge = netdev_priv(dev);
#define skge_debug_cleanup()
#endif
+static const struct net_device_ops skge_netdev_ops = {
+ .ndo_open = skge_up,
+ .ndo_stop = skge_down,
+ .ndo_start_xmit = skge_xmit_frame,
+ .ndo_do_ioctl = skge_ioctl,
+ .ndo_get_stats = skge_get_stats,
+ .ndo_tx_timeout = skge_tx_timeout,
+ .ndo_change_mtu = skge_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = skge_set_multicast,
+ .ndo_set_mac_address = skge_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = skge_netpoll,
+#endif
+};
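/*
 * net_device_ops collects the per-device callbacks in one const table.
 * dev->netdev_ops points at it, so the individual function-pointer
 * assignments (dev->open, dev->hard_start_xmit, ...) in skge_devinit()
 * below are removed.
 */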
+
/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
int highmem)
}
SET_NETDEV_DEV(dev, &hw->pdev->dev);
- dev->open = skge_up;
- dev->stop = skge_down;
- dev->do_ioctl = skge_ioctl;
- dev->hard_start_xmit = skge_xmit_frame;
- dev->get_stats = skge_get_stats;
- if (hw->chip_id == CHIP_ID_GENESIS)
- dev->set_multicast_list = genesis_set_multicast;
- else
- dev->set_multicast_list = yukon_set_multicast;
-
- dev->set_mac_address = skge_set_mac_address;
- dev->change_mtu = skge_change_mtu;
- SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
- dev->tx_timeout = skge_tx_timeout;
+ dev->netdev_ops = &skge_netdev_ops;
+ dev->ethtool_ops = &skge_ethtool_ops;
dev->watchdog_timeo = TX_WATCHDOG;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = skge_netpoll;
-#endif
dev->irq = hw->pdev->irq;
if (highmem)
skge->speed = -1;
skge->advertising = skge_supported_modes(hw);
- if (pci_wake_enabled(hw->pdev))
+ if (device_can_wakeup(&hw->pdev->dev)) {
skge->wol = wol_supported(hw) & WAKE_MAGIC;
+ device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+ }
hw->dev[port] = dev;
static void __devinit skge_show_addr(struct net_device *dev)
{
const struct skge_port *skge = netdev_priv(dev);
- DECLARE_MAC_BUF(mac);
if (netif_msg_probe(skge))
- printk(KERN_INFO PFX "%s: addr %s\n",
- dev->name, print_mac(mac, dev->dev_addr));
+ printk(KERN_INFO PFX "%s: addr %pM\n",
+ dev->name, dev->dev_addr);
}
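/*
 * The %pM printk extension formats a 6-byte MAC address directly,
 * making the DECLARE_MAC_BUF()/print_mac() scratch buffer unnecessary.
 */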
static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
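/*
 * DMA_BIT_MASK(n) from <linux/dma-mapping.h> builds the mask inline
 * (roughly (1ULL << n) - 1, with n == 64 giving ~0ULL), replacing the
 * old DMA_64BIT_MASK and DMA_32BIT_MASK constants.
 */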
if (err) {
#endif
err = -ENOMEM;
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ /* space for skge@pci:0000:04:00.0 */
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
if (!hw) {
dev_err(&pdev->dev, "cannot allocate hardware struct\n");
goto err_out_free_regions;
}
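/*
 * The IRQ is requested with a name derived from the PCI address
 * ("skge@pci:0000:04:00.0") instead of dev->name, which can change
 * when the interface is renamed. irq_name is presumably a char[]
 * flexible-array member at the end of struct skge_hw, hence the
 * over-sized kzalloc() above.
 */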
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
goto err_out_free_netdev;
}
- err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
if (err) {
dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
dev->name, pdev->irq);
}
skge_show_addr(dev);
- if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
- if (register_netdev(dev1) == 0)
+ if (hw->ports > 1) {
+ dev1 = skge_devinit(hw, 1, using_dac);
+ if (dev1 && register_netdev(dev1) == 0)
skge_show_addr(dev1);
else {
/* Failure to register second port need not be fatal */
dev_warn(&pdev->dev, "register of second port failed\n");
hw->dev[1] = NULL;
- free_netdev(dev1);
+ hw->ports = 1;
+ if (dev1)
+ free_netdev(dev1);
}
}
pci_set_drvdata(pdev, hw);
}
skge_write32(hw, B0_IMSK, 0);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ pci_prepare_to_sleep(pdev);
return 0;
}
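/*
 * pci_prepare_to_sleep() (and pci_back_from_sleep() in resume below)
 * let the PCI core pick the target power state and arm or disarm PME
 * according to device_may_wakeup(), replacing the driver's own
 * pci_enable_wake()/pci_set_power_state() sequence.
 */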
if (!hw)
return 0;
- err = pci_set_power_state(pdev, PCI_D0);
+ err = pci_back_from_sleep(pdev);
if (err)
goto out;
if (err)
goto out;
- pci_enable_wake(pdev, PCI_D0, 0);
-
err = skge_reset(hw);
if (err)
goto out;
wol |= skge->wol;
}
- pci_enable_wake(pdev, PCI_D3hot, wol);
- pci_enable_wake(pdev, PCI_D3cold, wol);
+ if (pci_enable_wake(pdev, PCI_D3cold, wol))
+ pci_enable_wake(pdev, PCI_D3hot, wol);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
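/*
 * pci_enable_wake() returns 0 on success, so the D3hot call now runs
 * only as a fallback when wake-up from D3cold cannot be enabled,
 * instead of enabling both states unconditionally.
 */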