#include "gianfar_mii.h"
#define TX_TIMEOUT (1*HZ)
-#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
-static struct net_device_stats *gfar_get_stats(struct net_device *dev);
+struct sk_buff *gfar_new_skb(struct net_device *dev);
+static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+static void gfar_configure_serdes(struct net_device *dev);
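+/* Low-level MDIO accessors defined in gianfar_mii.c; they take the MII
+ * register block directly, so the on-chip TBI/SerDes interface can be
+ * programmed without going through the phylib mii_bus. */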
+extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
+extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
#ifdef CONFIG_GFAR_NAPI
-static int gfar_poll(struct net_device *dev, int *budget);
+static int gfar_poll(struct napi_struct *napi, int budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
struct gfar_private *priv = NULL;
struct gianfar_platform_data *einfo;
struct resource *r;
- int idx;
int err = 0;
+ DECLARE_MAC_BUF(mac);
einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
return -ENOMEM;
priv = netdev_priv(dev);
+ priv->dev = dev;
/* Set the info in the priv to the current info */
priv->einfo = einfo;
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) (priv->regs);
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
/* Fill in the dev structure */
dev->tx_timeout = gfar_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
- dev->poll = gfar_poll;
- dev->weight = GFAR_DEV_WEIGHT;
+ netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = gfar_netpoll;
#endif
dev->stop = gfar_close;
- dev->get_stats = gfar_get_stats;
dev->change_mtu = gfar_change_mtu;
dev->mtu = 1500;
dev->set_multicast_list = gfar_set_multi;
gfar_init_sysfs(dev);
/* Print out the device info */
- printk(KERN_INFO DEVICE_NAME, dev->name);
- for (idx = 0; idx < 6; idx++)
- printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
- printk("\n");
+ printk(KERN_INFO DEVICE_NAME "%s\n",
+ dev->name, print_mac(mac, dev->dev_addr));
/* Even more device info helps when determining which kernel */
/* provided which set of benchmarks. */
if (ecntrl & ECNTRL_REDUCED_MODE) {
if (ecntrl & ECNTRL_REDUCED_MII_MODE)
return PHY_INTERFACE_MODE_RMII;
- else
+ else {
+ phy_interface_t interface = priv->einfo->interface;
+
+ /*
+ * This isn't autodetected right now, so it must
+ * be set by the device tree or platform code.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+
return PHY_INTERFACE_MODE_RGMII;
+ }
}
if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ gfar_configure_serdes(dev);
+
 if (IS_ERR(phydev)) {
 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
 return PTR_ERR(phydev);
 }
 return 0;
}
+static void gfar_configure_serdes(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_mii __iomem *regs =
+ (void __iomem *)&priv->regs->gfar_mii_regs;
+
+ /* Initialise TBI i/f to communicate with serdes (lynx phy) */
+
+ /* Single clk mode, mii mode off (for serdes communication) */
+ gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
+
+ /* Supported pause and full-duplex, no half-duplex */
+ gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
+
+ /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
+ gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
+ BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+}
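+
+/*
+ * Sketch (not part of this patch): the TBI autonegotiation result could
+ * be read back through the same low-level accessor, e.g.:
+ *
+ *	int bmsr = gfar_local_mdio_read(regs, TBIPA_VALUE, MII_BMSR);
+ *
+ *	if (bmsr & BMSR_ANEGCOMPLETE)
+ *		(the serdes link completed autonegotiation)
+ */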
+
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
free_skb_resources(priv);
- dma_free_coherent(NULL,
+ dma_free_coherent(&dev->dev,
sizeof(struct txbd8)*priv->tx_ring_size
+ sizeof(struct rxbd8)*priv->rx_ring_size,
priv->tx_bd_base,
for (i = 0; i < priv->tx_ring_size; i++) {
if (priv->tx_skbuff[i]) {
- dma_unmap_single(NULL, txbdp->bufPtr,
+ dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
txbdp->length,
DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
if(priv->rx_skbuff != NULL) {
for (i = 0; i < priv->rx_ring_size; i++) {
if (priv->rx_skbuff[i]) {
- dma_unmap_single(NULL, rxbdp->bufPtr,
+ dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
priv->rx_buffer_size,
DMA_FROM_DEVICE);
{
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
unsigned long vaddr;
int i;
struct gfar_private *priv = netdev_priv(dev);
 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
/* Allocate memory for the buffer descriptors */
- vaddr = (unsigned long) dma_alloc_coherent(NULL,
+ vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
sizeof (struct txbd8) * priv->tx_ring_size +
sizeof (struct rxbd8) * priv->rx_ring_size,
&addr, GFP_KERNEL);
rxbdp = priv->rx_bd_base;
for (i = 0; i < priv->rx_ring_size; i++) {
- struct sk_buff *skb = NULL;
- rxbdp->status = 0;
- skb = gfar_new_skb(dev, rxbdp);
+ struct sk_buff *skb;
+
+ skb = gfar_new_skb(dev);
+
+ if (!skb) {
+ printk(KERN_ERR "%s: Can't allocate RX buffers\n",
+ dev->name);
+ goto err_rxalloc_fail;
+ }
priv->rx_skbuff[i] = skb;
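+ /* Buffer allocation and BD setup are now split: gfar_new_skb()
+ * only allocates, while gfar_new_rxbdp() maps the buffer and
+ * marks the descriptor empty for the controller. */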
+ gfar_new_rxbdp(dev, rxbdp, skb);
+
rxbdp++;
}
tx_irq_fail:
free_irq(priv->interruptError, dev);
err_irq_fail:
+err_rxalloc_fail:
rx_skb_fail:
free_skb_resources(priv);
tx_skb_fail:
- dma_free_coherent(NULL,
+ dma_free_coherent(&dev->dev,
sizeof(struct txbd8)*priv->tx_ring_size
+ sizeof(struct rxbd8)*priv->rx_ring_size,
priv->tx_bd_base,
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
+#ifdef CONFIG_GFAR_NAPI
+ struct gfar_private *priv = netdev_priv(dev);
+#endif
int err;
+#ifdef CONFIG_GFAR_NAPI
+ napi_enable(&priv->napi);
+#endif
+
/* Initialize a bunch of registers */
init_registers(dev);
err = init_phy(dev);
- if(err)
+ if(err) {
+#ifdef CONFIG_GFAR_NAPI
+ napi_disable(&priv->napi);
+#endif
return err;
+ }
err = startup_gfar(dev);
+ if (err) {
+#ifdef CONFIG_GFAR_NAPI
+ napi_disable(&priv->napi);
+#endif
+ return err;
+ }
netif_start_queue(dev);
unsigned long flags;
/* Update transmit stats */
- priv->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
/* Lock priv now */
spin_lock_irqsave(&priv->txlock, flags);
/* Set buffer length and pointer */
txbdp->length = skb->len;
- txbdp->bufPtr = dma_map_single(NULL, skb->data,
+ txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
/* Save the skb pointer so we can free it later */
if (txbdp == priv->dirty_tx) {
netif_stop_queue(dev);
- priv->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
}
/* Update the current txbd to the next one */
static int gfar_close(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
+
+#ifdef CONFIG_GFAR_NAPI
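+ /* napi_disable() waits for any poll in progress to finish, so the
+ * hardware can be stopped and the rings freed safely below. */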
+ napi_disable(&priv->napi);
+#endif
+
stop_gfar(dev);
/* Disconnect from the PHY */
return 0;
}
-/* returns a net_device_stats structure pointer */
-static struct net_device_stats * gfar_get_stats(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
-
- return &(priv->stats);
-}
-
/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
int frame_size = new_mtu + ETH_HLEN;
if (priv->vlan_enable)
- frame_size += VLAN_ETH_HLEN;
+ frame_size += VLAN_HLEN;
if (gfar_uses_fcb(priv))
frame_size += GMAC_FCB_LEN;
* starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
- struct gfar_private *priv = netdev_priv(dev);
-
- priv->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (dev->flags & IFF_UP) {
stop_gfar(dev);
}
/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+int gfar_clean_tx_ring(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct gfar_private *priv = netdev_priv(dev);
struct txbd8 *bdp;
+ struct gfar_private *priv = netdev_priv(dev);
+ int howmany = 0;
- /* Clear IEVENT */
- gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
- /* Lock priv */
- spin_lock(&priv->txlock);
bdp = priv->dirty_tx;
while ((bdp->status & TXBD_READY) == 0) {
 /* If dirty_tx and cur_tx are the same, then either the */
 /* ring is empty or full */
if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
break;
- priv->stats.tx_packets++;
+ howmany++;
/* Deferred means some collisions occurred during transmit, */
/* but we eventually sent the packet. */
if (bdp->status & TXBD_DEF)
- priv->stats.collisions++;
+ dev->stats.collisions++;
/* Free the sk buffer associated with this TxBD */
dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+
priv->tx_skbuff[priv->skb_dirtytx] = NULL;
priv->skb_dirtytx =
(priv->skb_dirtytx +
1) & TX_RING_MOD_MASK(priv->tx_ring_size);
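+ /* TX_RING_MOD_MASK(size) is size - 1, so with a power-of-two ring
+ * size (e.g. 64) the AND wraps the index: (63 + 1) & 63 == 0. */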
+ /* Clean BD length for empty detection */
+ bdp->length = 0;
+
/* update bdp to point at next bd in the ring (wrapping if necessary) */
if (bdp->status & TXBD_WRAP)
bdp = priv->tx_bd_base;
netif_wake_queue(dev);
} /* while ((bdp->status & TXBD_READY) == 0) */
+ dev->stats.tx_packets += howmany;
+
+ return howmany;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+ /* Lock priv */
+ spin_lock(&priv->txlock);
+
+ gfar_clean_tx_ring(dev);
+
/* If we are coalescing the interrupts, reset the timer */
/* Otherwise, clear it */
- if (priv->txcoalescing)
+ if (likely(priv->txcoalescing)) {
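+ /* Writing 0 first is assumed to clear the coalescing counters so
+ * the new threshold/timer values take effect from a clean state. */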
+ gfar_write(&priv->regs->txic, 0);
gfar_write(&priv->regs->txic,
mk_ic_value(priv->txcount, priv->txtime));
- else
- gfar_write(&priv->regs->txic, 0);
+ }
spin_unlock(&priv->txlock);
return IRQ_HANDLED;
}
-struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+ struct sk_buff *skb)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 * status_len = (u32 *)bdp;
+ u16 flags;
+
+ bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+
+ flags = RXBD_EMPTY | RXBD_INTERRUPT;
+
+ if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+ flags |= RXBD_WRAP;
+
+ eieio();
+
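+ /* status and length share one 32-bit word in the BD; a single store
+ * zeroes the length and sets RXBD_EMPTY at once, and the eieio()
+ * above orders the bufPtr update before ownership passes to h/w. */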
+ *status_len = (u32)flags << 16;
+}
+
+struct sk_buff * gfar_new_skb(struct net_device *dev)
{
unsigned int alignamount;
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- unsigned int timeout = SKB_ALLOC_TIMEOUT;
- /* We have to allocate the skb, so keep trying till we succeed */
- while ((!skb) && timeout--)
- skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+ /* Allocate the skb; a failed allocation is handled by the caller */
+ skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
- if (NULL == skb)
+ if (!skb)
return NULL;
alignamount = RXBUF_ALIGNMENT -
- (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
/* We need the data buffer to be aligned properly. We will reserve
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, alignamount);
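 /* Example: with RXBUF_ALIGNMENT at 64 and skb->data ending in 0x28,
 * alignamount is 0x18, moving the data up to the next 64-byte
 * boundary (values illustrative; see RXBUF_ALIGNMENT in gianfar.h). */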
- bdp->bufPtr = dma_map_single(NULL, skb->data,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
-
- bdp->length = 0;
-
- /* Mark the buffer empty */
- eieio();
- bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
-
return skb;
}
-static inline void count_errors(unsigned short status, struct gfar_private *priv)
+static inline void count_errors(unsigned short status, struct net_device *dev)
{
- struct net_device_stats *stats = &priv->stats;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
 /* If the packet was truncated, none of the other errors
 * matter */
unsigned long flags;
#endif
- /* Clear IEVENT, so rx interrupt isn't called again
- * because of this interrupt */
- gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
- if (netif_rx_schedule_prep(dev)) {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived */
+ gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
+ if (netif_rx_schedule_prep(dev, &priv->napi)) {
tempval = gfar_read(&priv->regs->imask);
- tempval &= IMASK_RX_DISABLED;
+ tempval &= IMASK_RTX_DISABLED;
gfar_write(&priv->regs->imask, tempval);
- __netif_rx_schedule(dev);
+ __netif_rx_schedule(dev, &priv->napi);
} else {
if (netif_msg_rx_err(priv))
 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
 dev->name,
 gfar_read(&priv->regs->ievent),
 gfar_read(&priv->regs->imask));
}
#else
+ /* Clear IEVENT, so rx interrupt isn't called again
+ * because of this interrupt */
+ gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
spin_lock_irqsave(&priv->rxlock, flags);
gfar_clean_rx_ring(dev, priv->rx_ring_size);
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
- if (priv->rxcoalescing)
+ if (likely(priv->rxcoalescing)) {
+ gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
- else
- gfar_write(&priv->regs->rxic, 0);
+ }
spin_unlock_irqrestore(&priv->rxlock, flags);
#endif
if (NULL == skb) {
if (netif_msg_rx_err(priv))
printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
- priv->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
priv->extra_stats.rx_skbmissing++;
} else {
int ret;
bdp = priv->cur_rx;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+ struct sk_buff *newskb;
rmb();
+
+ /* Add another skb for the future */
+ newskb = gfar_new_skb(dev);
+
skb = priv->rx_skbuff[priv->skb_currx];
- if (!(bdp->status &
- (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
- | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
+ /* We drop the frame if we failed to allocate a new buffer */
+ if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
+ bdp->status & RXBD_ERR)) {
+ count_errors(bdp->status, dev);
+
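+ /* If allocation failed, reuse the old skb so the ring stays fully
+ * populated; only the received frame is lost. */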
+ if (unlikely(!newskb))
+ newskb = skb;
+ else if (skb) {
+ dma_unmap_single(&priv->dev->dev,
+ bdp->bufPtr,
+ priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ }
+ } else {
/* Increment the number of packets */
- priv->stats.rx_packets++;
+ dev->stats.rx_packets++;
howmany++;
 /* Remove the FCS from the packet length */
 pkt_len = bdp->length - 4;
 gfar_process_frame(dev, skb, pkt_len);
- priv->stats.rx_bytes += pkt_len;
- } else {
- count_errors(bdp->status, priv);
-
- if (skb)
- dev_kfree_skb_any(skb);
-
- priv->rx_skbuff[priv->skb_currx] = NULL;
+ dev->stats.rx_bytes += pkt_len;
}
dev->last_rx = jiffies;
- /* Clear the status flags for this buffer */
- bdp->status &= ~RXBD_STATS;
+ priv->rx_skbuff[priv->skb_currx] = newskb;
- /* Add another skb for the future */
- skb = gfar_new_skb(dev, bdp);
- priv->rx_skbuff[priv->skb_currx] = skb;
+ /* Setup the new bdp */
+ gfar_new_rxbdp(dev, bdp, newskb);
/* Update to the next pointer */
if (bdp->status & RXBD_WRAP)
/* update to point at the next skb */
priv->skb_currx =
- (priv->skb_currx +
- 1) & RX_RING_MOD_MASK(priv->rx_ring_size);
-
+ (priv->skb_currx + 1) &
+ RX_RING_MOD_MASK(priv->rx_ring_size);
}
/* Update the current rxbd pointer to be the next one */
}
#ifdef CONFIG_GFAR_NAPI
-static int gfar_poll(struct net_device *dev, int *budget)
+static int gfar_poll(struct napi_struct *napi, int budget)
{
+ struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
+ struct net_device *dev = priv->dev;
int howmany;
- struct gfar_private *priv = netdev_priv(dev);
- int rx_work_limit = *budget;
-
- if (rx_work_limit > dev->quota)
- rx_work_limit = dev->quota;
+ unsigned long flags;
- howmany = gfar_clean_rx_ring(dev, rx_work_limit);
+ /* If we fail to get the lock, don't bother with the TX BDs */
+ if (spin_trylock_irqsave(&priv->txlock, flags)) {
+ gfar_clean_tx_ring(dev);
+ spin_unlock_irqrestore(&priv->txlock, flags);
+ }
- dev->quota -= howmany;
- rx_work_limit -= howmany;
- *budget -= howmany;
+ howmany = gfar_clean_rx_ring(dev, budget);
- if (rx_work_limit > 0) {
- netif_rx_complete(dev);
+ if (howmany < budget) {
+ netif_rx_complete(dev, napi);
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
- if (priv->rxcoalescing)
+ if (likely(priv->rxcoalescing)) {
+ gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
- else
- gfar_write(&priv->regs->rxic, 0);
+ }
}
- /* Return 1 if there's more work to do */
- return (rx_work_limit > 0) ? 0 : 1;
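+ /* Returning fewer packets than the budget tells the NAPI core we
+ * are done; polling resumes only after a new interrupt. */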
+ return howmany;
}
#endif
/* Update the error counters */
if (events & IEVENT_TXE) {
- priv->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (events & IEVENT_LC)
- priv->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
if (events & IEVENT_CRL)
- priv->stats.tx_aborted_errors++;
+ dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
if (netif_msg_tx_err(priv))
printk(KERN_DEBUG "%s: TX FIFO underrun, "
"packet dropped.\n", dev->name);
- priv->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
priv->extra_stats.tx_underrun++;
/* Reactivate the Tx Queues */
printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
}
if (events & IEVENT_BSY) {
- priv->stats.rx_errors++;
+ dev->stats.rx_errors++;
priv->extra_stats.rx_bsy++;
gfar_receive(irq, dev_id);
dev->name, gfar_read(&priv->regs->rstat));
}
if (events & IEVENT_BABR) {
- priv->stats.rx_errors++;
+ dev->stats.rx_errors++;
priv->extra_stats.rx_babr++;
if (netif_msg_rx_err(priv))
return IRQ_HANDLED;
}
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:fsl-gianfar");
+
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
.probe = gfar_probe,
.remove = gfar_remove,
.driver = {
.name = "fsl-gianfar",
+ .owner = THIS_MODULE,
},
};