static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 *baddr;
+ u32 __iomem *baddr;
int i;
baddr = &regs->tbase0;
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
- if (priv->rx_filer_enable)
+ if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+ gfar_write(&regs->rir0, DEFAULT_RIR0);
+ }
if (priv->rx_csum_enable)
rctrl |= RCTRL_CHECKSUMMING;
gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
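The RIR0 write in the hunk above relies on a DEFAULT_RIR0 distribution value defined in gianfar.h, which is not part of this excerpt. A sketch of the assumed definition:

	/* Assumed gianfar.h value: default RX-queue distribution for RIR0 */
	#define DEFAULT_RIR0	0x05397700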
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct netdev_queue *txq;
+ unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ }
+
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_dropped = rx_dropped;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ txq = netdev_get_tx_queue(dev, i);
+ tx_bytes += txq->tx_bytes;
+ tx_packets += txq->tx_packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+
+ return &dev->stats;
+}
+
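The TX counters summed by gfar_get_stats() are the per-netdev_queue ones that gfar_start_xmit() now updates in place of dev->stats, as seen in the xmit hunk below:

	/* Update transmit stats */
	txq->tx_bytes += skb->len;
	txq->tx_packets++;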
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_select_queue = gfar_select_queue,
+ .ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
return priv->vlgrp || priv->rx_csum_enable;
}
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- return skb_get_queue_mapping(skb);
-}
static void free_tx_pointers(struct gfar_private *priv)
{
int i = 0;
int err = 0, i;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node *child = NULL;
const u32 *stash;
const u32 *stash_len;
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->node;
+ priv->node = ofdev->dev.of_node;
priv->ndev = dev;
dev->num_tx_queues = num_tx_qs;
priv->rx_queue[i] = NULL;
for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
+ priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
if (!priv->tx_queue[i]) {
err = -ENOMEM;
}
for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+ priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
if (!priv->rx_queue[i]) {
err = -ENOMEM;
return new_bit_map;
}
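Only the tail of reverse_bitmap() appears in the hunk above. For context, a minimal sketch of the assumed helper, which mirrors a bitmap so that the MSB of the input maps to bit 0 of the result:

	/* Sketch (assumed): mirror the low max_qs bits of bit_map */
	static u32 reverse_bitmap(u32 bit_map, u32 max_qs)
	{
		u32 new_bit_map = 0x0;
		u32 mask = 0x1 << (max_qs - 1);
		u32 i;

		for (i = 0; i < max_qs; i++, mask >>= 0x1) {
			if (bit_map & mask)
				new_bit_map |= 0x1 << i;
		}
		return new_bit_map;
	}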
-u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class)
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
{
u32 rqfpr = FPR_FILER_MASK;
u32 rqfcr = 0x0;
int len_devname;
u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
u32 isrg = 0;
- u32 *baddr;
+ u32 __iomem *baddr;
err = gfar_of_init(ofdev, &dev);
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->node;
+ priv->node = ofdev->dev.of_node;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
}
/* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_bit parses from right to left, which
+ * but for_each_set_bit parses from right to left, which
* basically reverses the queue numbers */
for (i = 0; i < priv->num_grps; i++) {
priv->gfargrp[i].tx_bit_map = reverse_bitmap(
* also assign queues to groups */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
- for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
- for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
+ /* Enable the filer if using multiple RX queues */
+ if (priv->num_rx_queues > 1)
+ priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
/* provided which set of benchmarks. */
printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
for (i = 0; i < priv->num_rx_queues; i++)
- printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+ printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
dev->name, i, priv->rx_queue[i]->rx_ring_size);
for (i = 0; i < priv->num_tx_queues; i++)
- printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+ printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
dev->name, i, priv->tx_queue[i]->tx_ring_size);
return 0;
phy_start(priv->phydev);
netif_device_attach(ndev);
- napi_enable(&priv->gfargrp.napi);
+ enable_napi(priv);
return 0;
}
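The resume path now calls enable_napi() instead of enabling a single group's NAPI context; a sketch of the assumed helper, which walks every interrupt group:

	/* Sketch (assumed): enable NAPI on all interrupt groups */
	static void enable_napi(struct gfar_private *priv)
	{
		int i;

		for (i = 0; i < priv->num_grps; i++)
			napi_enable(&priv->gfargrp[i].napi);
	}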
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- if(!tx_queue->tx_skbuff)
+ if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if(!rx_queue->rx_skbuff)
+ if (rx_queue->rx_skbuff)
free_skb_rx_queue(rx_queue);
}
}
void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned int tx_mask, unsigned int rx_mask)
+ unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 *baddr;
+ u32 __iomem *baddr;
int i = 0;
/* Backward compatible case ---- even if we enable
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
- for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
if (likely(priv->tx_queue[i]->txcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->tx_queue[i]->txic);
}
baddr = &regs->rxic0;
- for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
if (likely(priv->rx_queue[i]->rxcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
- spin_lock_irqsave(&tx_queue->txlock, flags);
-
/* check if there is space to queue this packet */
if ((nr_frags+1) > tx_queue->num_txbdfree) {
/* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
return NETDEV_TX_BUSY;
}
/* Update transmit stats */
- dev->stats.tx_bytes += skb->len;
+ txq->tx_bytes += skb->len;
+ txq->tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
}
/* setup the TxBD length and buffer pointer for the first BD */
- tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
/*
+ * We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * while reading num_txbdfree and checking for available space;
+ * that's because outside of this function it can only grow, and
+ * once we've got the needed space, it cannot suddenly disappear.
+ *
+ * The lock also protects us from gfar_error(), which can modify
+ * regs->tstat and thus retrigger the transfers, which is why we
+ * also must grab the lock before setting ready bit for the first
+ * to be transmitted BD.
+ */
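+ /*
+ * The consumer side of this contract is gfar_clean_tx_ring()
+ * below, where num_txbdfree only grows, and only under the
+ * same lock:
+ *
+ *	spin_lock_irqsave(&tx_queue->txlock, flags);
+ *	tx_queue->num_txbdfree += frags + 1;
+ *	spin_unlock_irqrestore(&tx_queue->txlock, flags);
+ */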
+ spin_lock_irqsave(&tx_queue->txlock, flags);
+
+ /*
* The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
txbdp_start->lstatus = lstatus;
+ eieio(); /* force lstatus write before tx_skbuff */
+
+ tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
/* Update the current skb pointer to the next entry we will use
* (wrapping if necessary) */
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ unsigned long flags;
+
frags = skb_shinfo(skb)->nr_frags;
lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
TX_RING_MOD_MASK(tx_ring_size);
howmany++;
+ spin_lock_irqsave(&tx_queue->txlock, flags);
tx_queue->num_txbdfree += frags + 1;
+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
/* If we freed a buffer, we can restart transmission, if necessary */
tx_queue->skb_dirtytx = skb_dirtytx;
tx_queue->dirty_tx = bdp;
- dev->stats.tx_packets += howmany;
-
return howmany;
}
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, alignamount);
+ GFAR_CB(skb)->alignamount = alignamount;
return skb;
}
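GFAR_CB() stores the alignment amount in the skb's control block so the reserve can be undone when the skb is recycled (see the un-reserve further down). A sketch of the assumed gianfar.h accessor:

	/* Sketch (assumed, gianfar.h): per-skb driver state kept in skb->cb */
	struct gfar_skb_cb {
		int alignamount;
	};

	#define GFAR_CB(skb)	((struct gfar_skb_cb *)((skb)->cb))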
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb */
- skb_set_queue_mapping(skb, fcb->rq);
/* Remove the padded bytes, if there are any */
- if (amount_pull)
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
+ }
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
newskb = skb;
else if (skb) {
/*
- * We need to reset ->data to what it
+ * We need to un-reserve() the skb to what it
* was before gfar_new_skb() re-aligned
* it to an RXBUF_ALIGNMENT boundary
* before we put the skb back on the
* recycle list.
*/
- skb->data = skb->head + NET_SKB_PAD;
+ skb_reserve(skb, -GFAR_CB(skb)->alignamount);
__skb_queue_head(&priv->rx_recycle, skb);
}
} else {
/* Increment the number of packets */
- dev->stats.rx_packets++;
+ rx_queue->stats.rx_packets++;
howmany++;
if (likely(skb)) {
pkt_len = bdp->length - ETH_FCS_LEN;
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
- dev->stats.rx_bytes += pkt_len;
-
- if (in_irq() || irqs_disabled())
- printk("Interrupt problem!\n");
+ rx_queue->stats.rx_bytes += pkt_len;
+ skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull);
} else {
if (netif_msg_rx_err(priv))
printk(KERN_WARNING
"%s: Missing skb!\n", dev->name);
- dev->stats.rx_dropped++;
+ rx_queue->stats.rx_dropped++;
priv->extra_stats.rx_skbmissing++;
}
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
- int tx_cleaned = 0, i, left_over_budget = budget, serviced_queues = 0;
+ int tx_cleaned = 0, i, left_over_budget = budget;
+ unsigned long serviced_queues = 0;
int num_queues = 0;
- unsigned long flags;
num_queues = gfargrp->num_rx_queues;
budget_per_queue = budget/num_queues;
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
- for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
if (test_bit(i, &serviced_queues))
continue;
rx_queue = priv->rx_queue[i];
tx_queue = priv->tx_queue[rx_queue->qindex];
- /* If we fail to get the lock,
- * don't bother with the TX BDs */
- if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
- tx_cleaned += gfar_clean_tx_ring(tx_queue);
- spin_unlock_irqrestore(&tx_queue->txlock,
- flags);
- }
-
+ tx_cleaned += gfar_clean_tx_ring(tx_queue);
rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
budget_per_queue);
rx_cleaned += rx_cleaned_per_queue;
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
&priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptTransmit);
+ }
}
}
#endif
em_num = 0;
}
- if (dev->mc_count == 0)
+ if (netdev_mc_empty(dev))
return;
/* Parse the list, and set the appropriate bits */
- for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, dev) {
if (idx < em_num) {
gfar_set_mac_for_addr(dev, idx,
mc_ptr->dmi_addr);
if (events & IEVENT_CRL)
dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
+ unsigned long flags;
+
if (netif_msg_tx_err(priv))
printk(KERN_DEBUG "%s: TX FIFO underrun, "
"packet dropped.\n", dev->name);
dev->stats.tx_dropped++;
priv->extra_stats.tx_underrun++;
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+
/* Reactivate the Tx Queues */
gfar_write(&regs->tstat, gfargrp->tstat);
+
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
}
if (netif_msg_tx_err(priv))
printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
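For reference, lock_tx_qs()/unlock_tx_qs() used in the XFUN path above are assumed to be gianfar.h helpers along these lines; plain spin_lock() suffices because the caller disables IRQs first via local_irq_save():

	/* Sketch (assumed): take/release every TX queue lock, IRQs off in caller */
	static inline void lock_tx_qs(struct gfar_private *priv)
	{
		int i;

		for (i = 0; i < priv->num_tx_queues; i++)
			spin_lock(&priv->tx_queue[i]->txlock);
	}

	static inline void unlock_tx_qs(struct gfar_private *priv)
	{
		int i;

		for (i = 0; i < priv->num_tx_queues; i++)
			spin_unlock(&priv->tx_queue[i]->txlock);
	}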