diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index af233b5..ab9f675 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -35,7 +35,6 @@
 #include 
 #include 
 #include 
-#include 
 
 #define TX_TIMEOUT (2*HZ)
 
@@ -60,7 +59,7 @@ struct ifb_private {
 static int numifbs = 2;
 
 static void ri_tasklet(unsigned long dev);
-static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
 static int ifb_open(struct net_device *dev);
 static int ifb_close(struct net_device *dev);
 
@@ -70,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
 	struct net_device *_dev = (struct net_device *)dev;
 	struct ifb_private *dp = netdev_priv(_dev);
 	struct net_device_stats *stats = &_dev->stats;
+	struct netdev_queue *txq;
 	struct sk_buff *skb;
 
+	txq = netdev_get_tx_queue(_dev, 0);
 	dp->st_task_enter++;
 	if ((skb = skb_peek(&dp->tq)) == NULL) {
 		dp->st_txq_refl_try++;
-		if (netif_tx_trylock(_dev)) {
+		if (__netif_tx_trylock(txq)) {
 			dp->st_rxq_enter++;
 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
 				skb_queue_tail(&dp->tq, skb);
 				dp->st_rx2tx_tran++;
 			}
-			netif_tx_unlock(_dev);
+			__netif_tx_unlock(txq);
 		} else {
 			/* reschedule */
 			dp->st_rxq_notenter++;
@@ -97,13 +98,16 @@ static void ri_tasklet(unsigned long dev)
 		stats->tx_packets++;
 		stats->tx_bytes +=skb->len;
 
-		skb->dev = __dev_get_by_index(&init_net, skb->iif);
+		rcu_read_lock();
+		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
 		if (!skb->dev) {
+			rcu_read_unlock();
 			dev_kfree_skb(skb);
 			stats->tx_dropped++;
 			break;
 		}
-		skb->iif = _dev->ifindex;
+		rcu_read_unlock();
+		skb->skb_iif = _dev->ifindex;
 
 		if (from & AT_EGRESS) {
 			dp->st_rx_frm_egr++;
@@ -116,7 +120,7 @@
 			BUG();
 	}
 
-	if (netif_tx_trylock(_dev)) {
+	if (__netif_tx_trylock(txq)) {
 		dp->st_rxq_check++;
 		if ((skb = skb_peek(&dp->rq)) == NULL) {
 			dp->tasklet_pending = 0;
@@ -124,10 +128,10 @@
 				netif_wake_queue(_dev);
 		} else {
 			dp->st_rxq_rsch++;
-			netif_tx_unlock(_dev);
+			__netif_tx_unlock(txq);
 			goto resched;
 		}
-		netif_tx_unlock(_dev);
+		__netif_tx_unlock(txq);
 	} else {
 resched:
 		dp->tasklet_pending = 1;
@@ -136,51 +140,55 @@ resched:
 
 }
 
+static const struct net_device_ops ifb_netdev_ops = {
+	.ndo_open	= ifb_open,
+	.ndo_stop	= ifb_close,
+	.ndo_start_xmit	= ifb_xmit,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
 static void ifb_setup(struct net_device *dev)
 {
 	/* Initialize the device structure. */
-	dev->hard_start_xmit = ifb_xmit;
-	dev->open = &ifb_open;
-	dev->stop = &ifb_close;
 	dev->destructor = free_netdev;
+	dev->netdev_ops = &ifb_netdev_ops;
 
 	/* Fill in device structure with ethernet-generic values. */
 	ether_setup(dev);
 	dev->tx_queue_len = TX_Q_LIMIT;
-	dev->change_mtu = NULL;
+
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 	random_ether_addr(dev->dev_addr);
 }
 
-static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ifb_private *dp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
-	int ret = 0;
 	u32 from = G_TC_FROM(skb->tc_verd);
 
 	stats->rx_packets++;
 	stats->rx_bytes+=skb->len;
 
-	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->iif) {
+	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
 		dev_kfree_skb(skb);
 		stats->rx_dropped++;
-		return ret;
+		return NETDEV_TX_OK;
 	}
 
 	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
 		netif_stop_queue(dev);
 	}
 
-	dev->trans_start = jiffies;
 	skb_queue_tail(&dp->rq, skb);
 	if (!dp->tasklet_pending) {
 		dp->tasklet_pending = 1;
 		tasklet_schedule(&dp->ifb_tasklet);
 	}
 
-	return ret;
+	return NETDEV_TX_OK;
 }
 
 static int ifb_close(struct net_device *dev)
@@ -228,16 +236,6 @@ static struct rtnl_link_ops ifb_link_ops __read_mostly = {
 
 module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
-/*
- * dev_ifb->queue_lock is usually taken after dev->ingress_lock,
- * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->queue_lock with dev_ifb->ingress_lock.
- * But lockdep should know that ifb has different locks from dev.
- */
-static struct lock_class_key ifb_queue_lock_key;
-static struct lock_class_key ifb_ingress_lock_key;
-
-
 static int __init ifb_init_one(int index)
 {
 	struct net_device *dev_ifb;
@@ -258,9 +256,6 @@ static int __init ifb_init_one(int index)
 	if (err < 0)
 		goto err;
 
-	lockdep_set_class(&dev_ifb->queue_lock, &ifb_queue_lock_key);
-	lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
-
 	return 0;
 
 err:
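
Note on the lookup change in ri_tasklet(): the patch replaces __dev_get_by_index() with dev_get_by_index_rcu(), which, unlike dev_get_by_index(), does not take a reference on the device and must be called inside an RCU read-side critical section, hence the added rcu_read_lock()/rcu_read_unlock() pair. A minimal sketch of that pattern follows; ifb_redirect_one() is a hypothetical helper written only for illustration, not code from ifb.c or from this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

/* Illustrative only: look up the skb's original device by ifindex under
 * rcu_read_lock(), mirroring the pattern the patch introduces. The _rcu
 * lookup variant does not bump the device refcount. */
static void ifb_redirect_one(struct sk_buff *skb, struct net_device *ifb_dev)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
	if (!dev) {
		/* Original device is gone; drop the packet. */
		rcu_read_unlock();
		dev_kfree_skb(skb);
		return;
	}
	skb->dev = dev;
	rcu_read_unlock();

	skb->skb_iif = ifb_dev->ifindex;
	dev_queue_xmit(skb);	/* re-inject on the egress path */
}

The design point is the same one the patch makes: on this per-packet path a refcounted dev_get_by_index() would be needlessly expensive, so the lockless RCU lookup is used and the lookup itself is kept inside the rcu_read_lock()/rcu_read_unlock() section.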