net/core: Allow certain receives on inactive slave.
diff --git a/net/core/dev.c b/net/core/dev.c
index d0e23d8..dab97c7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -90,6 +90,7 @@
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/notifier.h>
 #include <linux/skbuff.h>
 #include <net/net_namespace.h>
 #include <linux/err.h>
 #include <linux/ctype.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
 
 #include "net-sysfs.h"
 
  *             86DD    IPv6
  */
 
+#define PTYPE_HASH_SIZE        (16)
+#define PTYPE_HASH_MASK        (PTYPE_HASH_SIZE - 1)
+
 static DEFINE_SPINLOCK(ptype_lock);
-static struct list_head ptype_base[16] __read_mostly;  /* 16 way hashed list */
+static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;       /* Taps */
 
 #ifdef CONFIG_NET_DMA
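The packet-type dispatch table is a fixed 16-bucket hash indexed by the low four bits of the host-order protocol number; naming the size and mask replaces the bare 15/16 literals scattered through the file (all converted in the hunks below). A minimal userspace sketch of the bucket computation, using the standard ethertypes as inputs:

    #include <stdio.h>
    #include <arpa/inet.h>          /* htons(), ntohs() */

    #define PTYPE_HASH_SIZE 16
    #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

    int main(void)
    {
            /* Standard ethertypes, stored big-endian as in skb->protocol. */
            unsigned short types[] = { htons(0x0800),   /* IPv4 */
                                       htons(0x86DD),   /* IPv6 */
                                       htons(0x0806) }; /* ARP  */

            for (int i = 0; i < 3; i++)
                    printf("type 0x%04x -> bucket %d\n", ntohs(types[i]),
                           ntohs(types[i]) & PTYPE_HASH_MASK);
            return 0;
    }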
@@ -159,7 +169,7 @@ struct net_dma {
        struct dma_client client;
        spinlock_t lock;
        cpumask_t channel_mask;
-       struct dma_chan *channels[NR_CPUS];
+       struct dma_chan **channels;
 };
 
 static enum dma_state_client
@@ -213,7 +223,7 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 /* Device list insertion */
 static int list_netdevice(struct net_device *dev)
 {
-       struct net *net = dev->nd_net;
+       struct net *net = dev_net(dev);
 
        ASSERT_RTNL();
 
@@ -251,9 +261,9 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 
 DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_LOCKDEP
 /*
- * register_netdevice() inits dev->_xmit_lock and sets lockdep class
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
  */
 static const unsigned short netdev_lock_type[] =
@@ -291,6 +301,7 @@ static const char *netdev_lock_name[] =
         "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 
 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 {
@@ -303,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
        return ARRAY_SIZE(netdev_lock_type) - 1;
 }
 
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-                                           unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+                                                unsigned short dev_type)
 {
        int i;
 
@@ -312,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock,
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+       int i;
+
+       i = netdev_lock_pos(dev->type);
+       lockdep_set_class_and_name(&dev->addr_list_lock,
+                                  &netdev_addr_lock_key[i],
+                                  netdev_lock_name[i]);
+}
 #else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-                                           unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+                                                unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 {
 }
 #endif
@@ -362,7 +386,7 @@ void dev_add_pack(struct packet_type *pt)
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
-               hash = ntohs(pt->type) & 15;
+               hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
@@ -391,7 +415,7 @@ void __dev_remove_pack(struct packet_type *pt)
        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
-               head = &ptype_base[ntohs(pt->type) & 15];
+               head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 
        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
@@ -450,7 +474,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
-                       strcpy(s[i].name, name);
+                       strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
@@ -475,7 +499,7 @@ int netdev_boot_setup_check(struct net_device *dev)
 
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
-                   !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
+                   !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
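The strcmp() change above fixes a real matching bug: with strncmp() bounded by strlen(s[i].name), a netdev= boot entry was effectively a prefix match, so a setup record for "eth1" would also capture "eth10". A quick illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *setup = "eth1", *dev = "eth10";

            /* Old test: prefix match -- wrongly claims eth10. */
            printf("strncmp: %s\n",
                   !strncmp(dev, setup, strlen(setup)) ? "match" : "no match");
            /* New test: exact names only. */
            printf("strcmp:  %s\n",
                   !strcmp(dev, setup) ? "match" : "no match");
            return 0;
    }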
@@ -672,7 +696,7 @@ struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *h
 
        ASSERT_RTNL();
 
-       for_each_netdev(&init_net, dev)
+       for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;
@@ -849,8 +873,8 @@ int dev_alloc_name(struct net_device *dev, const char *name)
        struct net *net;
        int ret;
 
-       BUG_ON(!dev->nd_net);
-       net = dev->nd_net;
+       BUG_ON(!dev_net(dev));
+       net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
@@ -874,9 +898,9 @@ int dev_change_name(struct net_device *dev, char *newname)
        struct net *net;
 
        ASSERT_RTNL();
-       BUG_ON(!dev->nd_net);
+       BUG_ON(!dev_net(dev));
 
-       net = dev->nd_net;
+       net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;
 
@@ -900,7 +924,11 @@ int dev_change_name(struct net_device *dev, char *newname)
                strlcpy(dev->name, newname, IFNAMSIZ);
 
 rollback:
-       device_rename(&dev->dev, dev->name);
+       err = device_rename(&dev->dev, dev->name);
+       if (err) {
+               memcpy(dev->name, oldname, IFNAMSIZ);
+               return err;
+       }
 
        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
@@ -953,6 +981,12 @@ void netdev_state_change(struct net_device *dev)
        }
 }
 
+void netdev_bonding_change(struct net_device *dev)
+{
+       call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
+}
+EXPORT_SYMBOL(netdev_bonding_change);
+
 /**
  *     dev_load        - load a network module
  *     @net: the applicable net namespace
@@ -991,6 +1025,8 @@ int dev_open(struct net_device *dev)
 {
        int ret = 0;
 
+       ASSERT_RTNL();
+
        /*
         *      Is it already up?
         */
@@ -1057,6 +1093,8 @@ int dev_open(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
+       ASSERT_RTNL();
+
        might_sleep();
 
        if (!(dev->flags & IFF_UP))
@@ -1068,8 +1106,6 @@ int dev_close(struct net_device *dev)
         */
        call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
-       dev_deactivate(dev);
-
        clear_bit(__LINK_STATE_START, &dev->state);
 
        /* Synchronize to scheduled poll. We cannot touch poll list,
@@ -1080,6 +1116,8 @@ int dev_close(struct net_device *dev)
         */
        smp_mb__after_clear_bit(); /* Commit netif_running(). */
 
+       dev_deactivate(dev);
+
        /*
         *      Call the device specific close. This cannot fail.
         *      Only if device is UP
@@ -1105,6 +1143,29 @@ int dev_close(struct net_device *dev)
 }
 
 
+/**
+ *     dev_disable_lro - disable Large Receive Offload on a device
+ *     @dev: device
+ *
+ *     Disable Large Receive Offload (LRO) on a net device.  Must be
+ *     called under RTNL.  This is needed if received packets may be
+ *     forwarded to another interface.
+ */
+void dev_disable_lro(struct net_device *dev)
+{
+       if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
+           dev->ethtool_ops->set_flags) {
+               u32 flags = dev->ethtool_ops->get_flags(dev);
+               if (flags & ETH_FLAG_LRO) {
+                       flags &= ~ETH_FLAG_LRO;
+                       dev->ethtool_ops->set_flags(dev, flags);
+               }
+       }
+       WARN_ON(dev->features & NETIF_F_LRO);
+}
+EXPORT_SYMBOL(dev_disable_lro);
+
+
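LRO merges a burst of received TCP segments into one oversized frame. That is fine for local delivery but must not happen on any path that may retransmit the frame (routing, bridging, bonding slaves), hence this helper. A sketch of how a master device might use it when taking over a slave, modeled loosely on the bonding driver -- illustrative only, with error handling trimmed:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>    /* ASSERT_RTNL() */

    /* Disable LRO on a slave before its traffic can be forwarded. */
    static int example_enslave(struct net_device *master,
                               struct net_device *slave)
    {
            ASSERT_RTNL();          /* dev_disable_lro() requires RTNL */
            dev_disable_lro(slave);
            return netdev_set_master(slave, master);
    }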
 static int dev_boot_phase = 1;
 
 /*
@@ -1278,16 +1339,16 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct net_device *dev)
+void __netif_schedule(struct Qdisc *q)
 {
-       if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-               unsigned long flags;
+       if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
                struct softnet_data *sd;
+               unsigned long flags;
 
                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
-               dev->next_sched = sd->output_queue;
-               sd->output_queue = dev;
+               q->next_sched = sd->output_queue;
+               sd->output_queue = q;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
@@ -1351,6 +1412,29 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+       return ((features & NETIF_F_GEN_CSUM) ||
+               ((features & NETIF_F_IP_CSUM) &&
+                protocol == htons(ETH_P_IP)) ||
+               ((features & NETIF_F_IPV6_CSUM) &&
+                protocol == htons(ETH_P_IPV6)));
+}
+
+static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
+{
+       if (can_checksum_protocol(dev->features, skb->protocol))
+               return true;
+
+       if (skb->protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+               if (can_checksum_protocol(dev->features & dev->vlan_features,
+                                         veh->h_vlan_encapsulated_proto))
+                       return true;
+       }
+
+       return false;
+}
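dev_can_checksum() factors the old inline feature test out of dev_queue_xmit() (see the hunk below) and extends it to 802.1Q frames: the encapsulated protocol can still be offloaded if the needed checksum feature is also present in vlan_features. The decision table in standalone userspace form -- the feature bits and protocol tags are illustrative stand-ins for the NETIF_F_*/ETH_P_* constants:

    #include <stdio.h>
    #include <stdint.h>

    #define F_GEN_CSUM  0x1         /* checksum anything        */
    #define F_IP_CSUM   0x2         /* IPv4 only                */
    #define F_IPV6_CSUM 0x4         /* IPv6 only                */
    enum proto { P_IP, P_IPV6, P_OTHER };

    static int can_checksum(uint32_t features, enum proto p)
    {
            return (features & F_GEN_CSUM) ||
                   ((features & F_IP_CSUM) && p == P_IP) ||
                   ((features & F_IPV6_CSUM) && p == P_IPV6);
    }

    int main(void)
    {
            uint32_t features = F_IP_CSUM;  /* NIC does IPv4 csum...   */
            uint32_t vlan_features = 0;     /* ...but not inside VLANs */

            printf("plain ipv4: %d\n", can_checksum(features, P_IP));
            printf("vlan ipv4:  %d\n",
                   can_checksum(features & vlan_features, P_IP));
            return 0;
    }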
 
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
@@ -1420,7 +1504,8 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
        }
 
        rcu_read_lock();
-       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
+       list_for_each_entry_rcu(ptype,
+                       &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                                err = ptype->gso_send_check(skb);
@@ -1520,7 +1605,7 @@ static int dev_gso_segment(struct sk_buff *skb)
        if (!segs)
                return 0;
 
-       if (unlikely(IS_ERR(segs)))
+       if (IS_ERR(segs))
                return PTR_ERR(segs);
 
        skb->next = segs;
@@ -1530,7 +1615,8 @@ static int dev_gso_segment(struct sk_buff *skb)
        return 0;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+                       struct netdev_queue *txq)
 {
        if (likely(!skb->next)) {
                if (!list_empty(&ptype_all))
@@ -1559,9 +1645,7 @@ gso:
                        skb->next = nskb;
                        return rc;
                }
-               if (unlikely((netif_queue_stopped(dev) ||
-                            netif_subqueue_stopped(dev, skb)) &&
-                            skb->next))
+               if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
 
@@ -1572,6 +1656,73 @@ out_kfree_skb:
        return 0;
 }
 
+static u32 simple_tx_hashrnd;
+static int simple_tx_hashrnd_initialized = 0;
+
+static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
+{
+       u32 addr1, addr2, ports;
+       u32 hash, ihl;
+       u8 ip_proto;
+
+       if (unlikely(!simple_tx_hashrnd_initialized)) {
+               get_random_bytes(&simple_tx_hashrnd, 4);
+               simple_tx_hashrnd_initialized = 1;
+       }
+
+       switch (skb->protocol) {
+       case __constant_htons(ETH_P_IP):
+               ip_proto = ip_hdr(skb)->protocol;
+               addr1 = ip_hdr(skb)->saddr;
+               addr2 = ip_hdr(skb)->daddr;
+               ihl = ip_hdr(skb)->ihl;
+               break;
+       case __constant_htons(ETH_P_IPV6):
+               ip_proto = ipv6_hdr(skb)->nexthdr;
+               addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
+               addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
+               ihl = (40 >> 2);
+               break;
+       default:
+               return 0;
+       }
+
+       switch (ip_proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+       case IPPROTO_DCCP:
+       case IPPROTO_ESP:
+       case IPPROTO_AH:
+       case IPPROTO_SCTP:
+       case IPPROTO_UDPLITE:
+               ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
+               break;
+
+       default:
+               ports = 0;
+               break;
+       }
+
+       hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+
+       return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+}
+
+static struct netdev_queue *dev_pick_tx(struct net_device *dev,
+                                       struct sk_buff *skb)
+{
+       u16 queue_index = 0;
+
+       if (dev->select_queue)
+               queue_index = dev->select_queue(dev, skb);
+       else if (dev->real_num_tx_queues > 1)
+               queue_index = simple_tx_hash(dev, skb);
+
+       skb_set_queue_mapping(skb, queue_index);
+       return netdev_get_tx_queue(dev, queue_index);
+}
+
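simple_tx_hash() spreads flows over the device's TX queues: it jhashes the network-layer addresses plus, for the listed transport protocols, the first 32-bit word of the transport header (the source and destination ports, for TCP/UDP), then scales the hash onto [0, real_num_tx_queues) with a multiply-and-shift rather than a modulo. The scaling step in isolation, with a crude generator standing in for jhash_3words():

    #include <stdio.h>
    #include <stdint.h>

    /* (hash * nqueues) >> 32: maps a full-range 32-bit hash onto
     * [0, nqueues) without a division. */
    static uint16_t pick_queue(uint32_t hash, uint32_t nqueues)
    {
            return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
    }

    int main(void)
    {
            uint32_t counts[4] = { 0 }, h = 12345;

            for (int i = 0; i < 100000; i++) {
                    h = h * 1664525u + 1013904223u;   /* stand-in hash */
                    counts[pick_queue(h, 4)]++;
            }
            for (int q = 0; q < 4; q++)
                    printf("queue %d: %u packets\n", q, counts[q]);
            return 0;
    }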
 /**
  *     dev_queue_xmit - transmit a buffer
  *     @skb: buffer to transmit
@@ -1597,10 +1748,10 @@ out_kfree_skb:
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-
 int dev_queue_xmit(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
+       struct netdev_queue *txq;
        struct Qdisc *q;
        int rc = -ENOMEM;
 
@@ -1628,55 +1779,33 @@ int dev_queue_xmit(struct sk_buff *skb)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                skb_set_transport_header(skb, skb->csum_start -
                                              skb_headroom(skb));
-
-               if (!(dev->features & NETIF_F_GEN_CSUM) &&
-                   !((dev->features & NETIF_F_IP_CSUM) &&
-                     skb->protocol == htons(ETH_P_IP)) &&
-                   !((dev->features & NETIF_F_IPV6_CSUM) &&
-                     skb->protocol == htons(ETH_P_IPV6)))
-                       if (skb_checksum_help(skb))
-                               goto out_kfree_skb;
+               if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
+                       goto out_kfree_skb;
        }
 
 gso:
-       spin_lock_prefetch(&dev->queue_lock);
-
        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
        rcu_read_lock_bh();
 
-       /* Updates of qdisc are serialized by queue_lock.
-        * The struct Qdisc which is pointed to by qdisc is now a
-        * rcu structure - it may be accessed without acquiring
-        * a lock (but the structure may be stale.) The freeing of the
-        * qdisc will be deferred until it's known that there are no
-        * more references to it.
-        *
-        * If the qdisc has an enqueue function, we still need to
-        * hold the queue_lock before calling it, since queue_lock
-        * also serializes access to the device queue.
-        */
+       txq = dev_pick_tx(dev, skb);
+       q = rcu_dereference(txq->qdisc);
 
-       q = rcu_dereference(dev->qdisc);
 #ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
        if (q->enqueue) {
-               /* Grab device queue */
-               spin_lock(&dev->queue_lock);
-               q = dev->qdisc;
-               if (q->enqueue) {
-                       /* reset queue_mapping to zero */
-                       skb_set_queue_mapping(skb, 0);
-                       rc = q->enqueue(skb, q);
-                       qdisc_run(dev);
-                       spin_unlock(&dev->queue_lock);
-
-                       rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
-                       goto out;
-               }
-               spin_unlock(&dev->queue_lock);
+               spinlock_t *root_lock = qdisc_lock(q);
+
+               spin_lock(root_lock);
+
+               rc = qdisc_enqueue_root(skb, q);
+               qdisc_run(q);
+
+               spin_unlock(root_lock);
+
+               goto out;
        }
 
        /* The device has no queue. Common case for software devices:
@@ -1694,19 +1823,18 @@ gso:
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
-               if (dev->xmit_lock_owner != cpu) {
+               if (txq->xmit_lock_owner != cpu) {
 
-                       HARD_TX_LOCK(dev, cpu);
+                       HARD_TX_LOCK(dev, txq, cpu);
 
-                       if (!netif_queue_stopped(dev) &&
-                           !netif_subqueue_stopped(dev, skb)) {
+                       if (!netif_tx_queue_stopped(txq)) {
                                rc = 0;
-                               if (!dev_hard_start_xmit(skb, dev)) {
-                                       HARD_TX_UNLOCK(dev);
+                               if (!dev_hard_start_xmit(skb, dev, txq)) {
+                                       HARD_TX_UNLOCK(dev, txq);
                                        goto out;
                                }
                        }
-                       HARD_TX_UNLOCK(dev);
+                       HARD_TX_UNLOCK(dev, txq);
                        if (net_ratelimit())
                                printk(KERN_CRIT "Virtual device %s asks to "
                                       "queue packet!\n", dev->name);
@@ -1780,7 +1908,6 @@ int netif_rx(struct sk_buff *skb)
        if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
                if (queue->input_pkt_queue.qlen) {
 enqueue:
-                       dev_hold(skb->dev);
                        __skb_queue_tail(&queue->input_pkt_queue, skb);
                        local_irq_restore(flags);
                        return NET_RX_SUCCESS;
@@ -1812,22 +1939,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-
-       if (dev->master) {
-               if (skb_bond_should_drop(skb)) {
-                       kfree_skb(skb);
-                       return NULL;
-               }
-               skb->dev = dev->master;
-       }
-
-       return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
        struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1844,13 +1955,13 @@ static void net_tx_action(struct softirq_action *h)
                        struct sk_buff *skb = clist;
                        clist = clist->next;
 
-                       BUG_TRAP(!atomic_read(&skb->users));
+                       WARN_ON(atomic_read(&skb->users));
                        __kfree_skb(skb);
                }
        }
 
        if (sd->output_queue) {
-               struct net_device *head;
+               struct Qdisc *head;
 
                local_irq_disable();
                head = sd->output_queue;
@@ -1858,17 +1969,20 @@ static void net_tx_action(struct softirq_action *h)
                local_irq_enable();
 
                while (head) {
-                       struct net_device *dev = head;
+                       struct Qdisc *q = head;
+                       spinlock_t *root_lock;
+
                        head = head->next_sched;
 
                        smp_mb__before_clear_bit();
-                       clear_bit(__LINK_STATE_SCHED, &dev->state);
+                       clear_bit(__QDISC_STATE_SCHED, &q->state);
 
-                       if (spin_trylock(&dev->queue_lock)) {
-                               qdisc_run(dev);
-                               spin_unlock(&dev->queue_lock);
+                       root_lock = qdisc_lock(q);
+                       if (spin_trylock(root_lock)) {
+                               qdisc_run(q);
+                               spin_unlock(root_lock);
                        } else {
-                               netif_schedule(dev);
+                               __netif_schedule(q);
                        }
                }
        }
@@ -1949,10 +2063,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
  */
 static int ing_filter(struct sk_buff *skb)
 {
-       struct Qdisc *q;
        struct net_device *dev = skb->dev;
-       int result = TC_ACT_OK;
        u32 ttl = G_TC_RTTL(skb->tc_verd);
+       struct netdev_queue *rxq;
+       int result = TC_ACT_OK;
+       struct Qdisc *q;
 
        if (MAX_RED_LOOP < ttl++) {
                printk(KERN_WARNING
@@ -1964,10 +2079,14 @@ static int ing_filter(struct sk_buff *skb)
        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-       spin_lock(&dev->ingress_lock);
-       if ((q = dev->qdisc_ingress) != NULL)
-               result = q->enqueue(skb, q);
-       spin_unlock(&dev->ingress_lock);
+       rxq = &dev->rx_queue;
+
+       q = rxq->qdisc;
+       if (q != &noop_qdisc) {
+               spin_lock(qdisc_lock(q));
+               result = qdisc_enqueue_root(skb, q);
+               spin_unlock(qdisc_lock(q));
+       }
 
        return result;
 }
@@ -1976,7 +2095,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
 {
-       if (!skb->dev->qdisc_ingress)
+       if (skb->dev->rx_queue.qdisc == &noop_qdisc)
                goto out;
 
        if (*pt_prev) {
@@ -2000,6 +2119,33 @@ out:
 }
 #endif
 
+/*
+ *     netif_nit_deliver - deliver received packets to network taps
+ *     @skb: buffer
+ *
+ *     This function is used to deliver incoming packets to network
+ *     taps. It should be used when the normal netif_receive_skb path
+ *     is bypassed, for example because of VLAN acceleration.
+ */
+void netif_nit_deliver(struct sk_buff *skb)
+{
+       struct packet_type *ptype;
+
+       if (list_empty(&ptype_all))
+               return;
+
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb->mac_len = skb->network_header - skb->mac_header;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ptype, &ptype_all, list) {
+               if (!ptype->dev || ptype->dev == skb->dev)
+                       deliver_skb(skb, ptype, skb->dev);
+       }
+       rcu_read_unlock();
+}
+
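A tap is just a packet_type registered for ETH_P_ALL with a NULL dev -- the same list netif_nit_deliver() walks here; the function exists so that paths that skip netif_receive_skb() (VLAN hardware acceleration in particular) keep such taps, e.g. libpcap, working. A minimal, hypothetical kernel-module tap for reference:

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    /* Hypothetical tap: logs every frame seen on every device. */
    static int tap_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
    {
            printk(KERN_DEBUG "tap: %s len %u\n", dev->name, skb->len);
            kfree_skb(skb);         /* consume our reference */
            return 0;
    }

    static struct packet_type tap_pt = {
            .type = __constant_htons(ETH_P_ALL), /* all protocols */
            .func = tap_rcv,                     /* .dev == NULL: all devices */
    };

    static int __init tap_init(void)
    {
            dev_add_pack(&tap_pt);
            return 0;
    }

    static void __exit tap_exit(void)
    {
            dev_remove_pack(&tap_pt);
    }

    module_init(tap_init);
    module_exit(tap_exit);
    MODULE_LICENSE("GPL");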
 /**
  *     netif_receive_skb - process receive buffer from network
  *     @skb: buffer to process
@@ -2019,6 +2165,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
        struct packet_type *ptype, *pt_prev;
        struct net_device *orig_dev;
+       struct net_device *null_or_orig;
        int ret = NET_RX_DROP;
        __be16 type;
 
@@ -2032,10 +2179,14 @@ int netif_receive_skb(struct sk_buff *skb)
        if (!skb->iif)
                skb->iif = skb->dev->ifindex;
 
-       orig_dev = skb_bond(skb);
-
-       if (!orig_dev)
-               return NET_RX_DROP;
+       null_or_orig = NULL;
+       orig_dev = skb->dev;
+       if (orig_dev->master) {
+               if (skb_bond_should_drop(skb))
+                       null_or_orig = orig_dev; /* deliver only exact match */
+               else
+                       skb->dev = orig_dev->master;
+       }
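This is the change the commit title describes. The old skb_bond() freed every frame that arrived on an inactive bond slave. Now the frame survives, but null_or_orig is set to the slave so the two delivery loops below (ptype->dev == null_or_orig || ptype->dev == skb->dev) match only handlers explicitly bound to that exact device; wildcard handlers, whose ptype->dev is NULL, no longer match because NULL is compared against null_or_orig instead. In other words, a protocol that binds a packet_type directly to the slave -- the 802.3ad/ARP-monitor style of listener -- still receives its frames while the slave is inactive, without duplicating normal traffic through it. A hypothetical exact-match listener:

    /* Hypothetical listener bound to one specific slave; with this
     * change it sees its frames even on an inactive bonding slave. */
    static int slave_rcv(struct sk_buff *skb, struct net_device *dev,
                         struct packet_type *pt, struct net_device *orig_dev)
    {
            kfree_skb(skb);
            return 0;
    }

    static struct packet_type slave_pt = {
            .type = __constant_htons(ETH_P_SLOW),  /* e.g. 802.3ad LACP */
            .func = slave_rcv,
    };

    static void listen_on_slave(struct net_device *slave)
    {
            slave_pt.dev = slave;   /* exact-device match */
            dev_add_pack(&slave_pt);
    }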
 
        __get_cpu_var(netdev_rx_stat).total++;
 
@@ -2047,6 +2198,10 @@ int netif_receive_skb(struct sk_buff *skb)
 
        rcu_read_lock();
 
+       /* Don't receive packets in an exiting network namespace */
+       if (!net_alive(dev_net(skb->dev)))
+               goto out;
+
 #ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -2055,7 +2210,7 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (!ptype->dev || ptype->dev == skb->dev) {
+               if (ptype->dev == null_or_orig || ptype->dev == skb->dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
@@ -2077,9 +2232,10 @@ ncls:
                goto out;
 
        type = skb->protocol;
-       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
+       list_for_each_entry_rcu(ptype,
+                       &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
                if (ptype->type == type &&
-                   (!ptype->dev || ptype->dev == skb->dev)) {
+                   (ptype->dev == null_or_orig || ptype->dev == skb->dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
@@ -2101,6 +2257,20 @@ out:
        return ret;
 }
 
+/* Network device is going away, flush any packets still pending  */
+static void flush_backlog(void *arg)
+{
+       struct net_device *dev = arg;
+       struct softnet_data *queue = &__get_cpu_var(softnet_data);
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+               if (skb->dev == dev) {
+                       __skb_unlink(skb, &queue->input_pkt_queue);
+                       kfree_skb(skb);
+               }
+}
+
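flush_backlog() exists because the dev_hold()/dev_put() pair around the per-CPU input queue is removed (netif_rx above no longer holds the device; process_backlog below no longer puts it), saving two atomic operations per packet. Since a queued skb no longer pins its device, netdev_run_todo() must instead sweep every CPU's backlog with on_each_cpu(flush_backlog, dev, 1) before the device can be freed. The walk-and-unlink pattern in a userspace model:

    #include <stdio.h>
    #include <stdlib.h>

    /* Model of flush_backlog(): unlink through a pointer-to-pointer
     * so freeing the current node cannot derail the walk. */
    struct node { struct node *next; int dev; };

    static void flush(struct node **pp, int dev)
    {
            struct node *n;

            while ((n = *pp) != NULL) {
                    if (n->dev == dev) {
                            *pp = n->next;  /* unlink, then free */
                            free(n);
                    } else {
                            pp = &n->next;
                    }
            }
    }

    int main(void)
    {
            struct node *head = NULL;
            int devs[] = { 1, 2, 1 };

            for (int i = 0; i < 3; i++) {
                    struct node *n = malloc(sizeof(*n));
                    n->dev = devs[i];
                    n->next = head;
                    head = n;
            }
            flush(&head, 1);        /* drop packets queued for dev 1 */
            for (struct node *n = head; n; n = n->next)
                    printf("remaining: dev %d\n", n->dev);
            return 0;
    }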
 static int process_backlog(struct napi_struct *napi, int quota)
 {
        int work = 0;
@@ -2110,7 +2280,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
        napi->weight = weight_p;
        do {
                struct sk_buff *skb;
-               struct net_device *dev;
 
                local_irq_disable();
                skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2119,14 +2288,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
                        local_irq_enable();
                        break;
                }
-
                local_irq_enable();
 
-               dev = skb->dev;
-
                netif_receive_skb(skb);
-
-               dev_put(dev);
        } while (++work < quota && jiffies == start_time);
 
        return work;
@@ -2138,7 +2302,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
  *
  * The entry's receive function will be scheduled to run
  */
-void fastcall __napi_schedule(struct napi_struct *n)
+void __napi_schedule(struct napi_struct *n)
 {
        unsigned long flags;
 
@@ -2226,7 +2390,7 @@ out:
         */
        if (!cpus_empty(net_dma.channel_mask)) {
                int chan_idx;
-               for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+               for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
                        struct dma_chan *chan = net_dma.channels[chan_idx];
                        if (chan)
                                dma_async_memcpy_issue_pending(chan);
@@ -2363,6 +2527,7 @@ static int dev_ifconf(struct net *net, char __user *arg)
  *     in detail.
  */
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(dev_base_lock)
 {
        struct net *net = seq_file_net(seq);
        loff_t off;
@@ -2389,6 +2554,7 @@ void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
+       __releases(dev_base_lock)
 {
        read_unlock(&dev_base_lock);
 }
@@ -2437,7 +2603,7 @@ static struct netif_rx_stats *softnet_get_online(loff_t *pos)
 {
        struct netif_rx_stats *rc = NULL;
 
-       while (*pos < NR_CPUS)
+       while (*pos < nr_cpu_ids)
                if (cpu_online(*pos)) {
                        rc = &per_cpu(netdev_rx_stat, *pos);
                        break;
@@ -2525,7 +2691,7 @@ static void *ptype_get_idx(loff_t pos)
                ++i;
        }
 
-       for (t = 0; t < 16; t++) {
+       for (t = 0; t < PTYPE_HASH_SIZE; t++) {
                list_for_each_entry_rcu(pt, &ptype_base[t], list) {
                        if (i == pos)
                                return pt;
@@ -2536,6 +2702,7 @@ static void *ptype_get_idx(loff_t pos)
 }
 
 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU)
 {
        rcu_read_lock();
        return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
@@ -2559,10 +2726,10 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                hash = 0;
                nxt = ptype_base[0].next;
        } else
-               hash = ntohs(pt->type) & 15;
+               hash = ntohs(pt->type) & PTYPE_HASH_MASK;
 
        while (nxt == &ptype_base[hash]) {
-               if (++hash >= 16)
+               if (++hash >= PTYPE_HASH_SIZE)
                        return NULL;
                nxt = ptype_base[hash].next;
        }
@@ -2571,6 +2738,7 @@ found:
 }
 
 static void ptype_seq_stop(struct seq_file *seq, void *v)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
@@ -2606,7 +2774,7 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
 
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Type Device      Function\n");
-       else {
+       else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
                if (pt->type == htons(ETH_P_ALL))
                        seq_puts(seq, "ALL ");
                else
@@ -2630,7 +2798,8 @@ static const struct seq_operations ptype_seq_ops = {
 
 static int ptype_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ptype_seq_ops);
+       return seq_open_net(inode, file, &ptype_seq_ops,
+                       sizeof(struct seq_net_private));
 }
 
 static const struct file_operations ptype_seq_fops = {
@@ -2638,7 +2807,7 @@ static const struct file_operations ptype_seq_fops = {
        .open    = ptype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 
@@ -2729,30 +2898,47 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
        return 0;
 }
 
-static void __dev_set_promiscuity(struct net_device *dev, int inc)
+static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
        unsigned short old_flags = dev->flags;
 
        ASSERT_RTNL();
 
-       if ((dev->promiscuity += inc) == 0)
-               dev->flags &= ~IFF_PROMISC;
-       else
-               dev->flags |= IFF_PROMISC;
+       dev->flags |= IFF_PROMISC;
+       dev->promiscuity += inc;
+       if (dev->promiscuity == 0) {
+               /*
+                * Avoid overflow.
+                * If inc causes overflow, untouch promisc and return error.
+                */
+               if (inc < 0)
+                       dev->flags &= ~IFF_PROMISC;
+               else {
+                       dev->promiscuity -= inc;
+                       printk(KERN_WARNING "%s: promiscuity touches roof, "
+                               "set promiscuity failed, promiscuity feature "
+                               "of device might be broken.\n", dev->name);
+                       return -EOVERFLOW;
+               }
+       }
        if (dev->flags != old_flags) {
                printk(KERN_INFO "device %s %s promiscuous mode\n",
                       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
                                                               "left");
-               audit_log(current->audit_context, GFP_ATOMIC,
-                       AUDIT_ANOM_PROMISCUOUS,
-                       "dev=%s prom=%d old_prom=%d auid=%u",
-                       dev->name, (dev->flags & IFF_PROMISC),
-                       (old_flags & IFF_PROMISC),
-                       audit_get_loginuid(current->audit_context));
+               if (audit_enabled)
+                       audit_log(current->audit_context, GFP_ATOMIC,
+                               AUDIT_ANOM_PROMISCUOUS,
+                               "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
+                               dev->name, (dev->flags & IFF_PROMISC),
+                               (old_flags & IFF_PROMISC),
+                               audit_get_loginuid(current),
+                               current->uid, current->gid,
+                               audit_get_sessionid(current));
 
                if (dev->change_rx_flags)
                        dev->change_rx_flags(dev, IFF_PROMISC);
        }
+       return 0;
 }
 
 /**
@@ -2764,14 +2950,19 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
  *     remains above zero the interface remains promiscuous. Once it hits zero
  *     the device reverts back to normal filtering operation. A negative inc
  *     value is used to drop promiscuity on the device.
+ *     Return 0 if successful or a negative errno code on error.
  */
-void dev_set_promiscuity(struct net_device *dev, int inc)
+int dev_set_promiscuity(struct net_device *dev, int inc)
 {
        unsigned short old_flags = dev->flags;
+       int err;
 
-       __dev_set_promiscuity(dev, inc);
+       err = __dev_set_promiscuity(dev, inc);
+       if (err < 0)
+               return err;
        if (dev->flags != old_flags)
                dev_set_rx_mode(dev);
+       return err;
 }
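promiscuity is an unsigned reference count, so before this change a positive inc that wrapped it to zero silently dropped the interface out of promiscuous mode while callers still held references. The new code treats "hit zero on a positive increment" as overflow, rolls the counter back and returns -EOVERFLOW; only a negative inc reaching zero legitimately clears IFF_PROMISC. The wrap in miniature:

    #include <stdio.h>

    int main(void)
    {
            unsigned int promiscuity = 0xFFFFFFFFu; /* 2^32 - 1 holders */
            int inc = 1;

            promiscuity += inc;     /* wraps to 0: looks like "no users" */
            if (promiscuity == 0 && inc > 0) {
                    promiscuity -= inc;     /* roll back, -EOVERFLOW */
                    printf("overflow detected, count restored to %u\n",
                           promiscuity);
            }
            return 0;
    }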
 
 /**
@@ -2784,22 +2975,38 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
  *     to all interfaces. Once it hits zero the device reverts back to normal
  *     filtering operation. A negative @inc value is used to drop the counter
  *     when releasing a resource needing all multicasts.
+ *     Return 0 if successful or a negative errno code on error.
  */
 
-void dev_set_allmulti(struct net_device *dev, int inc)
+int dev_set_allmulti(struct net_device *dev, int inc)
 {
        unsigned short old_flags = dev->flags;
 
        ASSERT_RTNL();
 
        dev->flags |= IFF_ALLMULTI;
-       if ((dev->allmulti += inc) == 0)
-               dev->flags &= ~IFF_ALLMULTI;
+       dev->allmulti += inc;
+       if (dev->allmulti == 0) {
+               /*
+                * Avoid overflow.
+                * If inc causes overflow, untouch allmulti and return error.
+                */
+               if (inc < 0)
+                       dev->flags &= ~IFF_ALLMULTI;
+               else {
+                       dev->allmulti -= inc;
+                       printk(KERN_WARNING "%s: allmulti touches roof, "
+                               "set allmulti failed, allmulti feature of "
+                               "device might be broken.\n", dev->name);
+                       return -EOVERFLOW;
+               }
+       }
        if (dev->flags ^ old_flags) {
                if (dev->change_rx_flags)
                        dev->change_rx_flags(dev, IFF_ALLMULTI);
                dev_set_rx_mode(dev);
        }
+       return 0;
 }
 
 /*
@@ -2838,9 +3045,9 @@ void __dev_set_rx_mode(struct net_device *dev)
 
 void dev_set_rx_mode(struct net_device *dev)
 {
-       netif_tx_lock_bh(dev);
+       netif_addr_lock_bh(dev);
        __dev_set_rx_mode(dev);
-       netif_tx_unlock_bh(dev);
+       netif_addr_unlock_bh(dev);
 }
 
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
@@ -2888,7 +3095,7 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
                }
        }
 
-       da = kmalloc(sizeof(*da), GFP_ATOMIC);
+       da = kzalloc(sizeof(*da), GFP_ATOMIC);
        if (da == NULL)
                return -ENOMEM;
        memcpy(da->da_addr, addr, alen);
@@ -2918,11 +3125,11 @@ int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
 
        ASSERT_RTNL();
 
-       netif_tx_lock_bh(dev);
+       netif_addr_lock_bh(dev);
        err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
        if (!err)
                __dev_set_rx_mode(dev);
-       netif_tx_unlock_bh(dev);
+       netif_addr_unlock_bh(dev);
        return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -2930,7 +3137,7 @@ EXPORT_SYMBOL(dev_unicast_delete);
 /**
  *     dev_unicast_add         - add a secondary unicast address
  *     @dev: device
- *     @addr: address to delete
+ *     @addr: address to add
  *     @alen: length of @addr
  *
  *     Add a secondary unicast address to the device or increase
@@ -2944,15 +3151,110 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
 
        ASSERT_RTNL();
 
-       netif_tx_lock_bh(dev);
+       netif_addr_lock_bh(dev);
        err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
        if (!err)
                __dev_set_rx_mode(dev);
-       netif_tx_unlock_bh(dev);
+       netif_addr_unlock_bh(dev);
        return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
+int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
+                   struct dev_addr_list **from, int *from_count)
+{
+       struct dev_addr_list *da, *next;
+       int err = 0;
+
+       da = *from;
+       while (da != NULL) {
+               next = da->next;
+               if (!da->da_synced) {
+                       err = __dev_addr_add(to, to_count,
+                                            da->da_addr, da->da_addrlen, 0);
+                       if (err < 0)
+                               break;
+                       da->da_synced = 1;
+                       da->da_users++;
+               } else if (da->da_users == 1) {
+                       __dev_addr_delete(to, to_count,
+                                         da->da_addr, da->da_addrlen, 0);
+                       __dev_addr_delete(from, from_count,
+                                         da->da_addr, da->da_addrlen, 0);
+               }
+               da = next;
+       }
+       return err;
+}
+
+void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
+                      struct dev_addr_list **from, int *from_count)
+{
+       struct dev_addr_list *da, *next;
+
+       da = *from;
+       while (da != NULL) {
+               next = da->next;
+               if (da->da_synced) {
+                       __dev_addr_delete(to, to_count,
+                                         da->da_addr, da->da_addrlen, 0);
+                       da->da_synced = 0;
+                       __dev_addr_delete(from, from_count,
+                                         da->da_addr, da->da_addrlen, 0);
+               }
+               da = next;
+       }
+}
+
+/**
+ *     dev_unicast_sync - Synchronize device's unicast list to another device
+ *     @to: destination device
+ *     @from: source device
+ *
+ *     Add newly added addresses to the destination device and release
+ *     addresses that have no users left. The source device must be
+ *     locked by netif_addr_lock_bh.
+ *
+ *     This function is intended to be called from the dev->set_rx_mode
+ *     function of layered software devices.
+ */
+int dev_unicast_sync(struct net_device *to, struct net_device *from)
+{
+       int err = 0;
+
+       netif_addr_lock_bh(to);
+       err = __dev_addr_sync(&to->uc_list, &to->uc_count,
+                             &from->uc_list, &from->uc_count);
+       if (!err)
+               __dev_set_rx_mode(to);
+       netif_addr_unlock_bh(to);
+       return err;
+}
+EXPORT_SYMBOL(dev_unicast_sync);
+
+/**
+ *     dev_unicast_unsync - Remove synchronized addresses from the destination device
+ *     @to: destination device
+ *     @from: source device
+ *
+ *     Remove all addresses that were added to the destination device by
+ *     dev_unicast_sync(). This function is intended to be called from the
+ *     dev->stop function of layered software devices.
+ */
+void dev_unicast_unsync(struct net_device *to, struct net_device *from)
+{
+       netif_addr_lock_bh(from);
+       netif_addr_lock(to);
+
+       __dev_addr_unsync(&to->uc_list, &to->uc_count,
+                         &from->uc_list, &from->uc_count);
+       __dev_set_rx_mode(to);
+
+       netif_addr_unlock(to);
+       netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_unicast_unsync);
+
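The sync/unsync pair lets a stacked device mirror its secondary unicast list onto the real device underneath, with da_synced marking which entries were propagated. A sketch of the intended usage from a hypothetical layered driver (the VLAN code follows this pattern; example_priv and its real_dev field are illustrative names):

    #include <linux/netdevice.h>

    struct example_priv {
            struct net_device *real_dev;    /* underlying NIC */
    };

    /* Runs as dev->set_rx_mode, i.e. under netif_addr_lock_bh(dev):
     * push our secondary unicast addresses down to the real device. */
    static void example_set_rx_mode(struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);

            dev_unicast_sync(priv->real_dev, dev);
    }

    /* Runs as dev->stop: withdraw everything we pushed down. */
    static int example_stop(struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);

            dev_unicast_unsync(priv->real_dev, dev);
            return 0;
    }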
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
        struct dev_addr_list *tmp;
@@ -2969,7 +3271,7 @@ static void __dev_addr_discard(struct dev_addr_list **list)
 
 static void dev_addr_discard(struct net_device *dev)
 {
-       netif_tx_lock_bh(dev);
+       netif_addr_lock_bh(dev);
 
        __dev_addr_discard(&dev->uc_list);
        dev->uc_count = 0;
@@ -2977,7 +3279,7 @@ static void dev_addr_discard(struct net_device *dev)
        __dev_addr_discard(&dev->mc_list);
        dev->mc_count = 0;
 
-       netif_tx_unlock_bh(dev);
+       netif_addr_unlock_bh(dev);
 }
 
 unsigned dev_get_flags(const struct net_device *dev)
@@ -3025,7 +3327,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
         *      Load in the correct multicast list now the flags have changed.
         */
 
-       if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
+       if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
                dev->change_rx_flags(dev, IFF_MULTICAST);
 
        dev_set_rx_mode(dev);
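The old_flags change above fixes a dead test: dev->flags has just been rebuilt from flags a few lines earlier (IFF_MULTICAST is one of the copied bits), so dev->flags ^ flags could never show an IFF_MULTICAST transition and change_rx_flags() was never told about it. Diffing against the saved old_flags restores the notification:

    #include <stdio.h>

    #define IFF_MULTICAST 0x1000

    int main(void)
    {
            unsigned old_flags = 0;             /* multicast was off  */
            unsigned flags = IFF_MULTICAST;     /* caller turns it on */
            unsigned dev_flags = flags;         /* already updated    */

            /* Old test compares the new state with itself: always 0. */
            printf("dev->flags ^ flags: %#x\n",
                   (dev_flags ^ flags) & IFF_MULTICAST);
            /* Fixed test sees the transition. */
            printf("old_flags ^ flags:  %#x\n",
                   (old_flags ^ flags) & IFF_MULTICAST);
            return 0;
    }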
@@ -3222,7 +3524,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                        return -EOPNOTSUPP;
 
                case SIOCADDMULTI:
-                       if (!dev->set_multicast_list ||
+                       if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
                            ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
                                return -EINVAL;
                        if (!netif_device_present(dev))
@@ -3231,7 +3533,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                                          dev->addr_len, 1);
 
                case SIOCDELMULTI:
-                       if (!dev->set_multicast_list ||
+                       if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
                            ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
                                return -EINVAL;
                        if (!netif_device_present(dev))
@@ -3487,7 +3789,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregistration */
 static DEFINE_SPINLOCK(net_todo_list_lock);
-static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
+static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
@@ -3540,7 +3842,7 @@ static void rollback_registered(struct net_device *dev)
                dev->uninit(dev);
 
        /* Notifier chain MUST detach us from master device. */
-       BUG_TRAP(!dev->master);
+       WARN_ON(dev->master);
 
        /* Remove entries from kobject tree */
        netdev_unregister_kobject(dev);
@@ -3550,6 +3852,21 @@ static void rollback_registered(struct net_device *dev)
        dev_put(dev);
 }
 
+static void __netdev_init_queue_locks_one(struct net_device *dev,
+                                         struct netdev_queue *dev_queue,
+                                         void *_unused)
+{
+       spin_lock_init(&dev_queue->_xmit_lock);
+       netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+       dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+       netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
+       __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
+}
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -3581,14 +3898,12 @@ int register_netdevice(struct net_device *dev)
 
        /* When net_device's are persistent, this will be fatal. */
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
-       BUG_ON(!dev->nd_net);
-       net = dev->nd_net;
+       BUG_ON(!dev_net(dev));
+       net = dev_net(dev);
 
-       spin_lock_init(&dev->queue_lock);
-       spin_lock_init(&dev->_xmit_lock);
-       netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
-       dev->xmit_lock_owner = -1;
-       spin_lock_init(&dev->ingress_lock);
+       spin_lock_init(&dev->addr_list_lock);
+       netdev_set_addr_lockdep_class(dev);
+       netdev_init_queue_locks(dev);
 
        dev->iflink = -1;
 
@@ -3668,6 +3983,11 @@ int register_netdevice(struct net_device *dev)
                }
        }
 
+       /* Enable software GSO if SG is supported. */
+       if (dev->features & NETIF_F_SG)
+               dev->features |= NETIF_F_GSO;
+
+       netdev_initialize_kobject(dev);
        ret = netdev_register_kobject(dev);
        if (ret)
                goto err_uninit;
@@ -3844,13 +4164,15 @@ void netdev_run_todo(void)
 
                dev->reg_state = NETREG_UNREGISTERED;
 
+               on_each_cpu(flush_backlog, dev, 1);
+
                netdev_wait_allrefs(dev);
 
                /* paranoia */
                BUG_ON(atomic_read(&dev->refcnt));
-               BUG_TRAP(!dev->ip_ptr);
-               BUG_TRAP(!dev->ip6_ptr);
-               BUG_TRAP(!dev->dn_ptr);
+               WARN_ON(dev->ip_ptr);
+               WARN_ON(dev->ip6_ptr);
+               WARN_ON(dev->dn_ptr);
 
                if (dev->destructor)
                        dev->destructor(dev);
@@ -3868,6 +4190,20 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
        return &dev->stats;
 }
 
+static void netdev_init_one_queue(struct net_device *dev,
+                                 struct netdev_queue *queue,
+                                 void *_unused)
+{
+       queue->dev = dev;
+}
+
+static void netdev_init_queues(struct net_device *dev)
+{
+       netdev_init_one_queue(dev, &dev->rx_queue, NULL);
+       netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+       spin_lock_init(&dev->tx_global_lock);
+}
+
 /**
  *     alloc_netdev_mq - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
@@ -3882,17 +4218,21 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                void (*setup)(struct net_device *), unsigned int queue_count)
 {
-       void *p;
+       struct netdev_queue *tx;
        struct net_device *dev;
-       int alloc_size;
+       size_t alloc_size;
+       void *p;
 
        BUG_ON(strlen(name) >= sizeof(dev->name));
 
-       /* ensure 32-byte alignment of both the device and private area */
-       alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
-                    (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
-                    ~NETDEV_ALIGN_CONST;
-       alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
+       alloc_size = sizeof(struct net_device);
+       if (sizeof_priv) {
+               /* ensure 32-byte alignment of private area */
+               alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+               alloc_size += sizeof_priv;
+       }
+       /* ensure 32-byte alignment of whole construct */
+       alloc_size += NETDEV_ALIGN_CONST;
 
        p = kzalloc(alloc_size, GFP_KERNEL);
        if (!p) {
@@ -3900,20 +4240,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                return NULL;
        }
 
+       tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
+       if (!tx) {
+               printk(KERN_ERR "alloc_netdev: Unable to allocate "
+                      "tx qdiscs.\n");
+               kfree(p);
+               return NULL;
+       }
+
        dev = (struct net_device *)
                (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
        dev->padded = (char *)dev - (char *)p;
-       dev->nd_net = &init_net;
+       dev_net_set(dev, &init_net);
+
+       dev->_tx = tx;
+       dev->num_tx_queues = queue_count;
+       dev->real_num_tx_queues = queue_count;
 
        if (sizeof_priv) {
                dev->priv = ((char *)dev +
-                            ((sizeof(struct net_device) +
-                              (sizeof(struct net_device_subqueue) *
-                               (queue_count - 1)) + NETDEV_ALIGN_CONST)
+                            ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
                              & ~NETDEV_ALIGN_CONST));
        }
 
-       dev->egress_subqueue_count = queue_count;
+       dev->gso_max_size = GSO_MAX_SIZE;
+
+       netdev_init_queues(dev);
 
        dev->get_stats = internal_stats;
        netpoll_netdev_init(dev);
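Per-queue TX state now lives in a kcalloc'd array of struct netdev_queue rather than in trailing net_device_subqueue entries, and a driver declares its queue count up front; real_num_tx_queues lets it expose fewer queues than it allocated. A sketch of the allocation side for a hypothetical 4-queue Ethernet driver (foo_priv and foo_setup are illustrative names):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    struct foo_priv { int id; };

    static void foo_setup(struct net_device *dev)
    {
            ether_setup(dev);       /* type, MTU, broadcast addr, ... */
    }

    static struct net_device *foo_create(void)
    {
            struct net_device *dev;

            dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
                                  foo_setup, 4);
            if (!dev)
                    return NULL;
            dev->real_num_tx_queues = 2;    /* use 2 of the 4 for now */
            return dev;                     /* free with free_netdev() */
    }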
@@ -3933,6 +4285,10 @@ EXPORT_SYMBOL(alloc_netdev_mq);
  */
 void free_netdev(struct net_device *dev)
 {
+       release_net(dev_net(dev));
+
+       kfree(dev->_tx);
+
        /*  Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                kfree((char *)dev - dev->padded);
@@ -3966,6 +4322,8 @@ void synchronize_net(void)
 
 void unregister_netdevice(struct net_device *dev)
 {
+       ASSERT_RTNL();
+
        rollback_registered(dev);
        /* Finish processing unregister after unlock */
        net_set_todo(dev);
@@ -4025,7 +4383,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
        /* Get out if there is nothing to do */
        err = 0;
-       if (dev->nd_net == net)
+       if (net_eq(dev_net(dev), net))
                goto out;
 
        /* Pick the destination device name, and ensure
@@ -4076,7 +4434,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_addr_discard(dev);
 
        /* Actually switch the network namespace */
-       dev->nd_net = net;
+       dev_net_set(dev, net);
 
        /* Assign the new device name */
        if (destname != dev->name)
@@ -4091,7 +4449,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        }
 
        /* Fixup kobjects */
-       err = device_rename(&dev->dev, dev->name);
+       netdev_unregister_kobject(dev);
+       err = netdev_register_kobject(dev);
        WARN_ON(err);
 
        /* Add the device back in the hashes */
@@ -4111,7 +4470,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                            void *ocpu)
 {
        struct sk_buff **list_skb;
-       struct net_device **list_net;
+       struct Qdisc **list_net;
        struct sk_buff *skb;
        unsigned int cpu, oldcpu = (unsigned long)ocpu;
        struct softnet_data *sd, *oldsd;
@@ -4173,7 +4532,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
        i = 0;
        cpu = first_cpu(cpu_online_map);
 
-       for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+       for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
                chan = net_dma->channels[chan_idx];
 
                n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
@@ -4207,7 +4566,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
        spin_lock(&net_dma->lock);
        switch (state) {
        case DMA_RESOURCE_AVAILABLE:
-               for (i = 0; i < NR_CPUS; i++)
+               for (i = 0; i < nr_cpu_ids; i++)
                        if (net_dma->channels[i] == chan) {
                                found = 1;
                                break;
@@ -4222,7 +4581,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
                }
                break;
        case DMA_RESOURCE_REMOVED:
-               for (i = 0; i < NR_CPUS; i++)
+               for (i = 0; i < nr_cpu_ids; i++)
                        if (net_dma->channels[i] == chan) {
                                found = 1;
                                pos = i;
@@ -4249,6 +4608,13 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
+       net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
+                                                               GFP_KERNEL);
+       if (unlikely(!net_dma.channels)) {
+               printk(KERN_NOTICE
+                               "netdev_dma: no memory for net_dma.channels\n");
+               return -ENOMEM;
+       }
        spin_lock_init(&net_dma.lock);
        dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
        dma_async_client_register(&net_dma.client);
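One sizing quirk worth noting in the kzalloc() above: the array holds struct dma_chan * pointers, yet each element is sized as sizeof(struct net_dma). That over-allocates (harmlessly, since struct net_dma is larger than a pointer); a tighter form would be:

    net_dma.channels = kzalloc(nr_cpu_ids * sizeof(*net_dma.channels),
                               GFP_KERNEL);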
@@ -4333,6 +4699,26 @@ err_name:
        return -ENOMEM;
 }
 
+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+{
+       struct device_driver *driver;
+       struct device *parent;
+
+       if (len <= 0 || !buffer)
+               return buffer;
+       buffer[0] = 0;
+
+       parent = dev->dev.parent;
+
+       if (!parent)
+               return buffer;
+
+       driver = parent->driver;
+       if (driver && driver->name)
+               strlcpy(buffer, driver->name, len);
+       return buffer;
+}
+
 static void __net_exit netdev_exit(struct net *net)
 {
        kfree(net->dev_name_head);
@@ -4354,17 +4740,19 @@ static void __net_exit default_device_exit(struct net *net)
        rtnl_lock();
        for_each_netdev_safe(net, dev, next) {
                int err;
+               char fb_name[IFNAMSIZ];
 
                /* Ignore unmoveable devices (i.e. loopback) */
                if (dev->features & NETIF_F_NETNS_LOCAL)
                        continue;
 
                /* Push remaining network devices to init_net */
-               err = dev_change_net_namespace(dev, &init_net, "dev%d");
+               snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+               err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
-                       printk(KERN_WARNING "%s: failed to move %s to init_net: %d\n",
+                       printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
                                __func__, dev->name, err);
-                       unregister_netdevice(dev);
+                       BUG();
                }
        }
        rtnl_unlock();
@@ -4398,7 +4786,7 @@ static int __init net_dev_init(void)
                goto out;
 
        INIT_LIST_HEAD(&ptype_all);
-       for (i = 0; i < 16; i++)
+       for (i = 0; i < PTYPE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ptype_base[i]);
 
        if (register_pernet_subsys(&netdev_net_ops))
@@ -4427,8 +4815,8 @@ static int __init net_dev_init(void)
 
        dev_boot_phase = 0;
 
-       open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
-       open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
+       open_softirq(NET_TX_SOFTIRQ, net_tx_action);
+       open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
        hotcpu_notifier(dev_cpu_callback, 0);
        dst_init();