Networking: use CAP_NET_ADMIN when deciding to call request_module
net/core/dev.c
index dcc357e..278d489 100644
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <trace/events/napi.h>
 
 #include "net-sysfs.h"
 
@@ -268,7 +269,8 @@ static const unsigned short netdev_lock_type[] =
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-        ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+        ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+        ARPHRD_VOID, ARPHRD_NONE};
 
 static const char *netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -285,7 +287,8 @@ static const char *netdev_lock_name[] =
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
-        "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
+        "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
+        "_xmit_VOID", "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1028,7 +1031,7 @@ void dev_load(struct net *net, const char *name)
        dev = __dev_get_by_name(net, name);
        read_unlock(&dev_base_lock);
 
-       if (!dev && capable(CAP_SYS_MODULE))
+       if (!dev && capable(CAP_NET_ADMIN))
                request_module("%s", name);
 }
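For reference, dev_load() is reached from the interface ioctls in dev_ioctl(): a lookup by name that misses may pull in a module named after the interface. A minimal userspace sketch (hypothetical helper, not part of this patch) of how that path gets exercised — after this change request_module() fires for CAP_NET_ADMIN callers rather than CAP_SYS_MODULE ones:

/* Hypothetical userspace probe: asking for the flags of a not-yet-present
 * interface (e.g. "dummy0") goes through dev_ioctl() -> dev_load(), which
 * may request_module("dummy0") on behalf of a CAP_NET_ADMIN caller. */
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int probe_netdev(const char *name)
{
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	ret = ioctl(fd, SIOCGIFFLAGS, &ifr);	/* may trigger dev_load(name) */
	close(fd);
	return ret;
}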
 
@@ -1047,7 +1050,7 @@ void dev_load(struct net *net, const char *name)
 int dev_open(struct net_device *dev)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
-       int ret = 0;
+       int ret;
 
        ASSERT_RTNL();
 
@@ -1064,6 +1067,11 @@ int dev_open(struct net_device *dev)
        if (!netif_device_present(dev))
                return -ENODEV;
 
+       ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+       ret = notifier_to_errno(ret);
+       if (ret)
+               return ret;
+
        /*
         *      Call device private open method
         */
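NETDEV_PRE_UP lets other code veto dev_open() before ndo_open runs: a notifier that returns an error wrapped by notifier_from_errno() aborts the open with that error. A minimal sketch of a hypothetical notifier (names are illustrative only):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/string.h>

/* Hypothetical example: refuse to bring up any device named "test*". */
static int veto_pre_up(struct notifier_block *nb, unsigned long event,
		       void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_PRE_UP && !strncmp(dev->name, "test", 4))
		return notifier_from_errno(-EPERM);

	return NOTIFY_DONE;
}

static struct notifier_block veto_pre_up_nb = {
	.notifier_call = veto_pre_up,
};

/* registered elsewhere with register_netdevice_notifier(&veto_pre_up_nb) */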
@@ -1688,7 +1696,16 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                goto gso;
                }
 
+               /*
+                * If device doesn't need skb->dst, release it right now while
+                * it's hot in this cpu cache
+                */
+               if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+                       skb_dst_drop(skb);
+
                rc = ops->ndo_start_xmit(skb, dev);
+               if (rc == 0)
+                       txq_trans_update(txq);
                /*
                 * TODO: if skb_orphan() was called by
                 * dev->hard_start_xmit() (for example, the unmodified
@@ -1718,6 +1735,7 @@ gso:
                        skb->next = nskb;
                        return rc;
                }
+               txq_trans_update(txq);
                if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
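alloc_netdev_mq() below defaults dev->priv_flags to IFF_XMIT_DST_RELEASE, so the skb_dst_drop() added here applies to every device unless the driver opts out. A hypothetical setup() sketch for a device whose xmit path still needs skb_dst():

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical virtual device: its ndo_start_xmit looks at skb_dst(),
 * so it clears the flag and keeps the dst until xmit time. */
static void dstdev_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}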
@@ -1737,9 +1755,14 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
-       } else if (skb->sk && skb->sk->sk_hash) {
+               while (unlikely(hash >= dev->real_num_tx_queues))
+                       hash -= dev->real_num_tx_queues;
+               return hash;
+       }
+
+       if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
-       else
+       else
                hash = skb->protocol;
 
        hash = jhash_1word(hash, skb_tx_hashrnd);
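A recorded rx queue is now folded into [0, real_num_tx_queues) by repeated subtraction rather than re-hashed: rx queue 5 on a device with 4 tx queues maps to tx queue 1. A hedged sketch of a hypothetical ndo_select_queue that simply defers to this helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver hook: let the core hash/fold pick the tx queue. */
static u16 mydev_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}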
@@ -1799,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb)
        if (netif_needs_gso(dev, skb))
                goto gso;
 
-       if (skb_shinfo(skb)->frag_list &&
+       if (skb_has_frags(skb) &&
            !(dev->features & NETIF_F_FRAGLIST) &&
            __skb_linearize(skb))
                goto out_kfree_skb;
@@ -2048,11 +2071,13 @@ static inline int deliver_skb(struct sk_buff *skb,
 }
 
 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-/* These hooks defined here for ATM */
-struct net_bridge;
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-                                               unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+/* This hook is defined here for ATM LANE */
+int (*br_fdb_test_addr_hook)(struct net_device *dev,
+                            unsigned char *addr) __read_mostly;
+EXPORT_SYMBOL(br_fdb_test_addr_hook);
+#endif
 
 /*
  * If bridge module is loaded call bridging hook.
@@ -2060,6 +2085,8 @@ void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
  */
 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
                                        struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL(br_handle_frame_hook);
+
 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
                                            struct packet_type **pt_prev, int *ret,
                                            struct net_device *orig_dev)
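br_fdb_test_addr_hook is filled in by the bridge module and consumed by ATM LANE; callers must tolerate it being NULL. A hedged sketch of a hypothetical caller (the extern mirrors the definition above; where it is declared upstream is not part of this hunk):

#include <linux/netdevice.h>

extern int (*br_fdb_test_addr_hook)(struct net_device *dev,
				    unsigned char *addr);

/* Hypothetical helper: is @dest known to live behind this bridge port?
 * Real callers also need to pin the bridge module while dereferencing. */
static int addr_behind_bridge(struct net_device *dev, unsigned char *dest)
{
	if (!br_fdb_test_addr_hook)
		return 0;
	return br_fdb_test_addr_hook(dev, dest);
}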
@@ -2283,8 +2310,6 @@ ncls:
        if (!skb)
                goto out;
 
-       skb_orphan(skb);
-
        type = skb->protocol;
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
@@ -2373,26 +2398,6 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
-{
-       unsigned int offset = skb_gro_offset(skb);
-
-       hlen += offset;
-       if (hlen <= skb_headlen(skb))
-               return skb->data + offset;
-
-       if (unlikely(!skb_shinfo(skb)->nr_frags ||
-                    skb_shinfo(skb)->frags[0].size <=
-                    hlen - skb_headlen(skb) ||
-                    PageHighMem(skb_shinfo(skb)->frags[0].page)))
-               return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
-
-       return page_address(skb_shinfo(skb)->frags[0].page) +
-              skb_shinfo(skb)->frags[0].page_offset +
-              offset - skb_headlen(skb);
-}
-EXPORT_SYMBOL(skb_gro_header);
-
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff **pp = NULL;
@@ -2406,7 +2411,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
        if (!(skb->dev->features & NETIF_F_GRO))
                goto normal;
 
-       if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+       if (skb_is_gso(skb) || skb_has_frags(skb))
                goto normal;
 
        rcu_read_lock();
@@ -2455,10 +2460,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
        ret = GRO_HELD;
 
 pull:
-       if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
-               if (napi->gro_list == skb)
-                       napi->gro_list = skb->next;
-               ret = GRO_DROP;
+       if (skb_headlen(skb) < skb_gro_offset(skb)) {
+               int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+               BUG_ON(skb->end - skb->tail < grow);
+
+               memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+               skb->tail += grow;
+               skb->data_len -= grow;
+
+               skb_shinfo(skb)->frags[0].page_offset += grow;
+               skb_shinfo(skb)->frags[0].size -= grow;
+
+               if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+                       put_page(skb_shinfo(skb)->frags[0].page);
+                       memmove(skb_shinfo(skb)->frags,
+                               skb_shinfo(skb)->frags + 1,
+                               --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+               }
        }
 
 ok:
@@ -2508,6 +2528,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
+void skb_gro_reset_offset(struct sk_buff *skb)
+{
+       NAPI_GRO_CB(skb)->data_offset = 0;
+       NAPI_GRO_CB(skb)->frag0 = NULL;
+       NAPI_GRO_CB(skb)->frag0_len = 0;
+
+       if (skb->mac_header == skb->tail &&
+           !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+               NAPI_GRO_CB(skb)->frag0 =
+                       page_address(skb_shinfo(skb)->frags[0].page) +
+                       skb_shinfo(skb)->frags[0].page_offset;
+               NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+       }
+}
+EXPORT_SYMBOL(skb_gro_reset_offset);
+
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        skb_gro_reset_offset(skb);
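skb_gro_reset_offset() caches a pointer into the first page fragment (frag0) whenever the linear area is empty, so header peeks can skip pskb_may_pull(). In place of the removed skb_gro_header(), protocol gro_receive handlers use the fast/slow accessor pair, as napi_frags_skb() does further down. A minimal sketch for a hypothetical handler peeking at an IPv4 header:

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical gro_receive fragment: frag0 fast path first, then the
 * pulling slow path only when the header is not fully there. */
static struct iphdr *gro_peek_iphdr(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(*iph);

	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		iph = skb_gro_header_slow(skb, hlen, off);

	return iph;	/* NULL if the header could not be pulled */
}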
@@ -2525,16 +2561,10 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_reuse_skb);
 
-struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
-                                 struct napi_gro_fraginfo *info)
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
        struct net_device *dev = napi->dev;
        struct sk_buff *skb = napi->skb;
-       struct ethhdr *eth;
-       skb_frag_t *frag;
-       int i;
-
-       napi->skb = NULL;
 
        if (!skb) {
                skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
@@ -2542,47 +2572,14 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
                        goto out;
 
                skb_reserve(skb, NET_IP_ALIGN);
-       }
-
-       BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-       frag = &info->frags[info->nr_frags - 1];
-
-       for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
-               skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
-                                  frag->size);
-               frag++;
-       }
-       skb_shinfo(skb)->nr_frags = info->nr_frags;
-
-       skb->data_len = info->len;
-       skb->len += info->len;
-       skb->truesize += info->len;
-
-       skb_reset_mac_header(skb);
-       skb_gro_reset_offset(skb);
 
-       eth = skb_gro_header(skb, sizeof(*eth));
-       if (!eth) {
-               napi_reuse_skb(napi, skb);
-               skb = NULL;
-               goto out;
+               napi->skb = skb;
        }
 
-       skb_gro_pull(skb, sizeof(*eth));
-
-       /*
-        * This works because the only protocols we care about don't require
-        * special handling.  We'll fix it up properly at the end.
-        */
-       skb->protocol = eth->h_proto;
-
-       skb->ip_summed = info->ip_summed;
-       skb->csum = info->csum;
-
 out:
        return skb;
 }
-EXPORT_SYMBOL(napi_fraginfo_skb);
+EXPORT_SYMBOL(napi_get_frags);
 
 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 {
@@ -2612,9 +2609,46 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
-       struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+       struct sk_buff *skb = napi->skb;
+       struct ethhdr *eth;
+       unsigned int hlen;
+       unsigned int off;
+
+       napi->skb = NULL;
+
+       skb_reset_mac_header(skb);
+       skb_gro_reset_offset(skb);
+
+       off = skb_gro_offset(skb);
+       hlen = off + sizeof(*eth);
+       eth = skb_gro_header_fast(skb, off);
+       if (skb_gro_header_hard(skb, hlen)) {
+               eth = skb_gro_header_slow(skb, hlen, off);
+               if (unlikely(!eth)) {
+                       napi_reuse_skb(napi, skb);
+                       skb = NULL;
+                       goto out;
+               }
+       }
+
+       skb_gro_pull(skb, sizeof(*eth));
+
+       /*
+        * This works because the only protocols we care about don't require
+        * special handling.  We'll fix it up properly at the end.
+        */
+       skb->protocol = eth->h_proto;
+
+out:
+       return skb;
+}
+EXPORT_SYMBOL(napi_frags_skb);
+
+int napi_gro_frags(struct napi_struct *napi)
+{
+       struct sk_buff *skb = napi_frags_skb(napi);
 
        if (!skb)
                return NET_RX_DROP;
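With napi_gro_fraginfo gone, a driver on the paged receive path asks the stack for the skb, attaches its page fragment(s), fixes up the length accounting, and hands the skb back. A hedged sketch of a hypothetical per-packet rx routine (page/offset/len would come from the rx descriptor):

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical rx handler for a device that DMAs into page fragments. */
static int mydev_rx_page(struct napi_struct *napi, struct page *page,
			 unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);			/* no skb: drop the fragment */
		return NET_RX_DROP;
	}

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	return napi_gro_frags(napi);
}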
@@ -2718,7 +2752,7 @@ void netif_napi_del(struct napi_struct *napi)
        struct sk_buff *skb, *next;
 
        list_del_init(&napi->dev_list);
-       kfree_skb(napi->skb);
+       napi_free_frags(napi);
 
        for (skb = napi->gro_list; skb; skb = next) {
                next = skb->next;
@@ -2772,8 +2806,10 @@ static void net_rx_action(struct softirq_action *h)
                 * accidently calling ->poll() when NAPI is not scheduled.
                 */
                work = 0;
-               if (test_bit(NAPI_STATE_SCHED, &n->state))
+               if (test_bit(NAPI_STATE_SCHED, &n->state)) {
                        work = n->poll(n, weight);
+                       trace_napi_poll(n);
+               }
 
                WARN_ON_ONCE(work > weight);
 
@@ -2787,9 +2823,11 @@ static void net_rx_action(struct softirq_action *h)
                 * move the instance around on the list at-will.
                 */
                if (unlikely(work == weight)) {
-                       if (unlikely(napi_disable_pending(n)))
-                               __napi_complete(n);
-                       else
+                       if (unlikely(napi_disable_pending(n))) {
+                               local_irq_enable();
+                               napi_complete(n);
+                               local_irq_disable();
+                       } else
                                list_move_tail(&n->poll_list, list);
                }
 
@@ -3423,10 +3461,10 @@ void __dev_set_rx_mode(struct net_device *dev)
                /* Unicast addresses changes may only happen under the rtnl,
                 * therefore calling __dev_set_promiscuity here is safe.
                 */
-               if (dev->uc_count > 0 && !dev->uc_promisc) {
+               if (dev->uc.count > 0 && !dev->uc_promisc) {
                        __dev_set_promiscuity(dev, 1);
                        dev->uc_promisc = 1;
-               } else if (dev->uc_count == 0 && dev->uc_promisc) {
+               } else if (dev->uc.count == 0 && dev->uc_promisc) {
                        __dev_set_promiscuity(dev, -1);
                        dev->uc_promisc = 0;
                }
@@ -3443,6 +3481,316 @@ void dev_set_rx_mode(struct net_device *dev)
        netif_addr_unlock_bh(dev);
 }
 
+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+                        int addr_len, unsigned char addr_type)
+{
+       struct netdev_hw_addr *ha;
+       int alloc_size;
+
+       if (addr_len > MAX_ADDR_LEN)
+               return -EINVAL;
+
+       list_for_each_entry(ha, &list->list, list) {
+               if (!memcmp(ha->addr, addr, addr_len) &&
+                   ha->type == addr_type) {
+                       ha->refcount++;
+                       return 0;
+               }
+       }
+
+       alloc_size = sizeof(*ha);
+       if (alloc_size < L1_CACHE_BYTES)
+               alloc_size = L1_CACHE_BYTES;
+       ha = kmalloc(alloc_size, GFP_ATOMIC);
+       if (!ha)
+               return -ENOMEM;
+       memcpy(ha->addr, addr, addr_len);
+       ha->type = addr_type;
+       ha->refcount = 1;
+       ha->synced = false;
+       list_add_tail_rcu(&ha->list, &list->list);
+       list->count++;
+       return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+       struct netdev_hw_addr *ha;
+
+       ha = container_of(head, struct netdev_hw_addr, rcu_head);
+       kfree(ha);
+}
+
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+                        int addr_len, unsigned char addr_type)
+{
+       struct netdev_hw_addr *ha;
+
+       list_for_each_entry(ha, &list->list, list) {
+               if (!memcmp(ha->addr, addr, addr_len) &&
+                   (ha->type == addr_type || !addr_type)) {
+                       if (--ha->refcount)
+                               return 0;
+                       list_del_rcu(&ha->list);
+                       call_rcu(&ha->rcu_head, ha_rcu_free);
+                       list->count--;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+                                 struct netdev_hw_addr_list *from_list,
+                                 int addr_len,
+                                 unsigned char addr_type)
+{
+       int err;
+       struct netdev_hw_addr *ha, *ha2;
+       unsigned char type;
+
+       list_for_each_entry(ha, &from_list->list, list) {
+               type = addr_type ? addr_type : ha->type;
+               err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+               if (err)
+                       goto unroll;
+       }
+       return 0;
+
+unroll:
+       list_for_each_entry(ha2, &from_list->list, list) {
+               if (ha2 == ha)
+                       break;
+               type = addr_type ? addr_type : ha2->type;
+               __hw_addr_del(to_list, ha2->addr, addr_len, type);
+       }
+       return err;
+}
+
+static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+                                  struct netdev_hw_addr_list *from_list,
+                                  int addr_len,
+                                  unsigned char addr_type)
+{
+       struct netdev_hw_addr *ha;
+       unsigned char type;
+
+       list_for_each_entry(ha, &from_list->list, list) {
+               type = addr_type ? addr_type : ha->type;
+               __hw_addr_del(to_list, ha->addr, addr_len, type);
+       }
+}
+
+static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+                         struct netdev_hw_addr_list *from_list,
+                         int addr_len)
+{
+       int err = 0;
+       struct netdev_hw_addr *ha, *tmp;
+
+       list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+               if (!ha->synced) {
+                       err = __hw_addr_add(to_list, ha->addr,
+                                           addr_len, ha->type);
+                       if (err)
+                               break;
+                       ha->synced = true;
+                       ha->refcount++;
+               } else if (ha->refcount == 1) {
+                       __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+                       __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+               }
+       }
+       return err;
+}
+
+static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+                            struct netdev_hw_addr_list *from_list,
+                            int addr_len)
+{
+       struct netdev_hw_addr *ha, *tmp;
+
+       list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+               if (ha->synced) {
+                       __hw_addr_del(to_list, ha->addr,
+                                     addr_len, ha->type);
+                       ha->synced = false;
+                       __hw_addr_del(from_list, ha->addr,
+                                     addr_len, ha->type);
+               }
+       }
+}
+
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
+{
+       struct netdev_hw_addr *ha, *tmp;
+
+       list_for_each_entry_safe(ha, tmp, &list->list, list) {
+               list_del_rcu(&ha->list);
+               call_rcu(&ha->rcu_head, ha_rcu_free);
+       }
+       list->count = 0;
+}
+
+static void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+       INIT_LIST_HEAD(&list->list);
+       list->count = 0;
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+       /* rtnl_mutex must be held here */
+
+       __hw_addr_flush(&dev->dev_addrs);
+       dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+       unsigned char addr[MAX_ADDR_LEN];
+       struct netdev_hw_addr *ha;
+       int err;
+
+       /* rtnl_mutex must be held here */
+
+       __hw_addr_init(&dev->dev_addrs);
+       memset(addr, 0, sizeof(addr));
+       err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+                           NETDEV_HW_ADDR_T_LAN);
+       if (!err) {
+               /*
+                * Get the first (previously created) address from the list
+                * and set dev_addr pointer to this location.
+                */
+               ha = list_first_entry(&dev->dev_addrs.list,
+                                     struct netdev_hw_addr, list);
+               dev->dev_addr = ha->addr;
+       }
+       return err;
+}
+
+/**
+ *     dev_addr_add    - Add a device address
+ *     @dev: device
+ *     @addr: address to add
+ *     @addr_type: address type
+ *
+ *     Add a device address to the device or increase the reference count if
+ *     it already exists.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+                unsigned char addr_type)
+{
+       int err;
+
+       ASSERT_RTNL();
+
+       err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ *     dev_addr_del    - Release a device address.
+ *     @dev: device
+ *     @addr: address to delete
+ *     @addr_type: address type
+ *
+ *     Release reference to a device address and remove it from the device
+ *     if the reference count drops to zero.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+                unsigned char addr_type)
+{
+       int err;
+       struct netdev_hw_addr *ha;
+
+       ASSERT_RTNL();
+
+       /*
+        * We can not remove the first address from the list because
+        * dev->dev_addr points to that.
+        */
+       ha = list_first_entry(&dev->dev_addrs.list,
+                             struct netdev_hw_addr, list);
+       if (ha->addr == dev->dev_addr && ha->refcount == 1)
+               return -ENOENT;
+
+       err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+                           addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
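A hedged usage sketch for the new list-based device address API; both calls must run under the rtnl lock and fire NETDEV_CHANGEADDR on success. The secondary address bytes are made up, and the sketch assumes an Ethernet-sized addr_len:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: attach a second hardware address and drop it
 * again. Deleting the primary (first) address would return -ENOENT. */
static int mydev_toggle_extra_addr(struct net_device *dev)
{
	unsigned char extra[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, extra, NETDEV_HW_ADDR_T_LAN);
	if (!err)
		err = dev_addr_del(dev, extra, NETDEV_HW_ADDR_T_LAN);
	rtnl_unlock();

	return err;
}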
+
+/**
+ *     dev_addr_add_multiple   - Add device addresses from another device
+ *     @to_dev: device to which addresses will be added
+ *     @from_dev: device from which addresses will be added
+ *     @addr_type: address type - 0 means type will be used from from_dev
+ *
+ *     Add device addresses of one device to another.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev,
+                         unsigned char addr_type)
+{
+       int err;
+
+       ASSERT_RTNL();
+
+       if (from_dev->addr_len != to_dev->addr_len)
+               return -EINVAL;
+       err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+                                    to_dev->addr_len, addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+       return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ *     dev_addr_del_multiple   - Delete device addresses by another device
+ *     @to_dev: device where the addresses will be deleted
+ *     @from_dev: device supplying the list of addresses to be deleted
+ *     @addr_type: address type - 0 means type will be used from from_dev
+ *
+ *     Deletes the addresses in @to_dev that are listed in @from_dev.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev,
+                         unsigned char addr_type)
+{
+       ASSERT_RTNL();
+
+       if (from_dev->addr_len != to_dev->addr_len)
+               return -EINVAL;
+       __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+                              to_dev->addr_len, addr_type);
+       call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+       return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* multicast addresses handling functions */
+
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
                      void *addr, int alen, int glbl)
 {
@@ -3505,21 +3853,21 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
  *     dev_unicast_delete      - Release secondary unicast address.
  *     @dev: device
  *     @addr: address to delete
- *     @alen: length of @addr
  *
  *     Release reference to a secondary unicast address and remove it
  *     from the device if the reference count drops to zero.
  *
  *     The caller must hold the rtnl_mutex.
  */
-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+int dev_unicast_delete(struct net_device *dev, void *addr)
 {
        int err;
 
        ASSERT_RTNL();
 
        netif_addr_lock_bh(dev);
-       err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+       err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+                           NETDEV_HW_ADDR_T_UNICAST);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
@@ -3531,21 +3879,21 @@ EXPORT_SYMBOL(dev_unicast_delete);
  *     dev_unicast_add         - add a secondary unicast address
  *     @dev: device
  *     @addr: address to add
- *     @alen: length of @addr
  *
  *     Add a secondary unicast address to the device or increase
  *     the reference count if it already exists.
  *
  *     The caller must hold the rtnl_mutex.
  */
-int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+int dev_unicast_add(struct net_device *dev, void *addr)
 {
        int err;
 
        ASSERT_RTNL();
 
        netif_addr_lock_bh(dev);
-       err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+       err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+                           NETDEV_HW_ADDR_T_UNICAST);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
@@ -3615,9 +3963,11 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
        int err = 0;
 
+       if (to->addr_len != from->addr_len)
+               return -EINVAL;
+
        netif_addr_lock_bh(to);
-       err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-                             &from->uc_list, &from->uc_count);
+       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock_bh(to);
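dev_unicast_sync()/dev_unicast_unsync() now bail out on mismatched address lengths, so stacked devices should only mirror onto a lower device with the same addr_len. A hedged sketch of a hypothetical upper device's rx-mode handler in the 8021q/macvlan style (the priv layout is made up):

#include <linux/netdevice.h>

/* Hypothetical private data of a stacked device. */
struct upper_priv {
	struct net_device *lowerdev;
};

/* Hypothetical ndo_set_rx_mode: mirror secondary unicast and multicast
 * addresses onto the underlying device. */
static void upper_set_rx_mode(struct net_device *dev)
{
	struct upper_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lowerdev, dev);
	dev_mc_sync(priv->lowerdev, dev);
}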
@@ -3636,18 +3986,31 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
+       if (to->addr_len != from->addr_len)
+               return;
+
        netif_addr_lock_bh(from);
        netif_addr_lock(to);
-
-       __dev_addr_unsync(&to->uc_list, &to->uc_count,
-                         &from->uc_list, &from->uc_count);
+       __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
        __dev_set_rx_mode(to);
-
        netif_addr_unlock(to);
        netif_addr_unlock_bh(from);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
+static void dev_unicast_flush(struct net_device *dev)
+{
+       netif_addr_lock_bh(dev);
+       __hw_addr_flush(&dev->uc);
+       netif_addr_unlock_bh(dev);
+}
+
+static void dev_unicast_init(struct net_device *dev)
+{
+       __hw_addr_init(&dev->uc);
+}
+
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
        struct dev_addr_list *tmp;
@@ -3666,9 +4029,6 @@ static void dev_addr_discard(struct net_device *dev)
 {
        netif_addr_lock_bh(dev);
 
-       __dev_addr_discard(&dev->uc_list);
-       dev->uc_count = 0;
-
        __dev_addr_discard(&dev->mc_list);
        dev->mc_count = 0;
 
@@ -3852,7 +4212,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
 
        switch (cmd) {
                case SIOCGIFFLAGS:      /* Get interface flags */
-                       ifr->ifr_flags = dev_get_flags(dev);
+                       ifr->ifr_flags = (short) dev_get_flags(dev);
                        return 0;
 
                case SIOCGIFMETRIC:     /* Get the metric on the interface
@@ -4261,6 +4621,7 @@ static void rollback_registered(struct net_device *dev)
        /*
         *      Flush the unicast and multicast chains
         */
+       dev_unicast_flush(dev);
        dev_addr_discard(dev);
 
        if (dev->netdev_ops->ndo_uninit)
@@ -4332,39 +4693,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-/* Some devices need to (re-)set their netdev_ops inside
- * ->init() or similar.  If that happens, we have to setup
- * the compat pointers again.
- */
-void netdev_resync_ops(struct net_device *dev)
-{
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-       const struct net_device_ops *ops = dev->netdev_ops;
-
-       dev->init = ops->ndo_init;
-       dev->uninit = ops->ndo_uninit;
-       dev->open = ops->ndo_open;
-       dev->change_rx_flags = ops->ndo_change_rx_flags;
-       dev->set_rx_mode = ops->ndo_set_rx_mode;
-       dev->set_multicast_list = ops->ndo_set_multicast_list;
-       dev->set_mac_address = ops->ndo_set_mac_address;
-       dev->validate_addr = ops->ndo_validate_addr;
-       dev->do_ioctl = ops->ndo_do_ioctl;
-       dev->set_config = ops->ndo_set_config;
-       dev->change_mtu = ops->ndo_change_mtu;
-       dev->neigh_setup = ops->ndo_neigh_setup;
-       dev->tx_timeout = ops->ndo_tx_timeout;
-       dev->get_stats = ops->ndo_get_stats;
-       dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-       dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-       dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = ops->ndo_poll_controller;
-#endif
-#endif
-}
-EXPORT_SYMBOL(netdev_resync_ops);
-
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -4404,23 +4732,6 @@ int register_netdevice(struct net_device *dev)
 
        dev->iflink = -1;
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-       /* Netdevice_ops API compatiability support.
-        * This is temporary until all network devices are converted.
-        */
-       if (dev->netdev_ops) {
-               netdev_resync_ops(dev);
-       } else {
-               char drivername[64];
-               pr_info("%s (%s): not using net_device_ops yet\n",
-                       dev->name, netdev_drivername(dev, drivername, 64));
-
-               /* This works only because net_device_ops and the
-                  compatiablity structure are the same. */
-               dev->netdev_ops = (void *) &(dev->init);
-       }
-#endif
-
        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
@@ -4706,13 +5017,30 @@ void netdev_run_todo(void)
  *     the internal statistics structure is used.
  */
 const struct net_device_stats *dev_get_stats(struct net_device *dev)
- {
+{
        const struct net_device_ops *ops = dev->netdev_ops;
 
        if (ops->ndo_get_stats)
                return ops->ndo_get_stats(dev);
-       else
-               return &dev->stats;
+       else {
+               unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+               struct net_device_stats *stats = &dev->stats;
+               unsigned int i;
+               struct netdev_queue *txq;
+
+               for (i = 0; i < dev->num_tx_queues; i++) {
+                       txq = netdev_get_tx_queue(dev, i);
+                       tx_bytes   += txq->tx_bytes;
+                       tx_packets += txq->tx_packets;
+                       tx_dropped += txq->tx_dropped;
+               }
+               if (tx_bytes || tx_packets || tx_dropped) {
+                       stats->tx_bytes   = tx_bytes;
+                       stats->tx_packets = tx_packets;
+                       stats->tx_dropped = tx_dropped;
+               }
+               return stats;
+       }
 }
 EXPORT_SYMBOL(dev_get_stats);
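For drivers without ndo_get_stats, dev_get_stats() now sums the per-tx-queue counters into dev->stats. A hedged sketch of the xmit-side accounting a hypothetical multiqueue driver would do to feed them (the actual transmission is stubbed out):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical ndo_start_xmit: bump the per-queue counters that
 * dev_get_stats() aggregates. */
static int mydev_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	txq->tx_packets++;
	txq->tx_bytes += skb->len;

	dev_kfree_skb(skb);		/* stand-in for the real hardware path */
	return NETDEV_TX_OK;
}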
 
@@ -4747,18 +5075,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
        struct netdev_queue *tx;
        struct net_device *dev;
        size_t alloc_size;
-       void *p;
+       struct net_device *p;
 
        BUG_ON(strlen(name) >= sizeof(dev->name));
 
        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
                /* ensure 32-byte alignment of private area */
-               alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+               alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
                alloc_size += sizeof_priv;
        }
        /* ensure 32-byte alignment of whole construct */
-       alloc_size += NETDEV_ALIGN_CONST;
+       alloc_size += NETDEV_ALIGN - 1;
 
        p = kzalloc(alloc_size, GFP_KERNEL);
        if (!p) {
@@ -4770,13 +5098,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
        if (!tx) {
                printk(KERN_ERR "alloc_netdev: Unable to allocate "
                       "tx qdiscs.\n");
-               kfree(p);
-               return NULL;
+               goto free_p;
        }
 
-       dev = (struct net_device *)
-               (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+       dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;
+
+       if (dev_addr_init(dev))
+               goto free_tx;
+
+       dev_unicast_init(dev);
+
        dev_net_set(dev, &init_net);
 
        dev->_tx = tx;
@@ -4788,9 +5120,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
        netdev_init_queues(dev);
 
        INIT_LIST_HEAD(&dev->napi_list);
+       dev->priv_flags = IFF_XMIT_DST_RELEASE;
        setup(dev);
        strcpy(dev->name, name);
        return dev;
+
+free_tx:
+       kfree(tx);
+
+free_p:
+       kfree(p);
+       return NULL;
 }
 EXPORT_SYMBOL(alloc_netdev_mq);
 
@@ -4810,6 +5150,9 @@ void free_netdev(struct net_device *dev)
 
        kfree(dev->_tx);
 
+       /* Flush device addresses */
+       dev_addr_flush(dev);
+
        list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                netif_napi_del(p);
 
@@ -4969,6 +5312,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        /*
         *      Flush the unicast and multicast chains
         */
+       dev_unicast_flush(dev);
        dev_addr_discard(dev);
 
        netdev_unregister_kobject(dev);
@@ -5324,12 +5668,6 @@ EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
 
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_handle_frame_hook);
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif
-
 EXPORT_SYMBOL(dev_load);
 
 EXPORT_PER_CPU_SYMBOL(softnet_data);