diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f980b91..c584a0a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 #include "vlan.h"
 
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
-                     unsigned short vlan_tag, int polling)
+                     u16 vlan_tci, int polling)
 {
-       struct net_device_stats *stats;
-
-       if (skb_bond_should_drop(skb)) {
-               dev_kfree_skb_any(skb);
+       if (netpoll_rx(skb))
                return NET_RX_DROP;
-       }
 
-       skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
-       if (skb->dev == NULL) {
-               dev_kfree_skb_any(skb);
-               /* Not NET_RX_DROP, this is not being dropped
-                * due to congestion. */
-               return NET_RX_SUCCESS;
-       }
-       skb->dev->last_rx = jiffies;
+       if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
+               goto drop;
+
+       skb->skb_iif = skb->dev->ifindex;
+       __vlan_hwaccel_put_tag(skb, vlan_tci);
+       skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+       if (!skb->dev)
+               goto drop;
+
+       return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+       dev_kfree_skb_any(skb);
+       return NET_RX_DROP;
+}
+EXPORT_SYMBOL(__vlan_hwaccel_rx);
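
For context, the <linux/if_vlan.h> wrappers vlan_hwaccel_rx() and
vlan_hwaccel_receive_skb() call __vlan_hwaccel_rx() with polling set to 0 and
1 respectively. A minimal sketch of a NAPI driver handing off a frame whose
802.1Q tag the hardware stripped into the RX descriptor (netdev, adapter, rxd
and RXD_VLAN_PRESENT are hypothetical names, not from this patch):

	/* In the driver's NAPI poll handler, after building the skb: */
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->vlgrp && (rxd->status & RXD_VLAN_PRESENT))
		/* polling context, so this ends in netif_receive_skb() */
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(rxd->vlan_tci));
	else
		netif_receive_skb(skb);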
 
-       stats = &skb->dev->stats;
-       stats->rx_packets++;
-       stats->rx_bytes += skb->len;
+int vlan_hwaccel_do_receive(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct vlan_rx_stats *rx_stats;
+
+       skb->dev = vlan_dev_info(dev)->real_dev;
+       netif_nit_deliver(skb);
+
+       skb->dev = dev;
+       skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
+       skb->vlan_tci = 0;
+
+       rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+                              smp_processor_id());
+
+       rx_stats->rx_packets++;
+       rx_stats->rx_bytes += skb->len;
 
-       skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
        switch (skb->pkt_type) {
        case PACKET_BROADCAST:
                break;
        case PACKET_MULTICAST:
-               stats->multicast++;
+               rx_stats->multicast++;
                break;
        case PACKET_OTHERHOST:
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-                                       skb->dev->dev_addr))
+                                       dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
                break;
        };
-       return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+       return 0;
 }
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
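
The per-device counters above become per-CPU, so the hot RX path can count
without atomics or locks. From the fields referenced in this hunk, a sketch of
the counter structure and its presumed allocation at VLAN device init (the
real definitions live in the 8021q private headers; this is paraphrased, not
quoted):

	struct vlan_rx_stats {
		unsigned long rx_packets;
		unsigned long rx_bytes;
		unsigned long multicast;
	};

	/* One instance per CPU, fetched in the RX path with per_cpu_ptr() */
	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
	if (!vlan_dev_info(dev)->vlan_rx_stats)
		return -ENOMEM;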
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
        return vlan_dev_info(dev)->real_dev;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_real_dev);
+EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
        return vlan_dev_info(dev)->vlan_id;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+EXPORT_SYMBOL(vlan_dev_vlan_id);
+
+static gro_result_t
+vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+               unsigned int vlan_tci, struct sk_buff *skb)
+{
+       struct sk_buff *p;
+
+       if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
+               goto drop;
+
+       skb->skb_iif = skb->dev->ifindex;
+       __vlan_hwaccel_put_tag(skb, vlan_tci);
+       skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+       if (!skb->dev)
+               goto drop;
+
+       for (p = napi->gro_list; p; p = p->next) {
+               NAPI_GRO_CB(p)->same_flow =
+                       p->dev == skb->dev && !compare_ether_header(
+                               skb_mac_header(p), skb_gro_mac_header(skb));
+               NAPI_GRO_CB(p)->flush = 0;
+       }
+
+       return dev_gro_receive(napi, skb);
+
+drop:
+       return GRO_DROP;
+}
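
The same_flow loop above restricts GRO merging to packets that, after
retagging, belong to the same VLAN device and carry an identical Ethernet
header. Conceptually the per-packet test reduces to the following (the helper
name is hypothetical; this is a paraphrase, not kernel code):

	static bool same_vlan_flow(struct sk_buff *held,
				   struct sk_buff *incoming)
	{
		/* compare_ether_header() returns 0 when the two 14-byte
		 * Ethernet headers (dst MAC, src MAC, ethertype) match. */
		return held->dev == incoming->dev &&
		       !compare_ether_header(skb_mac_header(held),
					     skb_gro_mac_header(incoming));
	}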
+
+gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+                             unsigned int vlan_tci, struct sk_buff *skb)
+{
+       if (netpoll_rx_on(skb))
+               return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+                       ? GRO_DROP : GRO_NORMAL;
+
+       skb_gro_reset_offset(skb);
+
+       return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
+}
+EXPORT_SYMBOL(vlan_gro_receive);
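
A sketch of the intended caller: a NAPI driver with VLAN RX acceleration
feeds tagged frames straight into GRO, falling back to plain GRO for untagged
ones (adapter, vlgrp and vlan_tag_stripped are hypothetical driver state):

	/* Inside the NAPI poll loop, after eth_type_trans(): */
	if (adapter->vlgrp && vlan_tag_stripped)
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 vlan_tci, skb);
	else
		napi_gro_receive(&adapter->napi, skb);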
+
+gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+                           unsigned int vlan_tci)
+{
+       struct sk_buff *skb = napi_frags_skb(napi);
+
+       if (!skb)
+               return GRO_DROP;
+
+       if (netpoll_rx_on(skb)) {
+               skb->protocol = eth_type_trans(skb, skb->dev);
+               return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+                       ? GRO_DROP : GRO_NORMAL;
+       }
+
+       return napi_frags_finish(napi, skb,
+                                vlan_gro_common(napi, grp, vlan_tci, skb));
+}
+EXPORT_SYMBOL(vlan_gro_frags);
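
vlan_gro_frags() pairs with napi_get_frags(): the driver borrows the per-NAPI
skb, attaches its RX pages, and the VLAN layer retrieves it again through
napi_frags_skb() as above. A sketch, with the page/offset/len bookkeeping
being driver-specific and hypothetical:

	struct sk_buff *skb = napi_get_frags(&adapter->napi);

	if (!skb)
		return;		/* allocation failed: recycle the page */

	/* Attach the received page fragment and grow the skb accordingly */
	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	vlan_gro_frags(&adapter->napi, adapter->vlgrp, vlan_tci);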