net: rename skb->iif to skb->skb_iif
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b242020..bfa3e78 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1,11 +1,9 @@
 /*
  *     Routines having to do with the 'struct sk_buff' memory handlers.
  *
- *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
+ *     Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
  *                     Florian La Roche <rzsfl@rz.uni-sb.de>
  *
- *     Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
- *
  *     Fixes:
  *             Alan Cox        :       Fixed the worst of the load
  *                                     balancer bugs.
@@ -41,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
 #endif
 #include <linux/string.h>
 #include <linux/skbuff.h>
+#include <linux/splice.h>
 #include <linux/cache.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/errqueue.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#include <trace/events/skb.h>
 
 #include "kmap_skb.h"
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
+static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+                                 struct pipe_buffer *buf)
+{
+       put_page(buf->page);
+}
+
+static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+                               struct pipe_buffer *buf)
+{
+       get_page(buf->page);
+}
+
+static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+                              struct pipe_buffer *buf)
+{
+       return 1;
+}
+
+
+/* Pipe buffer operations for a socket. */
+static struct pipe_buf_operations sock_pipe_buf_ops = {
+       .can_merge = 0,
+       .map = generic_pipe_buf_map,
+       .unmap = generic_pipe_buf_unmap,
+       .confirm = generic_pipe_buf_confirm,
+       .release = sock_pipe_buf_release,
+       .steal = sock_pipe_buf_steal,
+       .get = sock_pipe_buf_get,
+};
+
 /*
  *     Keep out-of-line to prevent kernel bloat.
  *     __builtin_return_address is not used because it is not always
@@ -87,11 +120,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%p end:%p dev:%s\n",
-              here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
+              here, skb->len, sz, skb->head, skb->data,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
+EXPORT_SYMBOL(skb_over_panic);
 
 /**
  *     skb_under_panic -       private function
@@ -105,19 +140,13 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%p end:%p dev:%s\n",
-              here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
+              here, skb->len, sz, skb->head, skb->data,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
-
-void skb_truesize_bug(struct sk_buff *skb)
-{
-       printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
-              "len=%u, sizeof(sk_buff)=%Zd\n",
-              skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
+EXPORT_SYMBOL(skb_under_panic);
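
The %p -> %#lx change above follows from skb->tail and skb->end no longer being plain pointers: with NET_SKBUFF_DATA_USES_OFFSET they are byte offsets from skb->head. Roughly, from include/linux/skbuff.h of this era (quoted from memory, so treat as a sketch):

    #if BITS_PER_LONG > 32
    #define NET_SKBUFF_DATA_USES_OFFSET 1
    #endif

    #ifdef NET_SKBUFF_DATA_USES_OFFSET
    typedef unsigned int sk_buff_data_t;        /* offset from skb->head */
    #else
    typedef unsigned char *sk_buff_data_t;      /* plain pointer */
    #endif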
 
 /*     Allocate a new skbuff. We do this ourselves so we can fill in a few
  *     'private' fields and also do memory statistics to find all the
@@ -155,20 +184,30 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        if (!skb)
                goto out;
 
-       /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;
 
-       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       /*
+        * Only clear those fields we need to clear, not those that we will
+        * actually initialise below. Hence, don't put any more fields after
+        * the tail pointer in struct sk_buff!
+        */
+       memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
-       skb->tail = data;
-       skb->end  = data + size;
+       skb_reset_tail_pointer(skb);
+       skb->end = skb->tail + size;
+       kmemcheck_annotate_bitfield(skb, flags1);
+       kmemcheck_annotate_bitfield(skb, flags2);
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->mac_header = ~0U;
+#endif
+
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
@@ -177,12 +216,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        shinfo->gso_segs = 0;
        shinfo->gso_type = 0;
        shinfo->ip6_frag_id = 0;
-       shinfo->frag_list = NULL;
+       shinfo->tx_flags.flags = 0;
+       skb_frag_list_init(skb);
+       memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
        if (fclone) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+               kmemcheck_annotate_bitfield(child, flags1);
+               kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);
 
@@ -195,6 +238,7 @@ nodata:
        skb = NULL;
        goto out;
 }
+EXPORT_SYMBOL(__alloc_skb);
 
 /**
  *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -222,6 +266,49 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        }
        return skb;
 }
+EXPORT_SYMBOL(__netdev_alloc_skb);
+
+struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+       int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+       struct page *page;
+
+       page = alloc_pages_node(node, gfp_mask, 0);
+       return page;
+}
+EXPORT_SYMBOL(__netdev_alloc_page);
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+               int size)
+{
+       skb_fill_page_desc(skb, i, page, off, size);
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += size;
+}
+EXPORT_SYMBOL(skb_add_rx_frag);
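
A minimal sketch of how these two helpers pair up in a receive path: allocate a page near the device's NUMA node, let the hardware fill it, then attach it to an skb as a fragment. The function name and frag_len are hypothetical; netdev_alloc_page() is assumed to be the GFP_ATOMIC wrapper around __netdev_alloc_page().

    static int example_rx_add_page(struct net_device *dev, struct sk_buff *skb,
                                   unsigned int frag_len)
    {
            struct page *page = netdev_alloc_page(dev);

            if (!page)
                    return -ENOMEM;

            /* ... hardware DMAs frag_len bytes into the page ... */

            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, frag_len);
            return 0;
    }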
+
+/**
+ *     dev_alloc_skb - allocate an skbuff for receiving
+ *     @length: length to allocate
+ *
+ *     Allocate a new &sk_buff and assign it a usage count of one. The
+ *     buffer has unspecified headroom built in. Users should allocate
+ *     the headroom they think they need without accounting for the
+ *     built in space. The built in space is used for optimisations.
+ *
+ *     %NULL is returned if there is no free memory. Although this function
+ *     allocates memory it can be called from an interrupt.
+ */
+struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+       /*
+        * There is more code here than it seems:
+        * __dev_alloc_skb is an inline
+        */
+       return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(dev_alloc_skb);
 
 static void skb_drop_list(struct sk_buff **listp)
 {
@@ -245,7 +332,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
        struct sk_buff *list;
 
-       for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+       skb_walk_frags(skb, list)
                skb_get(list);
 }
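
skb_walk_frags() and skb_has_frags(), which the rest of this patch uses in place of open-coded frag_list loops, are small wrappers; roughly, from include/linux/skbuff.h (quoted from memory):

    static inline bool skb_has_frags(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->frag_list != NULL;
    }

    #define skb_walk_frags(skb, iter) \
            for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)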
 
@@ -260,7 +347,7 @@ static void skb_release_data(struct sk_buff *skb)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }
 
-               if (skb_shinfo(skb)->frag_list)
+               if (skb_has_frags(skb))
                        skb_drop_fraglist(skb);
 
                kfree(skb->head);
@@ -270,12 +357,11 @@ static void skb_release_data(struct sk_buff *skb)
 /*
  *     Free an skbuff by memory without cleaning the state.
  */
-void kfree_skbmem(struct sk_buff *skb)
+static void kfree_skbmem(struct sk_buff *skb)
 {
        struct sk_buff *other;
        atomic_t *fclone_ref;
 
-       skb_release_data(skb);
        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
@@ -299,21 +385,12 @@ void kfree_skbmem(struct sk_buff *skb)
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
-       };
+       }
 }
 
-/**
- *     __kfree_skb - private function
- *     @skb: buffer
- *
- *     Free an sk_buff. Release anything attached to the buffer.
- *     Clean the state. This is an internal helper function. Users should
- *     always call kfree_skb
- */
-
-void __kfree_skb(struct sk_buff *skb)
+static void skb_release_head_state(struct sk_buff *skb)
 {
-       dst_release(skb->dst);
+       skb_dst_drop(skb);
 #ifdef CONFIG_XFRM
        secpath_put(skb->sp);
 #endif
@@ -321,15 +398,13 @@ void __kfree_skb(struct sk_buff *skb)
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
-#ifdef CONFIG_NETFILTER
-       nf_conntrack_put(skb->nfct);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+       nf_conntrack_put(skb->nfct);
        nf_conntrack_put_reasm(skb->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
 #endif
-#endif
 /* XXX: IS this still necessary? - JHS */
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
@@ -337,9 +412,30 @@ void __kfree_skb(struct sk_buff *skb)
        skb->tc_verd = 0;
 #endif
 #endif
+}
+
+/* Free everything but the sk_buff shell. */
+static void skb_release_all(struct sk_buff *skb)
+{
+       skb_release_head_state(skb);
+       skb_release_data(skb);
+}
+
+/**
+ *     __kfree_skb - private function
+ *     @skb: buffer
+ *
+ *     Free an sk_buff. Release anything attached to the buffer.
+ *     Clean the state. This is an internal helper function. Users should
+ *     always call kfree_skb
+ */
 
+void __kfree_skb(struct sk_buff *skb)
+{
+       skb_release_all(skb);
        kfree_skbmem(skb);
 }
+EXPORT_SYMBOL(__kfree_skb);
 
 /**
  *     kfree_skb - free an sk_buff
@@ -356,8 +452,169 @@ void kfree_skb(struct sk_buff *skb)
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
+       trace_kfree_skb(skb, __builtin_return_address(0));
+       __kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb);
+
+/**
+ *     consume_skb - free an skbuff
+ *     @skb: buffer to free
+ *
+ *     Drop a ref to the buffer and free it if the usage count has hit zero.
+ *     Functions identically to kfree_skb, but kfree_skb assumes that the frame
+ *     is being dropped after a failure and notes that.
+ */
+void consume_skb(struct sk_buff *skb)
+{
+       if (unlikely(!skb))
+               return;
+       if (likely(atomic_read(&skb->users) == 1))
+               smp_rmb();
+       else if (likely(!atomic_dec_and_test(&skb->users)))
+               return;
        __kfree_skb(skb);
 }
+EXPORT_SYMBOL(consume_skb);
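
A hedged sketch of how a driver's TX-completion path would choose between the two, so that drop monitoring (the trace_kfree_skb() hook added above) only sees genuine drops; the function is invented for illustration.

    static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
    {
            if (tx_ok)
                    consume_skb(skb);       /* normal completion, not a drop */
            else
                    kfree_skb(skb);         /* counted by drop monitors */
    }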
+
+/**
+ *     skb_recycle_check - check if skb can be reused for receive
+ *     @skb: buffer
+ *     @skb_size: minimum receive buffer size
+ *
+ *     Checks that the skb passed in is not shared or cloned, and
+ *     that it is linear and its head portion is at least as large as
+ *     skb_size so that it can be recycled as a receive buffer.
+ *     If these conditions are met, this function does any necessary
+ *     reference count dropping and cleans up the skbuff as if it
+ *     just came from __alloc_skb().
+ */
+int skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+       struct skb_shared_info *shinfo;
+
+       if (irqs_disabled())
+               return 0;
+
+       if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+               return 0;
+
+       skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+       if (skb_end_pointer(skb) - skb->head < skb_size)
+               return 0;
+
+       if (skb_shared(skb) || skb_cloned(skb))
+               return 0;
+
+       skb_release_head_state(skb);
+       shinfo = skb_shinfo(skb);
+       atomic_set(&shinfo->dataref, 1);
+       shinfo->nr_frags = 0;
+       shinfo->gso_size = 0;
+       shinfo->gso_segs = 0;
+       shinfo->gso_type = 0;
+       shinfo->ip6_frag_id = 0;
+       shinfo->tx_flags.flags = 0;
+       skb_frag_list_init(skb);
+       memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
+
+       memset(skb, 0, offsetof(struct sk_buff, tail));
+       skb->data = skb->head + NET_SKB_PAD;
+       skb_reset_tail_pointer(skb);
+
+       return 1;
+}
+EXPORT_SYMBOL(skb_recycle_check);
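
A minimal sketch of the intended use, loosely modelled on an ethernet driver recycling TX-completed buffers into its RX ring; struct example_priv and EXAMPLE_RX_RING_SIZE are hypothetical.

    #define EXAMPLE_RX_RING_SIZE 256

    struct example_priv {
            struct sk_buff_head rx_recycle;
            unsigned int rx_buffer_size;
    };

    static void example_reuse_tx_skb(struct example_priv *priv, struct sk_buff *skb)
    {
            if (skb_queue_len(&priv->rx_recycle) < EXAMPLE_RX_RING_SIZE &&
                skb_recycle_check(skb, priv->rx_buffer_size))
                    __skb_queue_head(&priv->rx_recycle, skb);
            else
                    dev_kfree_skb_any(skb);
    }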
+
+static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+       new->tstamp             = old->tstamp;
+       new->dev                = old->dev;
+       new->transport_header   = old->transport_header;
+       new->network_header     = old->network_header;
+       new->mac_header         = old->mac_header;
+       skb_dst_set(new, dst_clone(skb_dst(old)));
+#ifdef CONFIG_XFRM
+       new->sp                 = secpath_get(old->sp);
+#endif
+       memcpy(new->cb, old->cb, sizeof(old->cb));
+       new->csum               = old->csum;
+       new->local_df           = old->local_df;
+       new->pkt_type           = old->pkt_type;
+       new->ip_summed          = old->ip_summed;
+       skb_copy_queue_mapping(new, old);
+       new->priority           = old->priority;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+       new->ipvs_property      = old->ipvs_property;
+#endif
+       new->protocol           = old->protocol;
+       new->mark               = old->mark;
+       new->skb_iif            = old->skb_iif;
+       __nf_copy(new, old);
+#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
+    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+       new->nf_trace           = old->nf_trace;
+#endif
+#ifdef CONFIG_NET_SCHED
+       new->tc_index           = old->tc_index;
+#ifdef CONFIG_NET_CLS_ACT
+       new->tc_verd            = old->tc_verd;
+#endif
+#endif
+       new->vlan_tci           = old->vlan_tci;
+
+       skb_copy_secmark(new, old);
+}
+
+/*
+ * You should not add any new code to this function.  Add it to
+ * __copy_skb_header above instead.
+ */
+static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
+{
+#define C(x) n->x = skb->x
+
+       n->next = n->prev = NULL;
+       n->sk = NULL;
+       __copy_skb_header(n, skb);
+
+       C(len);
+       C(data_len);
+       C(mac_len);
+       n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+       n->cloned = 1;
+       n->nohdr = 0;
+       n->destructor = NULL;
+       C(tail);
+       C(end);
+       C(head);
+       C(data);
+       C(truesize);
+       atomic_set(&n->users, 1);
+
+       atomic_inc(&(skb_shinfo(skb)->dataref));
+       skb->cloned = 1;
+
+       return n;
+#undef C
+}
+
+/**
+ *     skb_morph       -       morph one skb into another
+ *     @dst: the skb to receive the contents
+ *     @src: the skb to supply the contents
+ *
+ *     This is identical to skb_clone except that the target skb is
+ *     supplied by the user.
+ *
+ *     The target skb is returned upon exit.
+ */
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
+{
+       skb_release_all(dst);
+       return __skb_clone(dst, src);
+}
+EXPORT_SYMBOL_GPL(skb_morph);
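
skb_morph() lets a caller-supplied skb shell take over another skb's data: @dst is released and then becomes a clone of @src. A hedged sketch of the reassembly-style pattern it enables (names invented; not code from this patch):

    static void example_take_over(struct sk_buff *head, struct sk_buff *donor)
    {
            skb_morph(head, donor);
            /* head now shares donor's data; dataref was bumped by __skb_clone() */
            consume_skb(donor);     /* the data survives, owned by head */
    }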
 
 /**
  *     skb_clone       -       duplicate an sk_buff
@@ -387,126 +644,34 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;
+
+               kmemcheck_annotate_bitfield(n, flags1);
+               kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }
 
-#define C(x) n->x = skb->x
-
-       n->next = n->prev = NULL;
-       n->sk = NULL;
-       C(tstamp);
-       C(dev);
-       C(h);
-       C(nh);
-       C(mac);
-       C(dst);
-       dst_clone(skb->dst);
-       C(sp);
-#ifdef CONFIG_INET
-       secpath_get(skb->sp);
-#endif
-       memcpy(n->cb, skb->cb, sizeof(skb->cb));
-       C(len);
-       C(data_len);
-       C(mac_len);
-       C(csum);
-       C(local_df);
-       n->cloned = 1;
-       n->nohdr = 0;
-       C(pkt_type);
-       C(ip_summed);
-       C(priority);
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-       C(ipvs_property);
-#endif
-       C(protocol);
-       n->destructor = NULL;
-       C(mark);
-#ifdef CONFIG_NETFILTER
-       C(nfct);
-       nf_conntrack_get(skb->nfct);
-       C(nfctinfo);
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       C(nfct_reasm);
-       nf_conntrack_get_reasm(skb->nfct_reasm);
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       C(nf_bridge);
-       nf_bridge_get(skb->nf_bridge);
-#endif
-#endif /*CONFIG_NETFILTER*/
-#ifdef CONFIG_NET_SCHED
-       C(tc_index);
-#ifdef CONFIG_NET_CLS_ACT
-       n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-       n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-       n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-       C(iif);
-#endif
-       skb_copy_secmark(n, skb);
-#endif
-       C(truesize);
-       atomic_set(&n->users, 1);
-       C(head);
-       C(data);
-       C(tail);
-       C(end);
-
-       atomic_inc(&(skb_shinfo(skb)->dataref));
-       skb->cloned = 1;
-
-       return n;
+       return __skb_clone(n, skb);
 }
+EXPORT_SYMBOL(skb_clone);
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;
-
-       new->sk         = NULL;
-       new->dev        = old->dev;
-       new->priority   = old->priority;
-       new->protocol   = old->protocol;
-       new->dst        = dst_clone(old->dst);
-#ifdef CONFIG_INET
-       new->sp         = secpath_get(old->sp);
-#endif
-       new->h.raw      = old->h.raw + offset;
-       new->nh.raw     = old->nh.raw + offset;
-       new->mac.raw    = old->mac.raw + offset;
-       memcpy(new->cb, old->cb, sizeof(old->cb));
-       new->local_df   = old->local_df;
-       new->fclone     = SKB_FCLONE_UNAVAILABLE;
-       new->pkt_type   = old->pkt_type;
-       new->tstamp     = old->tstamp;
-       new->destructor = NULL;
-       new->mark       = old->mark;
-#ifdef CONFIG_NETFILTER
-       new->nfct       = old->nfct;
-       nf_conntrack_get(old->nfct);
-       new->nfctinfo   = old->nfctinfo;
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       new->nfct_reasm = old->nfct_reasm;
-       nf_conntrack_get_reasm(old->nfct_reasm);
-#endif
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-       new->ipvs_property = old->ipvs_property;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       new->nf_bridge  = old->nf_bridge;
-       nf_bridge_get(old->nf_bridge);
-#endif
-#endif
-#ifdef CONFIG_NET_SCHED
-#ifdef CONFIG_NET_CLS_ACT
-       new->tc_verd = old->tc_verd;
 #endif
-       new->tc_index   = old->tc_index;
+
+       __copy_skb_header(new, old);
+
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
+       /* {transport,network,mac}_header are relative to skb->head */
+       new->transport_header += offset;
+       new->network_header   += offset;
+       if (skb_mac_header_was_set(new))
+               new->mac_header       += offset;
 #endif
-       skb_copy_secmark(new, old);
-       atomic_set(&new->users, 1);
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
@@ -535,8 +700,12 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
-                                     gfp_mask);
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end + skb->data_len, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
+#endif
        if (!n)
                return NULL;
 
@@ -544,8 +713,6 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
-       n->csum      = skb->csum;
-       n->ip_summed = skb->ip_summed;
 
        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();
@@ -553,7 +720,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        copy_skb_header(n, skb);
        return n;
 }
-
+EXPORT_SYMBOL(skb_copy);
 
 /**
  *     pskb_copy       -       create copy of an sk_buff with private head.
@@ -573,8 +740,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
-
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head, gfp_mask);
+#endif
        if (!n)
                goto out;
 
@@ -583,9 +754,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
-       memcpy(n->data, skb->data, n->len);
-       n->csum      = skb->csum;
-       n->ip_summed = skb->ip_summed;
+       skb_copy_from_linear_data(skb, n->data, n->len);
 
        n->truesize += skb->data_len;
        n->data_len  = skb->data_len;
@@ -601,7 +770,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
                skb_shinfo(n)->nr_frags = i;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
+       if (skb_has_frags(skb)) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }
@@ -610,6 +779,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
        return n;
 }
+EXPORT_SYMBOL(pskb_copy);
 
 /**
  *     pskb_expand_head - reallocate header of &sk_buff
@@ -632,9 +802,15 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 {
        int i;
        u8 *data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       int size = nhead + skb->end + ntail;
+#else
        int size = nhead + (skb->end - skb->head) + ntail;
+#endif
        long off;
 
+       BUG_ON(nhead < 0);
+
        if (skb_shared(skb))
                BUG();
 
@@ -646,13 +822,18 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 
        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       memcpy(data + nhead, skb->head, skb->tail);
+#else
        memcpy(data + nhead, skb->head, skb->tail - skb->head);
-       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+#endif
+       memcpy(data + size, skb_end_pointer(skb),
+              sizeof(struct skb_shared_info));
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);
 
-       if (skb_shinfo(skb)->frag_list)
+       if (skb_has_frags(skb))
                skb_clone_fraglist(skb);
 
        skb_release_data(skb);
@@ -660,13 +841,22 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        off = (data + nhead) - skb->head;
 
        skb->head     = data;
-       skb->end      = data + size;
        skb->data    += off;
-       skb->tail    += off;
-       skb->mac.raw += off;
-       skb->h.raw   += off;
-       skb->nh.raw  += off;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->end      = size;
+       off           = nhead;
+#else
+       skb->end      = skb->head + size;
+#endif
+       /* {transport,network,mac}_header and tail are relative to skb->head */
+       skb->tail             += off;
+       skb->transport_header += off;
+       skb->network_header   += off;
+       if (skb_mac_header_was_set(skb))
+               skb->mac_header += off;
+       skb->csum_start       += nhead;
        skb->cloned   = 0;
+       skb->hdr_len  = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;
@@ -674,6 +864,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 nodata:
        return -ENOMEM;
 }
+EXPORT_SYMBOL(pskb_expand_head);
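
A hedged sketch of the classic caller pattern around pskb_expand_head(): make sure there is enough writable headroom before pushing a new header, reallocating only when needed (in-tree callers usually go through helpers such as skb_cow_head()); example_push_header() and hdr_len are placeholders.

    static int example_push_header(struct sk_buff *skb, unsigned int hdr_len)
    {
            unsigned int headroom = skb_headroom(skb);
            int delta = 0;

            if (headroom < hdr_len)
                    delta = hdr_len - headroom;

            if (delta || skb_cloned(skb)) {
                    if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
                            return -ENOMEM;
            }

            skb_push(skb, hdr_len);
            /* ... fill in the new header at skb->data ... */
            return 0;
    }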
 
 /* Make private copy of skb with writable head and some headroom */
 
@@ -694,7 +885,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
        }
        return skb2;
 }
-
+EXPORT_SYMBOL(skb_realloc_headroom);
 
 /**
  *     skb_copy_expand -       copy and expand sk_buff
@@ -713,9 +904,6 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *
  *     You must pass %GFP_ATOMIC as the allocation priority if this function
  *     is called from an interrupt.
- *
- *     BUG ALERT: ip_summed is not copied. Why does this work? Is it used
- *     only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
@@ -726,7 +914,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
+       int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
+       int off;
 
        if (!n)
                return NULL;
@@ -736,7 +926,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
 
-       head_copy_len = skb_headroom(skb);
+       head_copy_len = oldheadroom;
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
@@ -750,8 +940,18 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
        copy_skb_header(n, skb);
 
+       off                  = newheadroom - oldheadroom;
+       n->csum_start       += off;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n->transport_header += off;
+       n->network_header   += off;
+       if (skb_mac_header_was_set(skb))
+               n->mac_header += off;
+#endif
+
        return n;
 }
+EXPORT_SYMBOL(skb_copy_expand);
 
 /**
  *     skb_pad                 -       zero pad the tail of an skb
@@ -797,22 +997,95 @@ free_skb:
        kfree_skb(skb);
        return err;
 }
+EXPORT_SYMBOL(skb_pad);
 
-/* Trims skb to length len. It can change skb pointers.
+/**
+ *     skb_put - add data to a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer. If this would
+ *     exceed the total buffer size the kernel will panic. A pointer to the
+ *     first byte of the extra data is returned.
  */
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+       unsigned char *tmp = skb_tail_pointer(skb);
+       SKB_LINEAR_ASSERT(skb);
+       skb->tail += len;
+       skb->len  += len;
+       if (unlikely(skb->tail > skb->end))
+               skb_over_panic(skb, len, __builtin_return_address(0));
+       return tmp;
+}
+EXPORT_SYMBOL(skb_put);
 
-int ___pskb_trim(struct sk_buff *skb, unsigned int len)
+/**
+ *     skb_push - add data to the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer at the buffer
+ *     start. If this would exceed the total buffer headroom the kernel will
+ *     panic. A pointer to the first byte of the extra data is returned.
+ */
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
 {
-       struct sk_buff **fragp;
-       struct sk_buff *frag;
-       int offset = skb_headlen(skb);
-       int nfrags = skb_shinfo(skb)->nr_frags;
-       int i;
-       int err;
+       skb->data -= len;
+       skb->len  += len;
+       if (unlikely(skb->data<skb->head))
+               skb_under_panic(skb, len, __builtin_return_address(0));
+       return skb->data;
+}
+EXPORT_SYMBOL(skb_push);
 
-       if (skb_cloned(skb) &&
-           unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
-               return err;
+/**
+ *     skb_pull - remove data from the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to remove
+ *
+ *     This function removes data from the start of a buffer, returning
+ *     the memory to the headroom. A pointer to the next data in the buffer
+ *     is returned. Once the data has been pulled future pushes will overwrite
+ *     the old data.
+ */
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+       return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+EXPORT_SYMBOL(skb_pull);
+
+/**
+ *     skb_trim - remove end from a buffer
+ *     @skb: buffer to alter
+ *     @len: new length
+ *
+ *     Cut the length of a buffer down by removing data from the tail. If
+ *     the buffer is already under the length specified it is not modified.
+ *     The skb must be linear.
+ */
+void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (skb->len > len)
+               __skb_trim(skb, len);
+}
+EXPORT_SYMBOL(skb_trim);
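
Together with skb_reserve(), the helpers moved out of line above cover the usual build-a-frame sequence; a minimal sketch (the header layout and payload handling are invented for illustration):

    struct example_hdr {
            __be16 type;
            __be16 len;
    };

    static struct sk_buff *example_build_frame(const void *payload,
                                               unsigned int payload_len)
    {
            struct example_hdr *hdr;
            struct sk_buff *skb;

            skb = alloc_skb(sizeof(*hdr) + payload_len, GFP_ATOMIC);
            if (!skb)
                    return NULL;

            skb_reserve(skb, sizeof(*hdr));         /* headroom for the header */
            memcpy(skb_put(skb, payload_len), payload, payload_len);

            hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr));
            hdr->type = htons(1);
            hdr->len  = htons(payload_len);

            return skb;
    }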
+
+/* Trims skb to length len. It can change skb pointers.
+ */
+
+int ___pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+       struct sk_buff **fragp;
+       struct sk_buff *frag;
+       int offset = skb_headlen(skb);
+       int nfrags = skb_shinfo(skb)->nr_frags;
+       int i;
+       int err;
+
+       if (skb_cloned(skb) &&
+           unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+               return err;
 
        i = 0;
        if (offset >= len)
@@ -834,7 +1107,7 @@ drop_pages:
                for (; i < nfrags; i++)
                        put_page(skb_shinfo(skb)->frags[i].page);
 
-               if (skb_shinfo(skb)->frag_list)
+               if (skb_has_frags(skb))
                        skb_drop_fraglist(skb);
                goto done;
        }
@@ -877,11 +1150,12 @@ done:
        } else {
                skb->len       = len;
                skb->data_len  = 0;
-               skb->tail      = skb->data + len;
+               skb_set_tail_pointer(skb, len);
        }
 
        return 0;
 }
+EXPORT_SYMBOL(___pskb_trim);
 
 /**
  *     __pskb_pull_tail - advance tail of skb header
@@ -922,13 +1196,13 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
                        return NULL;
        }
 
-       if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
+       if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
                BUG();
 
        /* Optimization: no fragments, no reasons to preestimate
         * size of pulled pages. Superb.
         */
-       if (!skb_shinfo(skb)->frag_list)
+       if (!skb_has_frags(skb))
                goto pull_pages;
 
        /* Estimate size of pulled pages. */
@@ -975,8 +1249,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
-                                       if (clone)
-                                               kfree_skb(clone);
+                                       kfree_skb(clone);
                                        return NULL;
                                }
                                break;
@@ -1018,15 +1291,17 @@ pull_pages:
        skb->tail     += delta;
        skb->data_len -= delta;
 
-       return skb->tail;
+       return skb_tail_pointer(skb);
 }
+EXPORT_SYMBOL(__pskb_pull_tail);
 
 /* Copy some data bits from skb to kernel buffer. */
 
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
-       int i, copy;
        int start = skb_headlen(skb);
+       struct sk_buff *frag_iter;
+       int i, copy;
 
        if (offset > (int)skb->len - len)
                goto fault;
@@ -1035,7 +1310,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
-               memcpy(to, skb->data + offset, copy);
+               skb_copy_from_linear_data_offset(skb, offset, to, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
@@ -1045,7 +1320,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
@@ -1068,28 +1343,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-               for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               if (skb_copy_bits(list, offset - start,
-                                                 to, copy))
-                                       goto fault;
-                               if ((len -= copy) == 0)
-                                       return 0;
-                               offset += copy;
-                               to     += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       if (skb_copy_bits(frag_iter, offset - start, to, copy))
+                               goto fault;
+                       if ((len -= copy) == 0)
+                               return 0;
+                       offset += copy;
+                       to     += copy;
                }
+               start = end;
        }
        if (!len)
                return 0;
@@ -1097,6 +1367,226 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 fault:
        return -EFAULT;
 }
+EXPORT_SYMBOL(skb_copy_bits);
+
+/*
+ * Callback from splice_to_pipe(), if we need to release some pages
+ * at the end of the spd in case we error'ed out in filling the pipe.
+ */
+static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+       put_page(spd->pages[i]);
+}
+
+static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+                                         unsigned int *offset,
+                                         struct sk_buff *skb, struct sock *sk)
+{
+       struct page *p = sk->sk_sndmsg_page;
+       unsigned int off;
+
+       if (!p) {
+new_page:
+               p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
+               if (!p)
+                       return NULL;
+
+               off = sk->sk_sndmsg_off = 0;
+               /* hold one ref to this page until it's full */
+       } else {
+               unsigned int mlen;
+
+               off = sk->sk_sndmsg_off;
+               mlen = PAGE_SIZE - off;
+               if (mlen < 64 && mlen < *len) {
+                       put_page(p);
+                       goto new_page;
+               }
+
+               *len = min_t(unsigned int, *len, mlen);
+       }
+
+       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+       sk->sk_sndmsg_off += *len;
+       *offset = off;
+       get_page(p);
+
+       return p;
+}
+
+/*
+ * Fill page/offset/length into spd, if it can hold more pages.
+ */
+static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+                               unsigned int *len, unsigned int offset,
+                               struct sk_buff *skb, int linear,
+                               struct sock *sk)
+{
+       if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+               return 1;
+
+       if (linear) {
+               page = linear_to_page(page, len, &offset, skb, sk);
+               if (!page)
+                       return 1;
+       } else
+               get_page(page);
+
+       spd->pages[spd->nr_pages] = page;
+       spd->partial[spd->nr_pages].len = *len;
+       spd->partial[spd->nr_pages].offset = offset;
+       spd->nr_pages++;
+
+       return 0;
+}
+
+static inline void __segment_seek(struct page **page, unsigned int *poff,
+                                 unsigned int *plen, unsigned int off)
+{
+       unsigned long n;
+
+       *poff += off;
+       n = *poff / PAGE_SIZE;
+       if (n)
+               *page = nth_page(*page, n);
+
+       *poff = *poff % PAGE_SIZE;
+       *plen -= off;
+}
+
+static inline int __splice_segment(struct page *page, unsigned int poff,
+                                  unsigned int plen, unsigned int *off,
+                                  unsigned int *len, struct sk_buff *skb,
+                                  struct splice_pipe_desc *spd, int linear,
+                                  struct sock *sk)
+{
+       if (!*len)
+               return 1;
+
+       /* skip this segment if already processed */
+       if (*off >= plen) {
+               *off -= plen;
+               return 0;
+       }
+
+       /* ignore any bits we already processed */
+       if (*off) {
+               __segment_seek(&page, &poff, &plen, *off);
+               *off = 0;
+       }
+
+       do {
+               unsigned int flen = min(*len, plen);
+
+               /* the linear region may spread across several pages  */
+               flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
+
+               if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
+                       return 1;
+
+               __segment_seek(&page, &poff, &plen, flen);
+               *len -= flen;
+
+       } while (*len && plen);
+
+       return 0;
+}
+
+/*
+ * Map linear and fragment data from the skb to spd. It reports failure if the
+ * pipe is full or if we already spliced the requested length.
+ */
+static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
+                            unsigned int *len, struct splice_pipe_desc *spd,
+                            struct sock *sk)
+{
+       int seg;
+
+       /*
+        * map the linear part
+        */
+       if (__splice_segment(virt_to_page(skb->data),
+                            (unsigned long) skb->data & (PAGE_SIZE - 1),
+                            skb_headlen(skb),
+                            offset, len, skb, spd, 1, sk))
+               return 1;
+
+       /*
+        * then map the fragments
+        */
+       for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
+               const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+
+               if (__splice_segment(f->page, f->page_offset, f->size,
+                                    offset, len, skb, spd, 0, sk))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Map data from the skb to a pipe. Should handle the linear part,
+ * the fragments, and the frag list. It does NOT handle frag lists within
+ * the frag list, if such a thing exists. We'd probably need to recurse to
+ * handle that cleanly.
+ */
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int tlen,
+                   unsigned int flags)
+{
+       struct partial_page partial[PIPE_BUFFERS];
+       struct page *pages[PIPE_BUFFERS];
+       struct splice_pipe_desc spd = {
+               .pages = pages,
+               .partial = partial,
+               .flags = flags,
+               .ops = &sock_pipe_buf_ops,
+               .spd_release = sock_spd_release,
+       };
+       struct sk_buff *frag_iter;
+       struct sock *sk = skb->sk;
+
+       /*
+        * __skb_splice_bits() only fails if the output has no room left,
+        * so no point in going over the frag_list for the error case.
+        */
+       if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+               goto done;
+       else if (!tlen)
+               goto done;
+
+       /*
+        * now see if we have a frag_list to map
+        */
+       skb_walk_frags(skb, frag_iter) {
+               if (!tlen)
+                       break;
+               if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+                       break;
+       }
+
+done:
+       if (spd.nr_pages) {
+               int ret;
+
+               /*
+                * Drop the socket lock, otherwise we have reverse
+                * locking dependencies between sk_lock and i_mutex
+                * here as compared to sendfile(). We enter here
+                * with the socket lock held, and splice_to_pipe() will
+                * grab the pipe inode lock. For sendfile() emulation,
+                * we call into ->sendpage() with the i_mutex lock held
+                * and networking will grab the socket lock.
+                */
+               release_sock(sk);
+               ret = splice_to_pipe(pipe, &spd);
+               lock_sock(sk);
+               return ret;
+       }
+
+       return 0;
+}
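
As the comment in the done: path notes, the caller holds the socket lock and skb_splice_bits() drops it around splice_to_pipe(). A minimal hedged sketch of a consumer (the wrapper name is invented); the return value is bytes spliced, 0 if the pipe was full, or a negative error:

    /* Called with the socket lock held. */
    static int example_splice_skb(struct sk_buff *skb, unsigned int offset,
                                  struct pipe_inode_info *pipe,
                                  unsigned int len, unsigned int flags)
    {
            return skb_splice_bits(skb, offset, pipe, len, flags);
    }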
 
 /**
  *     skb_store_bits - store bits from kernel buffer to skb
@@ -1110,10 +1600,11 @@ fault:
  *     traversing fragment lists and such.
  */
 
-int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-       int i, copy;
        int start = skb_headlen(skb);
+       struct sk_buff *frag_iter;
+       int i, copy;
 
        if (offset > (int)skb->len - len)
                goto fault;
@@ -1121,7 +1612,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
-               memcpy(skb->data + offset, from, copy);
+               skb_copy_to_linear_data_offset(skb, offset, from, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
@@ -1132,7 +1623,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + frag->size;
                if ((copy = end - offset) > 0) {
@@ -1154,28 +1645,24 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-               for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               if (skb_store_bits(list, offset - start,
-                                                  from, copy))
-                                       goto fault;
-                               if ((len -= copy) == 0)
-                                       return 0;
-                               offset += copy;
-                               from += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       if (skb_store_bits(frag_iter, offset - start,
+                                          from, copy))
+                               goto fault;
+                       if ((len -= copy) == 0)
+                               return 0;
+                       offset += copy;
+                       from += copy;
                }
+               start = end;
        }
        if (!len)
                return 0;
@@ -1183,7 +1670,6 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
 fault:
        return -EFAULT;
 }
-
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
@@ -1193,6 +1679,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
        int start = skb_headlen(skb);
        int i, copy = start - offset;
+       struct sk_buff *frag_iter;
        int pos = 0;
 
        /* Checksum header. */
@@ -1209,7 +1696,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
@@ -1232,34 +1719,31 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-               for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               __wsum csum2;
-                               if (copy > len)
-                                       copy = len;
-                               csum2 = skb_checksum(list, offset - start,
-                                                    copy, 0);
-                               csum = csum_block_add(csum, csum2, pos);
-                               if ((len -= copy) == 0)
-                                       return csum;
-                               offset += copy;
-                               pos    += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       __wsum csum2;
+                       if (copy > len)
+                               copy = len;
+                       csum2 = skb_checksum(frag_iter, offset - start,
+                                            copy, 0);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if ((len -= copy) == 0)
+                               return csum;
+                       offset += copy;
+                       pos    += copy;
                }
+               start = end;
        }
        BUG_ON(len);
 
        return csum;
 }
+EXPORT_SYMBOL(skb_checksum);
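
A hedged sketch of typical use: checksum an entire, possibly non-linear skb and fold the 32-bit partial sum into the 16-bit form carried in protocol headers, using csum_fold() from <net/checksum.h>.

    static __sum16 example_full_csum(const struct sk_buff *skb)
    {
            __wsum csum = skb_checksum(skb, 0, skb->len, 0);

            return csum_fold(csum);
    }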
 
 /* Both of above in one bottle. */
 
@@ -1268,6 +1752,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
        int start = skb_headlen(skb);
        int i, copy = start - offset;
+       struct sk_buff *frag_iter;
        int pos = 0;
 
        /* Copy header. */
@@ -1286,7 +1771,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
@@ -1312,35 +1797,32 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               __wsum csum2;
+               int end;
 
-               for (; list; list = list->next) {
-                       __wsum csum2;
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               csum2 = skb_copy_and_csum_bits(list,
-                                                              offset - start,
-                                                              to, copy, 0);
-                               csum = csum_block_add(csum, csum2, pos);
-                               if ((len -= copy) == 0)
-                                       return csum;
-                               offset += copy;
-                               to     += copy;
-                               pos    += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       csum2 = skb_copy_and_csum_bits(frag_iter,
+                                                      offset - start,
+                                                      to, copy, 0);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if ((len -= copy) == 0)
+                               return csum;
+                       offset += copy;
+                       to     += copy;
+                       pos    += copy;
                }
+               start = end;
        }
        BUG_ON(len);
        return csum;
 }
+EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
@@ -1348,13 +1830,13 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
        long csstart;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
-               csstart = skb_transport_offset(skb);
+               csstart = skb->csum_start - skb_headroom(skb);
        else
                csstart = skb_headlen(skb);
 
        BUG_ON(csstart > skb_headlen(skb));
 
-       memcpy(to, skb->data, csstart);
+       skb_copy_from_linear_data(skb, to, csstart);
 
        csum = 0;
        if (csstart != skb->len)
@@ -1367,6 +1849,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
        }
 }
+EXPORT_SYMBOL(skb_copy_and_csum_dev);
 
 /**
  *     skb_dequeue - remove from the head of the queue
@@ -1387,6 +1870,7 @@ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
 }
+EXPORT_SYMBOL(skb_dequeue);
 
 /**
  *     skb_dequeue_tail - remove from the tail of the queue
@@ -1406,6 +1890,7 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
 }
+EXPORT_SYMBOL(skb_dequeue_tail);
 
 /**
  *     skb_queue_purge - empty a list
@@ -1421,6 +1906,7 @@ void skb_queue_purge(struct sk_buff_head *list)
        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
 }
+EXPORT_SYMBOL(skb_queue_purge);
 
 /**
  *     skb_queue_head - queue a buffer at the list head
@@ -1441,6 +1927,7 @@ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_queue_head);
 
 /**
  *     skb_queue_tail - queue a buffer at the list tail
@@ -1461,6 +1948,7 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_queue_tail);
 
 /**
  *     skb_unlink      -       remove a buffer from a list
@@ -1480,6 +1968,7 @@ void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        __skb_unlink(skb, list);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_unlink);
 
 /**
  *     skb_append      -       append a buffer
@@ -1496,10 +1985,10 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        unsigned long flags;
 
        spin_lock_irqsave(&list->lock, flags);
-       __skb_append(old, newsk, list);
+       __skb_queue_after(list, old, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
-
+EXPORT_SYMBOL(skb_append);
 
 /**
  *     skb_insert      -       insert a buffer
@@ -1521,19 +2010,7 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        __skb_insert(newsk, old->prev, old, list);
        spin_unlock_irqrestore(&list->lock, flags);
 }
-
-#if 0
-/*
- *     Tune the memory allocator for a new MTU size.
- */
-void skb_add_mtu(int mtu)
-{
-       /* Must match allocation in alloc_skb */
-       mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
-
-       kmem_add_cache_size(mtu);
-}
-#endif
+EXPORT_SYMBOL(skb_insert);
 
 static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
@@ -1541,8 +2018,8 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
 {
        int i;
 
-       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
-
+       skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
+                                        pos - len);
        /* And move data appendix as is. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -1553,7 +2030,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
        skb1->len                  += skb1->data_len;
        skb->data_len              = 0;
        skb->len                   = len;
-       skb->tail                  = skb->data + len;
+       skb_set_tail_pointer(skb, len);
 }
 
 static inline void skb_split_no_header(struct sk_buff *skb,
@@ -1612,6 +2089,149 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
        else            /* Second chunk has no header, nothing to copy. */
                skb_split_no_header(skb, skb1, len, pos);
 }
+EXPORT_SYMBOL(skb_split);
+
+/* Shifting from/to a cloned skb is a no-go.
+ *
+ * Caller cannot keep skb_shinfo related pointers past calling here!
+ */
+static int skb_prepare_for_shift(struct sk_buff *skb)
+{
+       return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/**
+ * skb_shift - Shifts paged data partially from skb to another
+ * @tgt: buffer into which tail data gets added
+ * @skb: buffer from which the paged data comes
+ * @shiftlen: shift up to this many bytes
+ *
+ * Attempts to shift up to shiftlen worth of bytes, which may be less than
+ * the length of the skb, from @skb to @tgt. Returns the number of bytes
+ * shifted. It's up to the caller to free @skb if everything was shifted.
+ *
+ * If @tgt runs out of frags, the whole operation is aborted.
+ *
+ * The skb must contain nothing but paged data, while tgt is also allowed
+ * to have non-paged (linear) data.
+ *
+ * TODO: a full-sized shift could be optimized, but that would need a
+ * specialized skb freer able to handle frags without an up-to-date nr_frags.
+ */
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
+{
+       int from, to, merge, todo;
+       struct skb_frag_struct *fragfrom, *fragto;
+
+       BUG_ON(shiftlen > skb->len);
+       BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
+
+       todo = shiftlen;
+       from = 0;
+       to = skb_shinfo(tgt)->nr_frags;
+       fragfrom = &skb_shinfo(skb)->frags[from];
+
+       /* Actual merge is delayed until the point when we know we can
+        * commit all, so that we don't have to undo partial changes
+        */
+       if (!to ||
+           !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+               merge = -1;
+       } else {
+               merge = to - 1;
+
+               todo -= fragfrom->size;
+               if (todo < 0) {
+                       if (skb_prepare_for_shift(skb) ||
+                           skb_prepare_for_shift(tgt))
+                               return 0;
+
+                       /* All previous frag pointers might be stale! */
+                       fragfrom = &skb_shinfo(skb)->frags[from];
+                       fragto = &skb_shinfo(tgt)->frags[merge];
+
+                       fragto->size += shiftlen;
+                       fragfrom->size -= shiftlen;
+                       fragfrom->page_offset += shiftlen;
+
+                       goto onlymerged;
+               }
+
+               from++;
+       }
+
+       /* Skip a full, non-fitting skb to avoid expensive operations */
+       if ((shiftlen == skb->len) &&
+           (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
+               return 0;
+
+       if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
+               return 0;
+
+       while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+               if (to == MAX_SKB_FRAGS)
+                       return 0;
+
+               fragfrom = &skb_shinfo(skb)->frags[from];
+               fragto = &skb_shinfo(tgt)->frags[to];
+
+               if (todo >= fragfrom->size) {
+                       *fragto = *fragfrom;
+                       todo -= fragfrom->size;
+                       from++;
+                       to++;
+
+               } else {
+                       get_page(fragfrom->page);
+                       fragto->page = fragfrom->page;
+                       fragto->page_offset = fragfrom->page_offset;
+                       fragto->size = todo;
+
+                       fragfrom->page_offset += todo;
+                       fragfrom->size -= todo;
+                       todo = 0;
+
+                       to++;
+                       break;
+               }
+       }
+
+       /* Ready to "commit" this state change to tgt */
+       skb_shinfo(tgt)->nr_frags = to;
+
+       if (merge >= 0) {
+               fragfrom = &skb_shinfo(skb)->frags[0];
+               fragto = &skb_shinfo(tgt)->frags[merge];
+
+               fragto->size += fragfrom->size;
+               put_page(fragfrom->page);
+       }
+
+       /* Reposition in the original skb */
+       to = 0;
+       while (from < skb_shinfo(skb)->nr_frags)
+               skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+       skb_shinfo(skb)->nr_frags = to;
+
+       BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+onlymerged:
+       /* Most likely the tgt won't ever need its checksum anymore; skb, on
+        * the other hand, might need it if it has to be resent.
+        */
+       tgt->ip_summed = CHECKSUM_PARTIAL;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+
+       /* Update the length accounting on both skbs by hand;
+        * no helper exists for this yet.
+        */
+       skb->len -= shiftlen;
+       skb->data_len -= shiftlen;
+       skb->truesize -= shiftlen;
+       tgt->len += shiftlen;
+       tgt->data_len += shiftlen;
+       tgt->truesize += shiftlen;
+
+       return shiftlen;
+}
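
A hedged sketch of how a caller such as TCP's SACK-collapsing path might use skb_shift(); the example_* wrapper is illustrative only:

/* Try to move up to 'want' bytes of paged data from skb into prev.
 * skb must have no linear data (skb_headlen(skb) == 0), per the BUG_ON above.
 */
static bool example_collapse(struct sk_buff *prev, struct sk_buff *skb, int want)
{
        int shifted = skb_shift(prev, skb, want);

        if (!shifted)
                return false;   /* prev had no frag room, or COW of a clone failed */

        /* If skb->len dropped to zero, the caller must unlink and free skb. */
        return skb->len == 0;
}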
 
 /**
  * skb_prepare_seq_read - Prepare a sequential read of skb data
@@ -1632,6 +2252,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
 }
+EXPORT_SYMBOL(skb_prepare_seq_read);
 
 /**
  * skb_seq_read - Sequentially read skb data
@@ -1650,11 +2271,11 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
  * of bytes already consumed and the next call to
  * skb_seq_read() will return the remaining part of the block.
  *
- * Note: The size of each block of data returned can be arbitary,
+ * Note 1: The size of each block of data returned can be arbitrary;
 *       this limitation is the cost of zero-copy sequential
 *       reads of potentially non-linear data.
  *
- * Note: Fragment lists within fragments are not implemented
+ * Note 2: Fragment lists within fragments are not implemented
 *       at the moment; state->root_skb could be replaced with
  *       a stack for this purpose.
  */
@@ -1668,10 +2289,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
                return 0;
 
 next_skb:
-       block_limit = skb_headlen(st->cur_skb);
+       block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-       if (abs_offset < block_limit) {
-               *data = st->cur_skb->data + abs_offset;
+       if (abs_offset < block_limit && !st->frag_data) {
+               *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                return block_limit - abs_offset;
        }
 
@@ -1701,18 +2322,24 @@ next_skb:
                st->stepped_offset += frag->size;
        }
 
-       if (st->cur_skb->next) {
-               st->cur_skb = st->cur_skb->next;
+       if (st->frag_data) {
+               kunmap_skb_frag(st->frag_data);
+               st->frag_data = NULL;
+       }
+
+       if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
+               st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
                st->frag_idx = 0;
                goto next_skb;
-       } else if (st->root_skb == st->cur_skb &&
-                  skb_shinfo(st->root_skb)->frag_list) {
-               st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+       } else if (st->cur_skb->next) {
+               st->cur_skb = st->cur_skb->next;
+               st->frag_idx = 0;
                goto next_skb;
        }
 
        return 0;
 }
+EXPORT_SYMBOL(skb_seq_read);
 
 /**
  * skb_abort_seq_read - Abort a sequential read of skb data
@@ -1726,6 +2353,7 @@ void skb_abort_seq_read(struct skb_seq_state *st)
        if (st->frag_data)
                kunmap_skb_frag(st->frag_data);
 }
+EXPORT_SYMBOL(skb_abort_seq_read);
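
A minimal sketch of the prepare/read/abort protocol documented above, scanning a byte range of a possibly non-linear skb without copying; example_consume() is a hypothetical placeholder:

static void example_consume(const u8 *data, unsigned int len)
{
        /* process 'len' bytes at 'data' (placeholder) */
}

static void example_scan(struct sk_buff *skb, unsigned int from, unsigned int len)
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int consumed = 0, avail;

        skb_prepare_seq_read(skb, from, from + len, &st);

        while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
                example_consume(data, avail);
                consumed += avail;
        }

        /* Required if the loop is left before skb_seq_read() returns 0;
         * harmless otherwise, since it only unmaps a still-mapped fragment. */
        skb_abort_seq_read(&st);
}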
 
 #define TS_SKB_CB(state)       ((struct skb_seq_state *) &((state)->cb))
 
@@ -1768,6 +2396,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
        ret = textsearch_find(config, state);
        return (ret <= to - from ? ret : UINT_MAX);
 }
+EXPORT_SYMBOL(skb_find_text);
 
 /**
 * skb_append_datato_frags - append the user data to a skb
@@ -1840,15 +2469,15 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 
        return 0;
 }
+EXPORT_SYMBOL(skb_append_datato_frags);
 
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
- *     @start: start of data before pull
  *     @len: length of data pulled
  *
  *     This function performs an skb_pull on the packet and updates
- *     update the CHECKSUM_COMPLETE checksum.  It should be used on
+ *     the CHECKSUM_COMPLETE checksum.  It should be used on
  *     receive path processing instead of skb_pull unless you know
  *     that the checksum difference is zero (e.g., a valid IP header)
  *     or you are setting ip_summed to CHECKSUM_NONE.
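
As a hedged illustration of the receive-path usage described above, pulling a header of hypothetical length hdrlen while keeping a CHECKSUM_COMPLETE value consistent:

static int example_rx_strip(struct sk_buff *skb, unsigned int hdrlen)
{
        if (!pskb_may_pull(skb, hdrlen))
                return -EINVAL;

        /* like skb_pull(), but also folds the pulled bytes out of skb->csum */
        skb_pull_rcsum(skb, hdrlen);
        return 0;
}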
@@ -1870,13 +2499,14 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *     @features: features for the output path (see dev->features)
  *
  *     This function performs segmentation on the given skb.  It returns
- *     the segment at the given position.  It returns NULL if there are
- *     no more segments to generate, or when an error is encountered.
+ *     a pointer to the first in a list of new skbs for the segments.
+ *     In case of error it returns ERR_PTR(err).
  */
 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
+       struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int doffset = skb->data - skb_mac_header(skb);
        unsigned int offset = doffset;
@@ -1896,7 +2526,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                struct sk_buff *nskb;
                skb_frag_t *frag;
                int hsize;
-               int k;
                int size;
 
                len = skb->len - offset;
@@ -1909,9 +2538,36 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                if (hsize > len || !sg)
                        hsize = len;
 
-               nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
-               if (unlikely(!nskb))
-                       goto err;
+               if (!hsize && i >= nfrags) {
+                       BUG_ON(fskb->len != len);
+
+                       pos += len;
+                       nskb = skb_clone(fskb, GFP_ATOMIC);
+                       fskb = fskb->next;
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       hsize = skb_end_pointer(nskb) - nskb->head;
+                       if (skb_cow_head(nskb, doffset + headroom)) {
+                               kfree_skb(nskb);
+                               goto err;
+                       }
+
+                       nskb->truesize += skb_end_pointer(nskb) - nskb->head -
+                                         hsize;
+                       skb_release_head_state(nskb);
+                       __skb_push(nskb, doffset);
+               } else {
+                       nskb = alloc_skb(hsize + doffset + headroom,
+                                        GFP_ATOMIC);
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       skb_reserve(nskb, headroom);
+                       __skb_put(nskb, doffset);
+               }
 
                if (segs)
                        tail->next = nskb;
@@ -1919,21 +2575,20 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                        segs = nskb;
                tail = nskb;
 
-               nskb->dev = skb->dev;
-               nskb->priority = skb->priority;
-               nskb->protocol = skb->protocol;
-               nskb->dst = dst_clone(skb->dst);
-               memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-               nskb->pkt_type = skb->pkt_type;
+               __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               skb_reserve(nskb, headroom);
                skb_reset_mac_header(nskb);
-               nskb->nh.raw = nskb->data + skb->mac_len;
-               nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
-               memcpy(skb_put(nskb, doffset), skb->data, doffset);
+               skb_set_network_header(nskb, skb->mac_len);
+               nskb->transport_header = (nskb->network_header +
+                                         skb_network_header_len(skb));
+               skb_copy_from_linear_data(skb, nskb->data, doffset);
+
+               if (fskb != skb_shinfo(skb)->frag_list)
+                       continue;
 
                if (!sg) {
+                       nskb->ip_summed = CHECKSUM_NONE;
                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
                                                            skb_put(nskb, len),
                                                            len, 0);
@@ -1941,15 +2596,11 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                }
 
                frag = skb_shinfo(nskb)->frags;
-               k = 0;
 
-               nskb->ip_summed = CHECKSUM_PARTIAL;
-               nskb->csum = skb->csum;
-               memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
-
-               while (pos < offset + len) {
-                       BUG_ON(i >= nfrags);
+               skb_copy_from_linear_data_offset(skb, offset,
+                                                skb_put(nskb, hsize), hsize);
 
+               while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
                        get_page(frag->page);
                        size = frag->size;
@@ -1959,20 +2610,39 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                                frag->size -= offset - pos;
                        }
 
-                       k++;
+                       skb_shinfo(nskb)->nr_frags++;
 
                        if (pos + size <= offset + len) {
                                i++;
                                pos += size;
                        } else {
                                frag->size -= pos + size - (offset + len);
-                               break;
+                               goto skip_fraglist;
                        }
 
                        frag++;
                }
 
-               skb_shinfo(nskb)->nr_frags = k;
+               if (pos < offset + len) {
+                       struct sk_buff *fskb2 = fskb;
+
+                       BUG_ON(pos + fskb->len != offset + len);
+
+                       pos += fskb->len;
+                       fskb = fskb->next;
+
+                       if (fskb2->next) {
+                               fskb2 = skb_clone(fskb2, GFP_ATOMIC);
+                               if (!fskb2)
+                                       goto err;
+                       } else
+                               skb_get(fskb2);
+
+                       SKB_FRAG_ASSERT(nskb);
+                       skb_shinfo(nskb)->frag_list = fskb2;
+               }
+
+skip_fraglist:
                nskb->data_len = len - hsize;
                nskb->len += nskb->data_len;
                nskb->truesize += nskb->data_len;
@@ -1987,55 +2657,391 @@ err:
        }
        return ERR_PTR(err);
 }
-
 EXPORT_SYMBOL_GPL(skb_segment);
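
Given the return convention described above (ERR_PTR() on failure, otherwise a ->next-linked list of segments), a caller might walk the result roughly as in this sketch; example_xmit_one() is a hypothetical transmit hook:

static void example_xmit_one(struct sk_buff *skb)
{
        /* hand one segment to the device (placeholder) */
}

static int example_gso_send(struct sk_buff *skb, int features)
{
        struct sk_buff *segs;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        while (segs) {
                struct sk_buff *nskb = segs;

                segs = segs->next;
                nskb->next = NULL;
                example_xmit_one(nskb);
        }
        /* the original GSO skb is still owned by the caller */
        return 0;
}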
 
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       struct sk_buff *p = *head;
+       struct sk_buff *nskb;
+       struct skb_shared_info *skbinfo = skb_shinfo(skb);
+       struct skb_shared_info *pinfo = skb_shinfo(p);
+       unsigned int headroom;
+       unsigned int len = skb_gro_len(skb);
+       unsigned int offset = skb_gro_offset(skb);
+       unsigned int headlen = skb_headlen(skb);
+
+       if (p->len + len >= 65536)
+               return -E2BIG;
+
+       if (pinfo->frag_list)
+               goto merge;
+       else if (headlen <= offset) {
+               skb_frag_t *frag;
+               skb_frag_t *frag2;
+               int i = skbinfo->nr_frags;
+               int nr_frags = pinfo->nr_frags + i;
+
+               offset -= headlen;
+
+               if (nr_frags > MAX_SKB_FRAGS)
+                       return -E2BIG;
+
+               pinfo->nr_frags = nr_frags;
+               skbinfo->nr_frags = 0;
+
+               frag = pinfo->frags + nr_frags;
+               frag2 = skbinfo->frags + i;
+               do {
+                       *--frag = *--frag2;
+               } while (--i);
+
+               frag->page_offset += offset;
+               frag->size -= offset;
+
+               skb->truesize -= skb->data_len;
+               skb->len -= skb->data_len;
+               skb->data_len = 0;
+
+               NAPI_GRO_CB(skb)->free = 1;
+               goto done;
+       } else if (skb_gro_len(p) != pinfo->gso_size)
+               return -E2BIG;
+
+       headroom = skb_headroom(p);
+       nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+       if (unlikely(!nskb))
+               return -ENOMEM;
+
+       __copy_skb_header(nskb, p);
+       nskb->mac_len = p->mac_len;
+
+       skb_reserve(nskb, headroom);
+       __skb_put(nskb, skb_gro_offset(p));
+
+       skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
+       skb_set_network_header(nskb, skb_network_offset(p));
+       skb_set_transport_header(nskb, skb_transport_offset(p));
+
+       __skb_pull(p, skb_gro_offset(p));
+       memcpy(skb_mac_header(nskb), skb_mac_header(p),
+              p->data - skb_mac_header(p));
+
+       *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
+       skb_shinfo(nskb)->frag_list = p;
+       skb_shinfo(nskb)->gso_size = pinfo->gso_size;
+       skb_header_release(p);
+       nskb->prev = p;
+
+       nskb->data_len += p->len;
+       nskb->truesize += p->len;
+       nskb->len += p->len;
+
+       *head = nskb;
+       nskb->next = p->next;
+       p->next = NULL;
+
+       p = nskb;
+
+merge:
+       if (offset > headlen) {
+               skbinfo->frags[0].page_offset += offset - headlen;
+               skbinfo->frags[0].size -= offset - headlen;
+               offset = headlen;
+       }
+
+       __skb_pull(skb, offset);
+
+       p->prev->next = skb;
+       p->prev = skb;
+       skb_header_release(skb);
+
+done:
+       NAPI_GRO_CB(p)->count++;
+       p->data_len += len;
+       p->truesize += len;
+       p->len += len;
+
+       NAPI_GRO_CB(skb)->same_flow = 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive);
+
 void __init skb_init(void)
 {
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                             NULL, NULL);
+                                             NULL);
        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                                                (2*sizeof(struct sk_buff)) +
                                                sizeof(atomic_t),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                               NULL, NULL);
+                                               NULL);
 }
 
-EXPORT_SYMBOL(___pskb_trim);
-EXPORT_SYMBOL(__kfree_skb);
-EXPORT_SYMBOL(kfree_skb);
-EXPORT_SYMBOL(__pskb_pull_tail);
-EXPORT_SYMBOL(__alloc_skb);
-EXPORT_SYMBOL(__netdev_alloc_skb);
-EXPORT_SYMBOL(pskb_copy);
-EXPORT_SYMBOL(pskb_expand_head);
-EXPORT_SYMBOL(skb_checksum);
-EXPORT_SYMBOL(skb_clone);
-EXPORT_SYMBOL(skb_clone_fraglist);
-EXPORT_SYMBOL(skb_copy);
-EXPORT_SYMBOL(skb_copy_and_csum_bits);
-EXPORT_SYMBOL(skb_copy_and_csum_dev);
-EXPORT_SYMBOL(skb_copy_bits);
-EXPORT_SYMBOL(skb_copy_expand);
-EXPORT_SYMBOL(skb_over_panic);
-EXPORT_SYMBOL(skb_pad);
-EXPORT_SYMBOL(skb_realloc_headroom);
-EXPORT_SYMBOL(skb_under_panic);
-EXPORT_SYMBOL(skb_dequeue);
-EXPORT_SYMBOL(skb_dequeue_tail);
-EXPORT_SYMBOL(skb_insert);
-EXPORT_SYMBOL(skb_queue_purge);
-EXPORT_SYMBOL(skb_queue_head);
-EXPORT_SYMBOL(skb_queue_tail);
-EXPORT_SYMBOL(skb_unlink);
-EXPORT_SYMBOL(skb_append);
-EXPORT_SYMBOL(skb_split);
-EXPORT_SYMBOL(skb_prepare_seq_read);
-EXPORT_SYMBOL(skb_seq_read);
-EXPORT_SYMBOL(skb_abort_seq_read);
-EXPORT_SYMBOL(skb_find_text);
-EXPORT_SYMBOL(skb_append_datato_frags);
+/**
+ *     skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ *     @skb: Socket buffer containing the buffers to be mapped
+ *     @sg: The scatter-gather list to map into
+ *     @offset: The offset into the buffer's contents to start mapping
+ *     @len: Length of buffer space to be mapped
+ *
+ *     Fill the specified scatter-gather list with mappings/pointers into a
+ *     region of the buffer space attached to a socket buffer.
+ */
+static int
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
+       struct sk_buff *frag_iter;
+       int elt = 0;
+
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               sg_set_buf(sg, skb->data + offset, copy);
+               elt++;
+               if ((len -= copy) == 0)
+                       return elt;
+               offset += copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               WARN_ON(start > offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       sg_set_page(&sg[elt], frag->page, copy,
+                                       frag->page_offset+offset-start);
+                       elt++;
+                       if (!(len -= copy))
+                               return elt;
+                       offset += copy;
+               }
+               start = end;
+       }
+
+       skb_walk_frags(skb, frag_iter) {
+               int end;
+
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+                                             copy);
+                       if ((len -= copy) == 0)
+                               return elt;
+                       offset += copy;
+               }
+               start = end;
+       }
+       BUG_ON(len);
+       return elt;
+}
+
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+       int nsg = __skb_to_sgvec(skb, sg, offset, len);
+
+       sg_mark_end(&sg[nsg - 1]);
+
+       return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
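
A minimal sketch of mapping an skb into a scatterlist with skb_to_sgvec(); the on-stack array size assumes a linear head plus at most MAX_SKB_FRAGS page fragments and no frag_list (callers that must handle frag_lists usually size the list from skb_cow_data() instead):

static int example_map_skb(struct sk_buff *skb)
{
        struct scatterlist sg[MAX_SKB_FRAGS + 1];
        int nsg;

        sg_init_table(sg, ARRAY_SIZE(sg));
        nsg = skb_to_sgvec(skb, sg, 0, skb->len);

        /* 'sg' now holds 'nsg' entries covering all of skb's data */
        return nsg;
}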
+
+/**
+ *     skb_cow_data - Check that a socket buffer's data buffers are writable
+ *     @skb: The socket buffer to check.
+ *     @tailbits: Amount of trailing space to be added
+ *     @trailer: Returned pointer to the skb where the @tailbits space begins
+ *
+ *     Make sure that the data buffers attached to a socket buffer are
+ *     writable. If they are not, private copies are made of the data buffers
+ *     and the socket buffer is set to use these instead.
+ *
+ *     If @tailbits is given, make sure that there is space to write @tailbits
+ *     bytes of data beyond current end of socket buffer.  @trailer will be
+ *     set to point to the skb in which this space begins.
+ *
+ *     The number of scatterlist elements required to completely map the
+ *     COW'd and extended socket buffer will be returned.
+ */
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
+{
+       int copyflag;
+       int elt;
+       struct sk_buff *skb1, **skb_p;
+
+       /* If skb is cloned or its head is paged, reallocate
+        * head pulling out all the pages (pages are considered not writable
+        * at the moment even if they are anonymous).
+        */
+       if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
+           __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
+               return -ENOMEM;
+
+       /* Easy case. Most of packets will go this way. */
+       if (!skb_has_frags(skb)) {
+               /* A bit of trouble: not enough space for the trailer.
+                * This should not happen when the stack is tuned to generate
+                * good frames. On a miss we reallocate and reserve even more
+                * space; 128 bytes is fair. */
+
+               if (skb_tailroom(skb) < tailbits &&
+                   pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
+                       return -ENOMEM;
+
+               /* Voila! */
+               *trailer = skb;
+               return 1;
+       }
+
+       /* Misery. We are in trouble; time to mince the fragments... */
+
+       elt = 1;
+       skb_p = &skb_shinfo(skb)->frag_list;
+       copyflag = 0;
+
+       while ((skb1 = *skb_p) != NULL) {
+               int ntail = 0;
+
+               /* The fragment has been partially pulled by someone;
+                * this can happen on input. Copy it and everything
+                * after it. */
+
+               if (skb_shared(skb1))
+                       copyflag = 1;
+
+               /* If the skb is the last, worry about trailer. */
+
+               if (skb1->next == NULL && tailbits) {
+                       if (skb_shinfo(skb1)->nr_frags ||
+                           skb_has_frags(skb1) ||
+                           skb_tailroom(skb1) < tailbits)
+                               ntail = tailbits + 128;
+               }
+
+               if (copyflag ||
+                   skb_cloned(skb1) ||
+                   ntail ||
+                   skb_shinfo(skb1)->nr_frags ||
+                   skb_has_frags(skb1)) {
+                       struct sk_buff *skb2;
+
+                       /* The expensive path: we have to copy. */
+                       if (ntail == 0)
+                               skb2 = skb_copy(skb1, GFP_ATOMIC);
+                       else
+                               skb2 = skb_copy_expand(skb1,
+                                                      skb_headroom(skb1),
+                                                      ntail,
+                                                      GFP_ATOMIC);
+                       if (unlikely(skb2 == NULL))
+                               return -ENOMEM;
+
+                       if (skb1->sk)
+                               skb_set_owner_w(skb2, skb1->sk);
+
+                       /* Link the new skb into the list and drop the old one */
+
+                       skb2->next = skb1->next;
+                       *skb_p = skb2;
+                       kfree_skb(skb1);
+                       skb1 = skb2;
+               }
+               elt++;
+               *trailer = skb1;
+               skb_p = &skb1->next;
+       }
+
+       return elt;
+}
+EXPORT_SYMBOL_GPL(skb_cow_data);
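
A hedged sketch of the usual pairing of skb_cow_data() with skb_to_sgvec() (as on the IPsec transforms): make the buffers writable first, then allocate exactly the number of scatterlist entries reported. The GFP flags and the absence of extra trailer space are illustrative choices:

static struct scatterlist *example_cow_and_map(struct sk_buff *skb)
{
        struct sk_buff *trailer;
        struct scatterlist *sg;
        int nfrags;

        nfrags = skb_cow_data(skb, 0, &trailer);        /* no extra tail space */
        if (nfrags < 0)
                return NULL;

        sg = kmalloc(nfrags * sizeof(*sg), GFP_ATOMIC);
        if (!sg)
                return NULL;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, 0, skb->len);
        return sg;
}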
+
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+               struct skb_shared_hwtstamps *hwtstamps)
+{
+       struct sock *sk = orig_skb->sk;
+       struct sock_exterr_skb *serr;
+       struct sk_buff *skb;
+       int err;
+
+       if (!sk)
+               return;
+
+       skb = skb_clone(orig_skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       if (hwtstamps) {
+               *skb_hwtstamps(skb) = *hwtstamps;
+       } else {
+               /*
+                * No hardware time stamps are available, so keep the
+                * skb_shared_tx flags and store only a software time stamp.
+                */
+               skb->tstamp = ktime_get_real();
+       }
+
+       serr = SKB_EXT_ERR(skb);
+       memset(serr, 0, sizeof(*serr));
+       serr->ee.ee_errno = ENOMSG;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+       err = sock_queue_err_skb(sk, skb);
+       if (err)
+               kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_tstamp_tx);
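
A hedged sketch of how a driver's TX-completion path might feed a hardware timestamp into skb_tstamp_tx(); how the raw hw_ns value is obtained from the NIC is entirely hypothetical:

static void example_tx_complete(struct sk_buff *skb, u64 hw_ns)
{
        struct skb_shared_hwtstamps hwts;

        memset(&hwts, 0, sizeof(hwts));
        hwts.hwtstamp = ns_to_ktime(hw_ns);

        /* clones the skb and queues the timestamp on the socket's error queue */
        skb_tstamp_tx(skb, &hwts);

        /* passing NULL instead of &hwts falls back to a software timestamp */
}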
+
+
+/**
+ * skb_partial_csum_set - set up and verify partial csum values for packet
+ * @skb: the skb to set
+ * @start: the number of bytes after skb->data to start checksumming.
+ * @off: the offset from start to place the checksum.
+ *
+ * For untrusted partially-checksummed packets, we need to make sure the values
+ * for skb->csum_start and skb->csum_offset are valid so we don't oops.
+ *
+ * This function checks and sets those values and skb->ip_summed: if this
+ * returns false, you should drop the packet.
+ */
+bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
+{
+       if (unlikely(start > skb_headlen(skb)) ||
+           unlikely((int)start + off > skb_headlen(skb) - 2)) {
+               if (net_ratelimit())
+                       printk(KERN_WARNING
+                              "bad partial csum: csum=%u/%u len=%u\n",
+                              start, off, skb_headlen(skb));
+               return false;
+       }
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       skb->csum_start = skb_headroom(skb) + start;
+       skb->csum_offset = off;
+       return true;
+}
+EXPORT_SYMBOL_GPL(skb_partial_csum_set);
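
A hedged sketch of the kind of consumer this helper targets (e.g. a virtio-net-style receive path): the start/off pair comes from an untrusted header and must be validated before the packet is accepted; the wrapper below is illustrative:

static int example_accept_partial_csum(struct sk_buff *skb, u16 start, u16 off)
{
        if (!skb_partial_csum_set(skb, start, off))
                return -EINVAL; /* bogus offsets: the caller should drop the packet */

        /* skb->csum_start/csum_offset are now valid and ip_summed is
         * CHECKSUM_PARTIAL */
        return 0;
}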
+
+void __skb_warn_lro_forwarding(const struct sk_buff *skb)
+{
+       if (net_ratelimit())
+               pr_warning("%s: received packets cannot be forwarded"
+                          " while LRO is enabled\n", skb->dev->name);
+}
+EXPORT_SYMBOL(__skb_warn_lro_forwarding);