cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: net
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ddcbc4d..b8d0abb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1,11 +1,9 @@
 /*
  *     Routines having to do with the 'struct sk_buff' memory handlers.
  *
- *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
+ *     Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
  *                     Florian La Roche <rzsfl@rz.uni-sb.de>
  *
- *     Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
- *
  *     Fixes:
  *             Alan Cox        :       Fixed the worst of the load
  *                                     balancer bugs.
 #endif
 #include <linux/string.h>
 #include <linux/skbuff.h>
+#include <linux/splice.h>
 #include <linux/cache.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#include <linux/scatterlist.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
+static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+                                 struct pipe_buffer *buf)
+{
+       struct sk_buff *skb = (struct sk_buff *) buf->private;
+
+       kfree_skb(skb);
+}
+
+static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+                               struct pipe_buffer *buf)
+{
+       struct sk_buff *skb = (struct sk_buff *) buf->private;
+
+       skb_get(skb);
+}
+
+static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+                              struct pipe_buffer *buf)
+{
+       return 1;
+}
+
+
+/* Pipe buffer operations for a socket. */
+static struct pipe_buf_operations sock_pipe_buf_ops = {
+       .can_merge = 0,
+       .map = generic_pipe_buf_map,
+       .unmap = generic_pipe_buf_unmap,
+       .confirm = generic_pipe_buf_confirm,
+       .release = sock_pipe_buf_release,
+       .steal = sock_pipe_buf_steal,
+       .get = sock_pipe_buf_get,
+};
+
 /*
  *     Keep out-of-line to prevent kernel bloat.
  *     __builtin_return_address is not used because it is not always
@@ -87,9 +121,9 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%#lx end:%p dev:%s\n",
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
-              (unsigned long)skb->tail, skb->end,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
@@ -106,16 +140,16 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%#lx end:%p dev:%s\n",
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
-              (unsigned long)skb->tail, skb->end,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
 
 void skb_truesize_bug(struct sk_buff *skb)
 {
-       printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
+       WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
               "len=%u, sizeof(sk_buff)=%Zd\n",
               skb->truesize, skb->len, sizeof(struct sk_buff));
 }
@@ -157,20 +191,24 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        if (!skb)
                goto out;
 
-       /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;
 
-       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       /*
+        * Only clear those fields we need to clear, not those that we will
+        * actually initialise below. Hence, don't put any more fields after
+        * the tail pointer in struct sk_buff!
+        */
+       memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
-       skb->end  = data + size;
+       skb->end = skb->tail + size;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
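For reference, the offsetof()-based partial clear introduced above can be sketched in isolation. This is a minimal userspace analogue, with a hypothetical struct demo standing in for sk_buff:

#include <stddef.h>
#include <string.h>

struct demo {
	int flags;		/* zeroed by the partial memset */
	void *owner;		/* zeroed by the partial memset */
	size_t tail;		/* first field NOT covered by the memset */
	size_t end;
	unsigned int users;
};

static void demo_init(struct demo *d, size_t size)
{
	/* clear only the fields laid out before 'tail'... */
	memset(d, 0, offsetof(struct demo, tail));
	/* ...and set everything from 'tail' onwards explicitly,
	 * mirroring what __alloc_skb() does after its memset() */
	d->tail = 0;
	d->end = size;
	d->users = 1;
}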
@@ -225,6 +263,48 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        return skb;
 }
 
+struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+       int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+       struct page *page;
+
+       page = alloc_pages_node(node, gfp_mask, 0);
+       return page;
+}
+EXPORT_SYMBOL(__netdev_alloc_page);
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+               int size)
+{
+       skb_fill_page_desc(skb, i, page, off, size);
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += size;
+}
+EXPORT_SYMBOL(skb_add_rx_frag);
+
+/**
+ *     dev_alloc_skb - allocate an skbuff for receiving
+ *     @length: length to allocate
+ *
+ *     Allocate a new &sk_buff and assign it a usage count of one. The
+ *     buffer has unspecified headroom built in. Users should allocate
+ *     the headroom they think they need without accounting for the
+ *     built in space. The built in space is used for optimisations.
+ *
+ *     %NULL is returned if there is no free memory. Although this function
+ *     allocates memory it can be called from an interrupt.
+ */
+struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+       /*
+        * There is more code here than it seems:
+        * __dev_alloc_skb is an inline
+        */
+       return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(dev_alloc_skb);
+
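As a usage illustration (not part of this patch), a driver receive path would typically pair dev_alloc_skb() with skb_reserve() for alignment; RX_BUF_LEN here is an assumed driver constant:

#include <linux/skbuff.h>

#define RX_BUF_LEN 1536		/* hypothetical receive buffer size */

static struct sk_buff *rx_alloc_example(void)
{
	struct sk_buff *skb = dev_alloc_skb(RX_BUF_LEN + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	/* shift data so the IP header lands on a 4-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}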
 static void skb_drop_list(struct sk_buff **listp)
 {
        struct sk_buff *list = *listp;
@@ -272,12 +352,11 @@ static void skb_release_data(struct sk_buff *skb)
 /*
  *     Free an skbuff by memory without cleaning the state.
  */
-void kfree_skbmem(struct sk_buff *skb)
+static void kfree_skbmem(struct sk_buff *skb)
 {
        struct sk_buff *other;
        atomic_t *fclone_ref;
 
-       skb_release_data(skb);
        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
@@ -301,19 +380,10 @@ void kfree_skbmem(struct sk_buff *skb)
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
-       };
+       }
 }
 
-/**
- *     __kfree_skb - private function
- *     @skb: buffer
- *
- *     Free an sk_buff. Release anything attached to the buffer.
- *     Clean the state. This is an internal helper function. Users should
- *     always call kfree_skb
- */
-
-void __kfree_skb(struct sk_buff *skb)
+static void skb_release_head_state(struct sk_buff *skb)
 {
        dst_release(skb->dst);
 #ifdef CONFIG_XFRM
@@ -323,15 +393,13 @@ void __kfree_skb(struct sk_buff *skb)
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
-#ifdef CONFIG_NETFILTER
-       nf_conntrack_put(skb->nfct);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+       nf_conntrack_put(skb->nfct);
        nf_conntrack_put_reasm(skb->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
 #endif
-#endif
 /* XXX: IS this still necessary? - JHS */
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
@@ -339,7 +407,27 @@ void __kfree_skb(struct sk_buff *skb)
        skb->tc_verd = 0;
 #endif
 #endif
+}
+
+/* Free everything but the sk_buff shell. */
+static void skb_release_all(struct sk_buff *skb)
+{
+       skb_release_head_state(skb);
+       skb_release_data(skb);
+}
+
+/**
+ *     __kfree_skb - private function
+ *     @skb: buffer
+ *
+ *     Free an sk_buff. Release anything attached to the buffer.
+ *     Clean the state. This is an internal helper function. Users should
+ *     always call kfree_skb
+ */
 
+void __kfree_skb(struct sk_buff *skb)
+{
+       skb_release_all(skb);
        kfree_skbmem(skb);
 }
 
@@ -362,6 +450,141 @@ void kfree_skb(struct sk_buff *skb)
 }
 
 /**
+ *     skb_recycle_check - check if skb can be reused for receive
+ *     @skb: buffer
+ *     @skb_size: minimum receive buffer size
+ *
+ *     Checks that the skb passed in is not shared or cloned, and
+ *     that it is linear and its head portion at least as large as
+ *     skb_size so that it can be recycled as a receive buffer.
+ *     If these conditions are met, this function does any necessary
+ *     reference count dropping and cleans up the skbuff as if it
+ *     just came from __alloc_skb().
+ */
+int skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+       struct skb_shared_info *shinfo;
+
+       if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+               return 0;
+
+       skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+       if (skb_end_pointer(skb) - skb->head < skb_size)
+               return 0;
+
+       if (skb_shared(skb) || skb_cloned(skb))
+               return 0;
+
+       skb_release_head_state(skb);
+       shinfo = skb_shinfo(skb);
+       atomic_set(&shinfo->dataref, 1);
+       shinfo->nr_frags = 0;
+       shinfo->gso_size = 0;
+       shinfo->gso_segs = 0;
+       shinfo->gso_type = 0;
+       shinfo->ip6_frag_id = 0;
+       shinfo->frag_list = NULL;
+
+       memset(skb, 0, offsetof(struct sk_buff, tail));
+       skb->data = skb->head + NET_SKB_PAD;
+       skb_reset_tail_pointer(skb);
+
+       return 1;
+}
+EXPORT_SYMBOL(skb_recycle_check);
+
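A hedged sketch of the recycling pattern this helper enables on the TX-completion side; MY_RX_SIZE and my_rx_refill() are hypothetical driver pieces, not kernel API:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define MY_RX_SIZE 1522				/* hypothetical */
extern void my_rx_refill(struct net_device *dev,
			 struct sk_buff *skb);	/* hypothetical */

static void tx_complete_example(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, MY_RX_SIZE))
		my_rx_refill(dev, skb);	/* reuse: skb looks freshly allocated */
	else
		dev_kfree_skb(skb);
}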
+static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+       new->tstamp             = old->tstamp;
+       new->dev                = old->dev;
+       new->transport_header   = old->transport_header;
+       new->network_header     = old->network_header;
+       new->mac_header         = old->mac_header;
+       new->dst                = dst_clone(old->dst);
+#ifdef CONFIG_XFRM
+       new->sp                 = secpath_get(old->sp);
+#endif
+       memcpy(new->cb, old->cb, sizeof(old->cb));
+       new->csum_start         = old->csum_start;
+       new->csum_offset        = old->csum_offset;
+       new->local_df           = old->local_df;
+       new->pkt_type           = old->pkt_type;
+       new->ip_summed          = old->ip_summed;
+       skb_copy_queue_mapping(new, old);
+       new->priority           = old->priority;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+       new->ipvs_property      = old->ipvs_property;
+#endif
+       new->protocol           = old->protocol;
+       new->mark               = old->mark;
+       __nf_copy(new, old);
+#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
+    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+       new->nf_trace           = old->nf_trace;
+#endif
+#ifdef CONFIG_NET_SCHED
+       new->tc_index           = old->tc_index;
+#ifdef CONFIG_NET_CLS_ACT
+       new->tc_verd            = old->tc_verd;
+#endif
+#endif
+       new->vlan_tci           = old->vlan_tci;
+
+       skb_copy_secmark(new, old);
+}
+
+static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
+{
+#define C(x) n->x = skb->x
+
+       n->next = n->prev = NULL;
+       n->sk = NULL;
+       __copy_skb_header(n, skb);
+
+       C(len);
+       C(data_len);
+       C(mac_len);
+       n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+       n->cloned = 1;
+       n->nohdr = 0;
+       n->destructor = NULL;
+       C(iif);
+       C(tail);
+       C(end);
+       C(head);
+       C(data);
+       C(truesize);
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+       C(do_not_encrypt);
+       C(requeue);
+#endif
+       atomic_set(&n->users, 1);
+
+       atomic_inc(&(skb_shinfo(skb)->dataref));
+       skb->cloned = 1;
+
+       return n;
+#undef C
+}
+
+/**
+ *     skb_morph       -       morph one skb into another
+ *     @dst: the skb to receive the contents
+ *     @src: the skb to supply the contents
+ *
+ *     This is identical to skb_clone except that the target skb is
+ *     supplied by the user.
+ *
+ *     The target skb is returned upon exit.
+ */
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
+{
+       skb_release_all(dst);
+       return __skb_clone(dst, src);
+}
+EXPORT_SYMBOL_GPL(skb_morph);
+
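For illustration only, a minimal (hypothetical) caller. Note that skb_morph() clones @src's data, so whoever holds the original reference to @src still has to drop it:

#include <linux/skbuff.h>

static void morph_example(struct sk_buff *shell, struct sk_buff *donor)
{
	/* shell's old attachments are released, then it becomes a
	 * clone of donor; donor keeps its own reference */
	skb_morph(shell, donor);
}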
+/**
  *     skb_clone       -       duplicate an sk_buff
  *     @skb: buffer to clone
  *     @gfp_mask: allocation priority
@@ -392,60 +615,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }
 
-#define C(x) n->x = skb->x
-
-       n->next = n->prev = NULL;
-       n->sk = NULL;
-       C(tstamp);
-       C(dev);
-       C(transport_header);
-       C(network_header);
-       C(mac_header);
-       C(dst);
-       dst_clone(skb->dst);
-       C(sp);
-#ifdef CONFIG_INET
-       secpath_get(skb->sp);
-#endif
-       memcpy(n->cb, skb->cb, sizeof(skb->cb));
-       C(len);
-       C(data_len);
-       C(mac_len);
-       C(csum);
-       C(local_df);
-       n->cloned = 1;
-       n->nohdr = 0;
-       C(pkt_type);
-       C(ip_summed);
-       C(priority);
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-       C(ipvs_property);
-#endif
-       C(protocol);
-       n->destructor = NULL;
-       C(mark);
-       __nf_copy(n, skb);
-#ifdef CONFIG_NET_SCHED
-       C(tc_index);
-#ifdef CONFIG_NET_CLS_ACT
-       n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-       n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-       n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-       C(iif);
-#endif
-       skb_copy_secmark(n, skb);
-#endif
-       C(truesize);
-       atomic_set(&n->users, 1);
-       C(head);
-       C(data);
-       C(tail);
-       C(end);
-
-       atomic_inc(&(skb_shinfo(skb)->dataref));
-       skb->cloned = 1;
-
-       return n;
+       return __skb_clone(n, skb);
 }
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
@@ -456,42 +626,15 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
         */
        unsigned long offset = new->data - old->data;
 #endif
-       new->sk         = NULL;
-       new->dev        = old->dev;
-       new->priority   = old->priority;
-       new->protocol   = old->protocol;
-       new->dst        = dst_clone(old->dst);
-#ifdef CONFIG_INET
-       new->sp         = secpath_get(old->sp);
-#endif
-       new->transport_header = old->transport_header;
-       new->network_header   = old->network_header;
-       new->mac_header       = old->mac_header;
+
+       __copy_skb_header(new, old);
+
 #ifndef NET_SKBUFF_DATA_USES_OFFSET
        /* {transport,network,mac}_header are relative to skb->head */
        new->transport_header += offset;
        new->network_header   += offset;
        new->mac_header       += offset;
 #endif
-       memcpy(new->cb, old->cb, sizeof(old->cb));
-       new->local_df   = old->local_df;
-       new->fclone     = SKB_FCLONE_UNAVAILABLE;
-       new->pkt_type   = old->pkt_type;
-       new->tstamp     = old->tstamp;
-       new->destructor = NULL;
-       new->mark       = old->mark;
-       __nf_copy(new, old);
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-       new->ipvs_property = old->ipvs_property;
-#endif
-#ifdef CONFIG_NET_SCHED
-#ifdef CONFIG_NET_CLS_ACT
-       new->tc_verd = old->tc_verd;
-#endif
-       new->tc_index   = old->tc_index;
-#endif
-       skb_copy_secmark(new, old);
-       atomic_set(&new->users, 1);
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
@@ -520,8 +663,12 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
-                                     gfp_mask);
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end + skb->data_len, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
+#endif
        if (!n)
                return NULL;
 
@@ -529,8 +676,6 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
-       n->csum      = skb->csum;
-       n->ip_summed = skb->ip_summed;
 
        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();
@@ -558,8 +703,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
-
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head, gfp_mask);
+#endif
        if (!n)
                goto out;
 
@@ -568,9 +717,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
-       memcpy(n->data, skb->data, n->len);
-       n->csum      = skb->csum;
-       n->ip_summed = skb->ip_summed;
+       skb_copy_from_linear_data(skb, n->data, n->len);
 
        n->truesize += skb->data_len;
        n->data_len  = skb->data_len;
@@ -617,9 +764,15 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 {
        int i;
        u8 *data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       int size = nhead + skb->end + ntail;
+#else
        int size = nhead + (skb->end - skb->head) + ntail;
+#endif
        long off;
 
+       BUG_ON(nhead < 0);
+
        if (skb_shared(skb))
                BUG();
 
@@ -631,13 +784,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 
        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
-       memcpy(data + nhead, skb->head,
-               skb->tail
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
-               - skb->head
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       memcpy(data + nhead, skb->head, skb->tail);
+#else
+       memcpy(data + nhead, skb->head, skb->tail - skb->head);
 #endif
-               );
-       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+       memcpy(data + size, skb_end_pointer(skb),
+              sizeof(struct skb_shared_info));
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);
@@ -650,16 +803,21 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        off = (data + nhead) - skb->head;
 
        skb->head     = data;
-       skb->end      = data + size;
        skb->data    += off;
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->end      = size;
+       off           = nhead;
+#else
+       skb->end      = skb->head + size;
+#endif
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->tail             += off;
        skb->transport_header += off;
        skb->network_header   += off;
        skb->mac_header       += off;
-#endif
+       skb->csum_start       += nhead;
        skb->cloned   = 0;
+       skb->hdr_len  = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;
@@ -706,9 +864,6 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *
  *     You must pass %GFP_ATOMIC as the allocation priority if this function
  *     is called from an interrupt.
- *
- *     BUG ALERT: ip_summed is not copied. Why does this work? Is it used
- *     only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
@@ -719,7 +874,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
+       int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
+       int off;
 
        if (!n)
                return NULL;
@@ -729,7 +886,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
 
-       head_copy_len = skb_headroom(skb);
+       head_copy_len = oldheadroom;
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
@@ -743,6 +900,14 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
        copy_skb_header(n, skb);
 
+       off                  = newheadroom - oldheadroom;
+       n->csum_start       += off;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n->transport_header += off;
+       n->network_header   += off;
+       n->mac_header       += off;
+#endif
+
        return n;
 }
 
@@ -769,7 +934,7 @@ int skb_pad(struct sk_buff *skb, int pad)
                return 0;
        }
 
-       ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb));
+       ntail = skb->data_len + pad - (skb->end - skb->tail);
        if (likely(skb_cloned(skb) || ntail > 0)) {
                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
                if (unlikely(err))
@@ -791,6 +956,78 @@ free_skb:
        return err;
 }
 
+/**
+ *     skb_put - add data to a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer. If this would
+ *     exceed the total buffer size the kernel will panic. A pointer to the
+ *     first byte of the extra data is returned.
+ */
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+       unsigned char *tmp = skb_tail_pointer(skb);
+       SKB_LINEAR_ASSERT(skb);
+       skb->tail += len;
+       skb->len  += len;
+       if (unlikely(skb->tail > skb->end))
+               skb_over_panic(skb, len, __builtin_return_address(0));
+       return tmp;
+}
+EXPORT_SYMBOL(skb_put);
+
+/**
+ *     skb_push - add data to the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer at the buffer
+ *     start. If this would exceed the total buffer headroom the kernel will
+ *     panic. A pointer to the first byte of the extra data is returned.
+ */
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+       skb->data -= len;
+       skb->len  += len;
+       if (unlikely(skb->data < skb->head))
+               skb_under_panic(skb, len, __builtin_return_address(0));
+       return skb->data;
+}
+EXPORT_SYMBOL(skb_push);
+
+/**
+ *     skb_pull - remove data from the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to remove
+ *
+ *     This function removes data from the start of a buffer, returning
+ *     the memory to the headroom. A pointer to the next data in the buffer
+ *     is returned. Once the data has been pulled future pushes will overwrite
+ *     the old data.
+ */
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+       return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+EXPORT_SYMBOL(skb_pull);
+
+/**
+ *     skb_trim - remove end from a buffer
+ *     @skb: buffer to alter
+ *     @len: new length
+ *
+ *     Cut the length of a buffer down by removing data from the tail. If
+ *     the buffer is already under the length specified it is not modified.
+ *     The skb must be linear.
+ */
+void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (skb->len > len)
+               __skb_trim(skb, len);
+}
+EXPORT_SYMBOL(skb_trim);
+
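Taken together, the four helpers above cover the usual build-and-consume cycle. A minimal sketch, with illustrative HDR_LEN/PAYLOAD_LEN constants:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>

#define HDR_LEN		14	/* illustrative */
#define PAYLOAD_LEN	100	/* illustrative */

static void skb_ops_example(void)
{
	struct sk_buff *skb = alloc_skb(HDR_LEN + PAYLOAD_LEN, GFP_KERNEL);

	if (!skb)
		return;
	skb_reserve(skb, HDR_LEN);			/* headroom for the push */
	memset(skb_put(skb, PAYLOAD_LEN), 0, PAYLOAD_LEN); /* append payload */
	skb_push(skb, HDR_LEN);				/* prepend a header */
	skb_pull(skb, HDR_LEN);				/* ...strip it on receive */
	skb_trim(skb, PAYLOAD_LEN / 2);			/* drop the tail half */
	kfree_skb(skb);
}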
 /* Trims skb to length len. It can change skb pointers.
  */
 
@@ -907,7 +1144,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
         * plus 128 bytes for future expansions. If we have enough
         * room at tail, reallocate without expansion only if skb is cloned.
         */
-       int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end;
+       int i, k, eat = (skb->tail + delta) - skb->end;
 
        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
@@ -1028,7 +1265,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
-               memcpy(to, skb->data + offset, copy);
+               skb_copy_from_linear_data_offset(skb, offset, to, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
@@ -1038,7 +1275,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
@@ -1067,7 +1304,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                for (; list; list = list->next) {
                        int end;
 
-                       BUG_TRAP(start <= offset + len);
+                       WARN_ON(start > offset + len);
 
                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
@@ -1091,6 +1328,194 @@ fault:
        return -EFAULT;
 }
 
+/*
+ * Callback from splice_to_pipe(), if we need to release some pages
+ * at the end of the spd in case we error'ed out in filling the pipe.
+ */
+static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+       struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+
+       kfree_skb(skb);
+}
+
+/*
+ * Fill page/offset/length into spd, if it can hold more pages.
+ */
+static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+                               unsigned int len, unsigned int offset,
+                               struct sk_buff *skb)
+{
+       if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+               return 1;
+
+       spd->pages[spd->nr_pages] = page;
+       spd->partial[spd->nr_pages].len = len;
+       spd->partial[spd->nr_pages].offset = offset;
+       spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
+       spd->nr_pages++;
+       return 0;
+}
+
+static inline void __segment_seek(struct page **page, unsigned int *poff,
+                                 unsigned int *plen, unsigned int off)
+{
+       *poff += off;
+       *page += *poff / PAGE_SIZE;
+       *poff = *poff % PAGE_SIZE;
+       *plen -= off;
+}
+
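The seek arithmetic above may carry the offset across one or more page boundaries. A worked userspace replica, assuming a 4096-byte PAGE_SIZE:

#include <stdio.h>

#define PAGE_SIZE 4096U

int main(void)
{
	unsigned int poff = 3000, plen = 5000, off = 2000, pages = 0;

	poff  += off;
	pages += poff / PAGE_SIZE;	/* offset crossed into the next page */
	poff  %= PAGE_SIZE;
	plen  -= off;
	printf("page+%u poff=%u plen=%u\n", pages, poff, plen);
	/* prints: page+1 poff=904 plen=3000 */
	return 0;
}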
+static inline int __splice_segment(struct page *page, unsigned int poff,
+                                  unsigned int plen, unsigned int *off,
+                                  unsigned int *len, struct sk_buff *skb,
+                                  struct splice_pipe_desc *spd)
+{
+       if (!*len)
+               return 1;
+
+       /* skip this segment if already processed */
+       if (*off >= plen) {
+               *off -= plen;
+               return 0;
+       }
+
+       /* ignore any bits we already processed */
+       if (*off) {
+               __segment_seek(&page, &poff, &plen, *off);
+               *off = 0;
+       }
+
+       do {
+               unsigned int flen = min(*len, plen);
+
+               /* the linear region may spread across several pages  */
+               flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
+
+               if (spd_fill_page(spd, page, flen, poff, skb))
+                       return 1;
+
+               __segment_seek(&page, &poff, &plen, flen);
+               *len -= flen;
+
+       } while (*len && plen);
+
+       return 0;
+}
+
+/*
+ * Map linear and fragment data from the skb to spd. It reports failure if the
+ * pipe is full or if we already spliced the requested length.
+ */
+static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
+                     unsigned int *len,
+                     struct splice_pipe_desc *spd)
+{
+       int seg;
+
+       /*
+        * map the linear part
+        */
+       if (__splice_segment(virt_to_page(skb->data),
+                            (unsigned long) skb->data & (PAGE_SIZE - 1),
+                            skb_headlen(skb),
+                            offset, len, skb, spd))
+               return 1;
+
+       /*
+        * then map the fragments
+        */
+       for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
+               const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+
+               if (__splice_segment(f->page, f->page_offset, f->size,
+                                    offset, len, skb, spd))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Map data from the skb to a pipe. Should handle both the linear part,
+ * the fragments, and the frag list. It does NOT handle frag lists within
+ * the frag list, if such a thing exists. We'd probably need to recurse to
+ * handle that cleanly.
+ */
+int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int tlen,
+                   unsigned int flags)
+{
+       struct partial_page partial[PIPE_BUFFERS];
+       struct page *pages[PIPE_BUFFERS];
+       struct splice_pipe_desc spd = {
+               .pages = pages,
+               .partial = partial,
+               .flags = flags,
+               .ops = &sock_pipe_buf_ops,
+               .spd_release = sock_spd_release,
+       };
+       struct sk_buff *skb;
+
+       /*
+        * I'd love to avoid the clone here, but tcp_read_sock()
+        * ignores reference counts and unconditionally kills the sk_buff
+        * on return from the actor.
+        */
+       skb = skb_clone(__skb, GFP_KERNEL);
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       /*
+        * __skb_splice_bits() only fails if the output has no room left,
+        * so no point in going over the frag_list for the error case.
+        */
+       if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+               goto done;
+       else if (!tlen)
+               goto done;
+
+       /*
+        * now see if we have a frag_list to map
+        */
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list && tlen; list = list->next) {
+                       if (__skb_splice_bits(list, &offset, &tlen, &spd))
+                               break;
+               }
+       }
+
+done:
+       /*
+        * drop our reference to the clone, the pipe consumption will
+        * drop the rest.
+        */
+       kfree_skb(skb);
+
+       if (spd.nr_pages) {
+               int ret;
+               struct sock *sk = __skb->sk;
+
+               /*
+                * Drop the socket lock, otherwise we have reverse
+                * locking dependencies between sk_lock and i_mutex
+                * here as compared to sendfile(). We enter here
+                * with the socket lock held, and splice_to_pipe() will
+                * grab the pipe inode lock. For sendfile() emulation,
+                * we call into ->sendpage() with the i_mutex lock held
+                * and networking will grab the socket lock.
+                */
+               release_sock(sk);
+               ret = splice_to_pipe(pipe, &spd);
+               lock_sock(sk);
+               return ret;
+       }
+
+       return 0;
+}
+
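skb_splice_bits() is meant to be driven from a protocol's ->splice_read() path through a read_descriptor_t actor. A hedged sketch, with my_* names that are illustrative rather than kernel API, loosely modeled on a TCP-style caller:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pipe_fs_i.h>
#include <linux/skbuff.h>

struct my_splice_state {		/* hypothetical */
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

static int my_splice_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
			   unsigned int offset, size_t len)
{
	struct my_splice_state *st = rd_desc->arg.data;

	/* hand this skb to the pipe; returns <0 on error,
	 * otherwise the number of bytes spliced */
	return skb_splice_bits(skb, offset, st->pipe,
			       min(st->len, len), st->flags);
}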
 /**
  *     skb_store_bits - store bits from kernel buffer to skb
  *     @skb: destination buffer
@@ -1103,7 +1528,7 @@ fault:
  *     traversing fragment lists and such.
  */
 
-int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
        int i, copy;
        int start = skb_headlen(skb);
@@ -1114,7 +1539,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
-               memcpy(skb->data + offset, from, copy);
+               skb_copy_to_linear_data_offset(skb, offset, from, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
@@ -1125,7 +1550,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + frag->size;
                if ((copy = end - offset) > 0) {
@@ -1153,7 +1578,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
                for (; list; list = list->next) {
                        int end;
 
-                       BUG_TRAP(start <= offset + len);
+                       WARN_ON(start > offset + len);
 
                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
@@ -1202,7 +1627,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
@@ -1231,7 +1656,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                for (; list; list = list->next) {
                        int end;
 
-                       BUG_TRAP(start <= offset + len);
+                       WARN_ON(start > offset + len);
 
                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
@@ -1279,7 +1704,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
 
-               BUG_TRAP(start <= offset + len);
+               WARN_ON(start > offset + len);
 
                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
@@ -1312,7 +1737,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                        __wsum csum2;
                        int end;
 
-                       BUG_TRAP(start <= offset + len);
+                       WARN_ON(start > offset + len);
 
                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
@@ -1341,13 +1766,13 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
        long csstart;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
-               csstart = skb_transport_offset(skb);
+               csstart = skb->csum_start - skb_headroom(skb);
        else
                csstart = skb_headlen(skb);
 
        BUG_ON(csstart > skb_headlen(skb));
 
-       memcpy(to, skb->data, csstart);
+       skb_copy_from_linear_data(skb, to, csstart);
 
        csum = 0;
        if (csstart != skb->len)
@@ -1489,7 +1914,7 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        unsigned long flags;
 
        spin_lock_irqsave(&list->lock, flags);
-       __skb_append(old, newsk, list);
+       __skb_queue_after(list, old, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
 
@@ -1515,27 +1940,14 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        spin_unlock_irqrestore(&list->lock, flags);
 }
 
-#if 0
-/*
- *     Tune the memory allocator for a new MTU size.
- */
-void skb_add_mtu(int mtu)
-{
-       /* Must match allocation in alloc_skb */
-       mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
-
-       kmem_add_cache_size(mtu);
-}
-#endif
-
 static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
                                           const u32 len, const int pos)
 {
        int i;
 
-       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
-
+       skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
+                                        pos - len);
        /* And move data appendix as is. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -1606,6 +2018,148 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
                skb_split_no_header(skb, skb1, len, pos);
 }
 
+/* Shifting from/to a cloned skb is a no-go.
+ *
+ * Caller cannot keep skb_shinfo related pointers past calling here!
+ */
+static int skb_prepare_for_shift(struct sk_buff *skb)
+{
+       return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/**
+ * skb_shift - Shifts paged data partially from skb to another
+ * @tgt: buffer into which tail data gets added
+ * @skb: buffer from which the paged data comes
+ * @shiftlen: shift up to this many bytes
+ *
+ * Attempts to shift up to shiftlen worth of bytes, which may be less than
+ * the length of the skb, from skb to tgt. Returns the number of bytes
+ * shifted. It's up to the caller to free skb if everything was shifted.
+ *
+ * If @tgt runs out of frags, the whole operation is aborted.
+ *
+ * Skb cannot include anything else but paged data while tgt is allowed
+ * to have non-paged data as well.
+ *
+ * TODO: full sized shift could be optimized but that would need
+ * specialized skb free'er to handle frags without up-to-date nr_frags.
+ */
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
+{
+       int from, to, merge, todo;
+       struct skb_frag_struct *fragfrom, *fragto;
+
+       BUG_ON(shiftlen > skb->len);
+       BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
+
+       todo = shiftlen;
+       from = 0;
+       to = skb_shinfo(tgt)->nr_frags;
+       fragfrom = &skb_shinfo(skb)->frags[from];
+
+       /* Actual merge is delayed until the point when we know we can
+        * commit all, so that we don't have to undo partial changes
+        */
+       if (!to ||
+           !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+               merge = -1;
+       } else {
+               merge = to - 1;
+
+               todo -= fragfrom->size;
+               if (todo < 0) {
+                       if (skb_prepare_for_shift(skb) ||
+                           skb_prepare_for_shift(tgt))
+                               return 0;
+
+                       /* All previous frag pointers might be stale! */
+                       fragfrom = &skb_shinfo(skb)->frags[from];
+                       fragto = &skb_shinfo(tgt)->frags[merge];
+
+                       fragto->size += shiftlen;
+                       fragfrom->size -= shiftlen;
+                       fragfrom->page_offset += shiftlen;
+
+                       goto onlymerged;
+               }
+
+               from++;
+       }
+
+       /* Skip full, not-fitting skb to avoid expensive operations */
+       if ((shiftlen == skb->len) &&
+           (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
+               return 0;
+
+       if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
+               return 0;
+
+       while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+               if (to == MAX_SKB_FRAGS)
+                       return 0;
+
+               fragfrom = &skb_shinfo(skb)->frags[from];
+               fragto = &skb_shinfo(tgt)->frags[to];
+
+               if (todo >= fragfrom->size) {
+                       *fragto = *fragfrom;
+                       todo -= fragfrom->size;
+                       from++;
+                       to++;
+
+               } else {
+                       get_page(fragfrom->page);
+                       fragto->page = fragfrom->page;
+                       fragto->page_offset = fragfrom->page_offset;
+                       fragto->size = todo;
+
+                       fragfrom->page_offset += todo;
+                       fragfrom->size -= todo;
+                       todo = 0;
+
+                       to++;
+                       break;
+               }
+       }
+
+       /* Ready to "commit" this state change to tgt */
+       skb_shinfo(tgt)->nr_frags = to;
+
+       if (merge >= 0) {
+               fragfrom = &skb_shinfo(skb)->frags[0];
+               fragto = &skb_shinfo(tgt)->frags[merge];
+
+               fragto->size += fragfrom->size;
+               put_page(fragfrom->page);
+       }
+
+       /* Reposition in the original skb */
+       to = 0;
+       while (from < skb_shinfo(skb)->nr_frags)
+               skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+       skb_shinfo(skb)->nr_frags = to;
+
+       BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+onlymerged:
+       /* Most likely the tgt won't ever need its checksum anymore, skb on
+        * the other hand might need it if it needs to be resent
+        */
+       tgt->ip_summed = CHECKSUM_PARTIAL;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+
+       /* Yak, is it really working this way? Some helper please? */
+       skb->len -= shiftlen;
+       skb->data_len -= shiftlen;
+       skb->truesize -= shiftlen;
+       tgt->len += shiftlen;
+       tgt->data_len += shiftlen;
+       tgt->truesize += shiftlen;
+
+       return shiftlen;
+}
+
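A hedged usage sketch in the spirit of TCP's SACK collapsing: drain a neighbouring skb (which, per the BUG_ON above, must hold only paged data) into the preceding buffer:

#include <linux/skbuff.h>

static void shift_example(struct sk_buff *prev, struct sk_buff *skb)
{
	int want = skb->len;

	if (skb_shift(prev, skb, want) == want)
		kfree_skb(skb);	/* fully drained; assumes skb was unlinked */
}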
 /**
  * skb_prepare_seq_read - Prepare a sequential read of skb data
  * @skb: the buffer to read
@@ -1643,11 +2197,11 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
  * of bytes already consumed and the next call to
  * skb_seq_read() will return the remaining part of the block.
  *
- * Note: The size of each block of data returned can be arbitary,
+ * Note 1: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non-linear data.
  *
- * Note: Fragment lists within fragments are not implemented
+ * Note 2: Fragment lists within fragments are not implemented
  *       at the moment, state->root_skb could be replaced with
  *       a stack for this purpose.
  */
@@ -1694,6 +2248,11 @@ next_skb:
                st->stepped_offset += frag->size;
        }
 
+       if (st->frag_data) {
+               kunmap_skb_frag(st->frag_data);
+               st->frag_data = NULL;
+       }
+
        if (st->cur_skb->next) {
                st->cur_skb = st->cur_skb->next;
                st->frag_idx = 0;
@@ -1837,11 +2396,10 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
- *     @start: start of data before pull
  *     @len: length of data pulled
  *
  *     This function performs an skb_pull on the packet and updates
- *     update the CHECKSUM_COMPLETE checksum.  It should be used on
+ *     the CHECKSUM_COMPLETE checksum.  It should be used on
  *     receive path processing instead of skb_pull unless you know
  *     that the checksum difference is zero (e.g., a valid IP header)
  *     or you are setting ip_summed to CHECKSUM_NONE.
@@ -1863,13 +2421,14 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *     @features: features for the output path (see dev->features)
  *
  *     This function performs segmentation on the given skb.  It returns
- *     the segment at the given position.  It returns NULL if there are
- *     no more segments to generate, or when an error is encountered.
+ *     a pointer to the first in a list of new skbs for the segments.
+ *     In case of error it returns ERR_PTR(err).
  */
 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
+       struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int doffset = skb->data - skb_mac_header(skb);
        unsigned int offset = doffset;
@@ -1889,7 +2448,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                struct sk_buff *nskb;
                skb_frag_t *frag;
                int hsize;
-               int k;
                int size;
 
                len = skb->len - offset;
@@ -1902,9 +2460,36 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                if (hsize > len || !sg)
                        hsize = len;
 
-               nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
-               if (unlikely(!nskb))
-                       goto err;
+               if (!hsize && i >= nfrags) {
+                       BUG_ON(fskb->len != len);
+
+                       pos += len;
+                       nskb = skb_clone(fskb, GFP_ATOMIC);
+                       fskb = fskb->next;
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       hsize = skb_end_pointer(nskb) - nskb->head;
+                       if (skb_cow_head(nskb, doffset + headroom)) {
+                               kfree_skb(nskb);
+                               goto err;
+                       }
+
+                       nskb->truesize += skb_end_pointer(nskb) - nskb->head -
+                                         hsize;
+                       skb_release_head_state(nskb);
+                       __skb_push(nskb, doffset);
+               } else {
+                       nskb = alloc_skb(hsize + doffset + headroom,
+                                        GFP_ATOMIC);
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       skb_reserve(nskb, headroom);
+                       __skb_put(nskb, doffset);
+               }
 
                if (segs)
                        tail->next = nskb;
@@ -1912,22 +2497,20 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                        segs = nskb;
                tail = nskb;
 
-               nskb->dev = skb->dev;
-               nskb->priority = skb->priority;
-               nskb->protocol = skb->protocol;
-               nskb->dst = dst_clone(skb->dst);
-               memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-               nskb->pkt_type = skb->pkt_type;
+               __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               skb_reserve(nskb, headroom);
                skb_reset_mac_header(nskb);
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
                                          skb_network_header_len(skb));
-               memcpy(skb_put(nskb, doffset), skb->data, doffset);
+               skb_copy_from_linear_data(skb, nskb->data, doffset);
+
+               if (pos >= offset + len)
+                       continue;
 
                if (!sg) {
+                       nskb->ip_summed = CHECKSUM_NONE;
                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
                                                            skb_put(nskb, len),
                                                            len, 0);
@@ -1935,15 +2518,11 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                }
 
                frag = skb_shinfo(nskb)->frags;
-               k = 0;
-
-               nskb->ip_summed = CHECKSUM_PARTIAL;
-               nskb->csum = skb->csum;
-               memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
 
-               while (pos < offset + len) {
-                       BUG_ON(i >= nfrags);
+               skb_copy_from_linear_data_offset(skb, offset,
+                                                skb_put(nskb, hsize), hsize);
 
+               while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
                        get_page(frag->page);
                        size = frag->size;
@@ -1953,20 +2532,39 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                                frag->size -= offset - pos;
                        }
 
-                       k++;
+                       skb_shinfo(nskb)->nr_frags++;
 
                        if (pos + size <= offset + len) {
                                i++;
                                pos += size;
                        } else {
                                frag->size -= pos + size - (offset + len);
-                               break;
+                               goto skip_fraglist;
                        }
 
                        frag++;
                }
 
-               skb_shinfo(nskb)->nr_frags = k;
+               if (pos < offset + len) {
+                       struct sk_buff *fskb2 = fskb;
+
+                       BUG_ON(pos + fskb->len != offset + len);
+
+                       pos += fskb->len;
+                       fskb = fskb->next;
+
+                       if (fskb2->next) {
+                               fskb2 = skb_clone(fskb2, GFP_ATOMIC);
+                               if (!fskb2)
+                                       goto err;
+                       } else
+                               skb_get(fskb2);
+
+                       BUG_ON(skb_shinfo(nskb)->frag_list);
+                       skb_shinfo(nskb)->frag_list = fskb2;
+               }
+
+skip_fraglist:
                nskb->data_len = len - hsize;
                nskb->len += nskb->data_len;
                nskb->truesize += nskb->data_len;
@@ -1984,19 +2582,304 @@ err:
 
 EXPORT_SYMBOL_GPL(skb_segment);
 
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       struct sk_buff *p = *head;
+       struct sk_buff *nskb;
+       unsigned int headroom;
+       unsigned int hlen = p->data - skb_mac_header(p);
+
+       if (hlen + p->len + skb->len >= 65536)
+               return -E2BIG;
+
+       if (skb_shinfo(p)->frag_list)
+               goto merge;
+
+       headroom = skb_headroom(p);
+       nskb = netdev_alloc_skb(p->dev, headroom);
+       if (unlikely(!nskb))
+               return -ENOMEM;
+
+       __copy_skb_header(nskb, p);
+       nskb->mac_len = p->mac_len;
+
+       skb_reserve(nskb, headroom);
+
+       skb_set_mac_header(nskb, -hlen);
+       skb_set_network_header(nskb, skb_network_offset(p));
+       skb_set_transport_header(nskb, skb_transport_offset(p));
+
+       memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);
+
+       *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
+       skb_shinfo(nskb)->frag_list = p;
+       skb_header_release(p);
+       nskb->prev = p;
+
+       nskb->data_len += p->len;
+       nskb->truesize += p->len;
+       nskb->len += p->len;
+
+       *head = nskb;
+       nskb->next = p->next;
+       p->next = NULL;
+
+       p = nskb;
+
+merge:
+       NAPI_GRO_CB(p)->count++;
+       p->prev->next = skb;
+       p->prev = skb;
+       skb_header_release(skb);
+
+       p->data_len += skb->len;
+       p->truesize += skb->len;
+       p->len += skb->len;
+
+       NAPI_GRO_CB(skb)->same_flow = 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive);
+
 void __init skb_init(void)
 {
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                             NULL, NULL);
+                                             NULL);
        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                                                (2*sizeof(struct sk_buff)) +
                                                sizeof(atomic_t),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                               NULL, NULL);
+                                               NULL);
+}
+
+/**
+ *     skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ *     @skb: Socket buffer containing the buffers to be mapped
+ *     @sg: The scatter-gather list to map into
+ *     @offset: The offset into the buffer's contents to start mapping
+ *     @len: Length of buffer space to be mapped
+ *
+ *     Fill the specified scatter-gather list with mappings/pointers into a
+ *     region of the buffer space attached to a socket buffer.
+ */
+static int
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
+       int elt = 0;
+
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               sg_set_buf(sg, skb->data + offset, copy);
+               elt++;
+               if ((len -= copy) == 0)
+                       return elt;
+               offset += copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               WARN_ON(start > offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       sg_set_page(&sg[elt], frag->page, copy,
+                                       frag->page_offset+offset-start);
+                       elt++;
+                       if (!(len -= copy))
+                               return elt;
+                       offset += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       int end;
+
+                       WARN_ON(start > offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               if (copy > len)
+                                       copy = len;
+                               elt += __skb_to_sgvec(list, sg+elt, offset - start,
+                                                     copy);
+                               if ((len -= copy) == 0)
+                                       return elt;
+                               offset += copy;
+                       }
+                       start = end;
+               }
+       }
+       BUG_ON(len);
+       return elt;
+}
+
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+       int nsg = __skb_to_sgvec(skb, sg, offset, len);
+
+       sg_mark_end(&sg[nsg - 1]);
+
+       return nsg;
+}
+
+/**
+ *     skb_cow_data - Check that a socket buffer's data buffers are writable
+ *     @skb: The socket buffer to check.
+ *     @tailbits: Amount of trailing space to be added
+ *     @trailer: Returned pointer to the skb where the @tailbits space begins
+ *
+ *     Make sure that the data buffers attached to a socket buffer are
+ *     writable. If they are not, private copies are made of the data buffers
+ *     and the socket buffer is set to use these instead.
+ *
+ *     If @tailbits is given, make sure that there is space to write @tailbits
+ *     bytes of data beyond current end of socket buffer.  @trailer will be
+ *     set to point to the skb in which this space begins.
+ *
+ *     The number of scatterlist elements required to completely map the
+ *     COW'd and extended socket buffer will be returned.
+ */
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
+{
+       int copyflag;
+       int elt;
+       struct sk_buff *skb1, **skb_p;
+
+       /* If skb is cloned or its head is paged, reallocate
+        * head pulling out all the pages (pages are considered not writable
+        * at the moment even if they are anonymous).
+        */
+       if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
+           __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
+               return -ENOMEM;
+
+       /* Easy case. Most of packets will go this way. */
+       if (!skb_shinfo(skb)->frag_list) {
+               /* A little trouble: not enough space for the trailer.
+                * This should not happen when the stack is tuned to generate
+                * good frames. OK, on a miss we reallocate and reserve even
+                * more space; 128 bytes is fair. */
+
+               if (skb_tailroom(skb) < tailbits &&
+                   pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
+                       return -ENOMEM;
+
+               /* Voila! */
+               *trailer = skb;
+               return 1;
+       }
+
+       /* Misery. We are in trouble, going to mince the fragments... */
+
+       elt = 1;
+       skb_p = &skb_shinfo(skb)->frag_list;
+       copyflag = 0;
+
+       while ((skb1 = *skb_p) != NULL) {
+               int ntail = 0;
+
+               /* The fragment is partially pulled by someone,
+                * this can happen on input. Copy it and everything
+                * after it. */
+
+               if (skb_shared(skb1))
+                       copyflag = 1;
+
+               /* If the skb is the last, worry about trailer. */
+
+               if (skb1->next == NULL && tailbits) {
+                       if (skb_shinfo(skb1)->nr_frags ||
+                           skb_shinfo(skb1)->frag_list ||
+                           skb_tailroom(skb1) < tailbits)
+                               ntail = tailbits + 128;
+               }
+
+               if (copyflag ||
+                   skb_cloned(skb1) ||
+                   ntail ||
+                   skb_shinfo(skb1)->nr_frags ||
+                   skb_shinfo(skb1)->frag_list) {
+                       struct sk_buff *skb2;
+
+                       /* Fuck, we are miserable poor guys... */
+                       if (ntail == 0)
+                               skb2 = skb_copy(skb1, GFP_ATOMIC);
+                       else
+                               skb2 = skb_copy_expand(skb1,
+                                                      skb_headroom(skb1),
+                                                      ntail,
+                                                      GFP_ATOMIC);
+                       if (unlikely(skb2 == NULL))
+                               return -ENOMEM;
+
+                       if (skb1->sk)
+                               skb_set_owner_w(skb2, skb1->sk);
+
+                       /* Looking around. Are we still alive?
+                        * OK, link new skb, drop old one */
+
+                       skb2->next = skb1->next;
+                       *skb_p = skb2;
+                       kfree_skb(skb1);
+                       skb1 = skb2;
+               }
+               elt++;
+               *trailer = skb1;
+               skb_p = &skb1->next;
+       }
+
+       return elt;
+}
+
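A hedged sketch of how an IPsec-style caller pairs skb_cow_data() with skb_to_sgvec(); error handling is trimmed and the function name is illustrative. The caller must size sg[] for the element count skb_cow_data() returns:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

static int cow_and_map_example(struct sk_buff *skb, struct scatterlist *sg,
			       int tailbits)
{
	struct sk_buff *trailer;
	int nsg = skb_cow_data(skb, tailbits, &trailer);

	if (nsg < 0)
		return nsg;
	sg_init_table(sg, nsg);
	/* map the whole (now writable) buffer for a crypto operation */
	return skb_to_sgvec(skb, sg, 0, skb->len);
}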
+/**
+ * skb_partial_csum_set - set up and verify partial csum values for packet
+ * @skb: the skb to set
+ * @start: the number of bytes after skb->data to start checksumming.
+ * @off: the offset from start to place the checksum.
+ *
+ * For untrusted partially-checksummed packets, we need to make sure the values
+ * for skb->csum_start and skb->csum_offset are valid so we don't oops.
+ *
+ * This function checks and sets those values and skb->ip_summed: if this
+ * returns false you should drop the packet.
+ */
+bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
+{
+       if (unlikely(start > skb->len - 2) ||
+           unlikely((int)start + off > skb->len - 2)) {
+               if (net_ratelimit())
+                       printk(KERN_WARNING
+                              "bad partial csum: csum=%u/%u len=%u\n",
+                              start, off, skb->len);
+               return false;
+       }
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       skb->csum_start = skb_headroom(skb) + start;
+       skb->csum_offset = off;
+       return true;
+}
+
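A minimal sketch of the intended caller pattern (virtio-style, where the checksum metadata arrives from an untrusted source); the wrapper name is illustrative:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int csum_setup_example(struct sk_buff *skb, u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;		/* caller should drop the packet */
	return 0;
}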
+void __skb_warn_lro_forwarding(const struct sk_buff *skb)
+{
+       if (net_ratelimit())
+               pr_warning("%s: received packets cannot be forwarded"
+                          " while LRO is enabled\n", skb->dev->name);
 }
 
 EXPORT_SYMBOL(___pskb_trim);
@@ -2009,7 +2892,6 @@ EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);
 EXPORT_SYMBOL(skb_clone);
-EXPORT_SYMBOL(skb_clone_fraglist);
 EXPORT_SYMBOL(skb_copy);
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 EXPORT_SYMBOL(skb_copy_and_csum_dev);
@@ -2033,3 +2915,8 @@ EXPORT_SYMBOL(skb_seq_read);
 EXPORT_SYMBOL(skb_abort_seq_read);
 EXPORT_SYMBOL(skb_find_text);
 EXPORT_SYMBOL(skb_append_datato_frags);
+EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+EXPORT_SYMBOL_GPL(skb_cow_data);
+EXPORT_SYMBOL_GPL(skb_partial_csum_set);