Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[safe/jmp/linux-2.6] net/core/skbuff.c
index 7f7bb1a..e5e2111 100644
@@ -1,7 +1,7 @@
 /*
  *     Routines having to do with the 'struct sk_buff' memory handlers.
  *
- *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
+ *     Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
  *                     Florian La Roche <rzsfl@rz.uni-sb.de>
  *
  *     Fixes:
@@ -55,6 +55,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/scatterlist.h>
+#include <linux/errqueue.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -73,17 +74,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
 {
-       struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-       kfree_skb(skb);
+       put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-       skb_get(skb);
+       get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -127,6 +124,7 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
+EXPORT_SYMBOL(skb_over_panic);
 
 /**
  *     skb_under_panic -       private function
@@ -146,14 +144,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
-
-void skb_truesize_bug(struct sk_buff *skb)
-{
-       printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
-              "len=%u, sizeof(sk_buff)=%Zd\n",
-              skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
+EXPORT_SYMBOL(skb_under_panic);
 
 /*     Allocate a new skbuff. We do this ourselves so we can fill in a few
  *     'private' fields and also do memory statistics to find all the
@@ -217,7 +208,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        shinfo->gso_segs = 0;
        shinfo->gso_type = 0;
        shinfo->ip6_frag_id = 0;
+       shinfo->tx_flags.flags = 0;
        shinfo->frag_list = NULL;
+       memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
        if (fclone) {
                struct sk_buff *child = skb + 1;
@@ -235,6 +228,7 @@ nodata:
        skb = NULL;
        goto out;
 }
+EXPORT_SYMBOL(__alloc_skb);
 
 /**
  *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -262,6 +256,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        }
        return skb;
 }
+EXPORT_SYMBOL(__netdev_alloc_skb);
 
 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
 {
@@ -430,6 +425,7 @@ void __kfree_skb(struct sk_buff *skb)
        skb_release_all(skb);
        kfree_skbmem(skb);
 }
+EXPORT_SYMBOL(__kfree_skb);
 
 /**
  *     kfree_skb - free an sk_buff
@@ -448,7 +444,20 @@ void kfree_skb(struct sk_buff *skb)
                return;
        __kfree_skb(skb);
 }
+EXPORT_SYMBOL(kfree_skb);
 
+/**
+ *     skb_recycle_check - check if skb can be reused for receive
+ *     @skb: buffer
+ *     @skb_size: minimum receive buffer size
+ *
+ *     Checks that the skb passed in is not shared or cloned, that it
+ *     is linear, and that its head portion is at least as large as
+ *     skb_size, so that it can be recycled as a receive buffer.
+ *     If these conditions are met, this function does any necessary
+ *     reference count dropping and cleans up the skbuff as if it
+ *     just came from __alloc_skb().
+ */
 int skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
        struct skb_shared_info *shinfo;
@@ -474,8 +483,8 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
        shinfo->frag_list = NULL;
 
        memset(skb, 0, offsetof(struct sk_buff, tail));
-       skb_reset_tail_pointer(skb);
        skb->data = skb->head + NET_SKB_PAD;
+       skb_reset_tail_pointer(skb);
 
        return 1;
 }
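
How a driver would use the recycle path above: a minimal sketch, assuming a
hypothetical driver (my_priv, my_post_rx_buffer and MY_RX_BUF_SIZE are
illustrative names, not part of this patch):

	/* rx-buffer recycling in a hypothetical driver's cleanup path */
	static void my_reuse_or_free(struct my_priv *priv, struct sk_buff *skb)
	{
		/*
		 * skb_recycle_check() succeeds only for a private, linear
		 * skb whose head is large enough; it then resets the skb
		 * as if it came fresh from __alloc_skb().
		 */
		if (skb_recycle_check(skb, MY_RX_BUF_SIZE))
			my_post_rx_buffer(priv, skb);	/* back onto the rx ring */
		else
			dev_kfree_skb(skb);		/* fall back to the allocator */
	}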
@@ -489,7 +498,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        new->dst                = dst_clone(old->dst);
-#ifdef CONFIG_INET
+#ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
 #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
@@ -544,6 +553,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        C(truesize);
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
        C(do_not_encrypt);
+       C(requeue);
 #endif
        atomic_set(&n->users, 1);
 
@@ -604,6 +614,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 
        return __skb_clone(n, skb);
 }
+EXPORT_SYMBOL(skb_clone);
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
@@ -670,7 +681,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        copy_skb_header(n, skb);
        return n;
 }
-
+EXPORT_SYMBOL(skb_copy);
 
 /**
  *     pskb_copy       -       create copy of an sk_buff with private head.
@@ -729,6 +740,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
        return n;
 }
+EXPORT_SYMBOL(pskb_copy);
 
 /**
  *     pskb_expand_head - reallocate header of &sk_buff
@@ -812,6 +824,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 nodata:
        return -ENOMEM;
 }
+EXPORT_SYMBOL(pskb_expand_head);
 
 /* Make private copy of skb with writable head and some headroom */
 
@@ -832,7 +845,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
        }
        return skb2;
 }
-
+EXPORT_SYMBOL(skb_realloc_headroom);
 
 /**
  *     skb_copy_expand -       copy and expand sk_buff
@@ -897,6 +910,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
        return n;
 }
+EXPORT_SYMBOL(skb_copy_expand);
 
 /**
  *     skb_pad                 -       zero pad the tail of an skb
@@ -942,6 +956,7 @@ free_skb:
        kfree_skb(skb);
        return err;
 }
+EXPORT_SYMBOL(skb_pad);
 
 /**
  *     skb_put - add data to a buffer
@@ -1099,6 +1114,7 @@ done:
 
        return 0;
 }
+EXPORT_SYMBOL(___pskb_trim);
 
 /**
  *     __pskb_pull_tail - advance tail of skb header
@@ -1192,8 +1208,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
-                                       if (clone)
-                                               kfree_skb(clone);
+                                       kfree_skb(clone);
                                        return NULL;
                                }
                                break;
@@ -1237,6 +1252,7 @@ pull_pages:
 
        return skb_tail_pointer(skb);
 }
+EXPORT_SYMBOL(__pskb_pull_tail);
 
 /* Copy some data bits from skb to kernel buffer. */
 
@@ -1314,6 +1330,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 fault:
        return -EFAULT;
 }
+EXPORT_SYMBOL(skb_copy_bits);
 
 /*
  * Callback from splice_to_pipe(), if we need to release some pages
@@ -1321,34 +1338,81 @@ fault:
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-       struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+       put_page(spd->pages[i]);
+}
+
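+/*
+ * Copy a chunk of the skb's linear data into the socket's private
+ * sndmsg page, so that the pipe buffer ends up holding a page
+ * reference rather than pointing into the skb head.  Returns the
+ * page holding the copy, or NULL if no page could be allocated.
+ */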
+static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+                                         unsigned int *offset,
+                                         struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       struct page *p = sk->sk_sndmsg_page;
+       unsigned int off;
 
-       kfree_skb(skb);
+       if (!p) {
+new_page:
+               p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
+               if (!p)
+                       return NULL;
+
+               off = sk->sk_sndmsg_off = 0;
+               /* hold one ref to this page until it's full */
+       } else {
+               unsigned int mlen;
+
+               off = sk->sk_sndmsg_off;
+               mlen = PAGE_SIZE - off;
+               if (mlen < 64 && mlen < *len) {
+                       put_page(p);
+                       goto new_page;
+               }
+
+               *len = min_t(unsigned int, *len, mlen);
+       }
+
+       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+       sk->sk_sndmsg_off += *len;
+       *offset = off;
+       get_page(p);
+
+       return p;
 }
 
 /*
  * Fill page/offset/length into spd, if it can hold more pages.
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
-                               unsigned int len, unsigned int offset,
-                               struct sk_buff *skb)
+                               unsigned int *len, unsigned int offset,
+                               struct sk_buff *skb, int linear)
 {
        if (unlikely(spd->nr_pages == PIPE_BUFFERS))
                return 1;
 
+       if (linear) {
+               page = linear_to_page(page, len, &offset, skb);
+               if (!page)
+                       return 1;
+       } else
+               get_page(page);
+
        spd->pages[spd->nr_pages] = page;
-       spd->partial[spd->nr_pages].len = len;
+       spd->partial[spd->nr_pages].len = *len;
        spd->partial[spd->nr_pages].offset = offset;
-       spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
        spd->nr_pages++;
+
        return 0;
 }
 
 static inline void __segment_seek(struct page **page, unsigned int *poff,
                                  unsigned int *plen, unsigned int off)
 {
+       unsigned long n;
+
        *poff += off;
-       *page += *poff / PAGE_SIZE;
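+       /* nth_page(): plain pointer arithmetic on struct page is not
+        * valid when mem_map is not contiguous */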
+       n = *poff / PAGE_SIZE;
+       if (n)
+               *page = nth_page(*page, n);
+
        *poff = *poff % PAGE_SIZE;
        *plen -= off;
 }
@@ -1356,7 +1420,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
                                   unsigned int plen, unsigned int *off,
                                   unsigned int *len, struct sk_buff *skb,
-                                  struct splice_pipe_desc *spd)
+                                  struct splice_pipe_desc *spd, int linear)
 {
        if (!*len)
                return 1;
@@ -1379,7 +1443,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
                /* the linear region may spread across several pages  */
                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-               if (spd_fill_page(spd, page, flen, poff, skb))
+               if (spd_fill_page(spd, page, &flen, poff, skb, linear))
                        return 1;
 
                __segment_seek(&page, &poff, &plen, flen);
@@ -1406,7 +1470,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
        if (__splice_segment(virt_to_page(skb->data),
                             (unsigned long) skb->data & (PAGE_SIZE - 1),
                             skb_headlen(skb),
-                            offset, len, skb, spd))
+                            offset, len, skb, spd, 1))
                return 1;
 
        /*
@@ -1416,7 +1480,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
                if (__splice_segment(f->page, f->page_offset, f->size,
-                                    offset, len, skb, spd))
+                                    offset, len, skb, spd, 0))
                        return 1;
        }
 
@@ -1429,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                    struct pipe_inode_info *pipe, unsigned int tlen,
                    unsigned int flags)
 {
@@ -1442,16 +1506,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
        };
-       struct sk_buff *skb;
-
-       /*
-        * I'd love to avoid the clone here, but tcp_read_sock()
-        * ignores reference counts and unconditonally kills the sk_buff
-        * on return from the actor.
-        */
-       skb = skb_clone(__skb, GFP_KERNEL);
-       if (unlikely(!skb))
-               return -ENOMEM;
 
        /*
         * __skb_splice_bits() only fails if the output has no room left,
@@ -1475,15 +1529,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
        }
 
 done:
-       /*
-        * drop our reference to the clone, the pipe consumption will
-        * drop the rest.
-        */
-       kfree_skb(skb);
-
        if (spd.nr_pages) {
+               struct sock *sk = skb->sk;
                int ret;
-               struct sock *sk = __skb->sk;
 
                /*
                 * Drop the socket lock, otherwise we have reverse
@@ -1588,7 +1636,6 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 fault:
        return -EFAULT;
 }
-
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
@@ -1665,6 +1712,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
        return csum;
 }
+EXPORT_SYMBOL(skb_checksum);
 
 /* Both of above in one bottle. */
 
@@ -1746,6 +1794,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
        BUG_ON(len);
        return csum;
 }
+EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
@@ -1772,6 +1821,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
        }
 }
+EXPORT_SYMBOL(skb_copy_and_csum_dev);
 
 /**
  *     skb_dequeue - remove from the head of the queue
@@ -1792,6 +1842,7 @@ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
 }
+EXPORT_SYMBOL(skb_dequeue);
 
 /**
  *     skb_dequeue_tail - remove from the tail of the queue
@@ -1811,6 +1862,7 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
 }
+EXPORT_SYMBOL(skb_dequeue_tail);
 
 /**
  *     skb_queue_purge - empty a list
@@ -1826,6 +1878,7 @@ void skb_queue_purge(struct sk_buff_head *list)
        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
 }
+EXPORT_SYMBOL(skb_queue_purge);
 
 /**
  *     skb_queue_head - queue a buffer at the list head
@@ -1846,6 +1899,7 @@ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_queue_head);
 
 /**
  *     skb_queue_tail - queue a buffer at the list tail
@@ -1866,6 +1920,7 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_queue_tail);
 
 /**
  *     skb_unlink      -       remove a buffer from a list
@@ -1885,6 +1940,7 @@ void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        __skb_unlink(skb, list);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_unlink);
 
 /**
  *     skb_append      -       append a buffer
@@ -1904,7 +1960,7 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        __skb_queue_after(list, old, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
-
+EXPORT_SYMBOL(skb_append);
 
 /**
  *     skb_insert      -       insert a buffer
@@ -1926,6 +1982,7 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        __skb_insert(newsk, old->prev, old, list);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_insert);
 
 static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
@@ -2004,6 +2061,149 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
        else            /* Second chunk has no header, nothing to copy. */
                skb_split_no_header(skb, skb1, len, pos);
 }
+EXPORT_SYMBOL(skb_split);
+
+/* Shifting from/to a cloned skb is a no-go.
+ *
+ * Caller cannot keep skb_shinfo related pointers past calling here!
+ */
+static int skb_prepare_for_shift(struct sk_buff *skb)
+{
+       return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/**
+ * skb_shift - Shifts paged data partially from skb to another
+ * @tgt: buffer into which tail data gets added
+ * @skb: buffer from which the paged data comes
+ * @shiftlen: shift up to this many bytes
+ *
+ * Attempts to shift up to shiftlen worth of bytes, which may be less than
+ * the length of the skb, from @skb to @tgt. Returns the number of bytes
+ * shifted. It is up to the caller to free @skb if everything was shifted.
+ *
+ * If @tgt runs out of frags, the whole operation is aborted.
+ *
+ * @skb may not include anything other than paged data, while @tgt is
+ * allowed to contain non-paged data as well.
+ *
+ * TODO: a full-sized shift could be optimized, but that would need a
+ * specialized skb free'er to handle frags without an up-to-date nr_frags.
+ */
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
+{
+       int from, to, merge, todo;
+       struct skb_frag_struct *fragfrom, *fragto;
+
+       BUG_ON(shiftlen > skb->len);
+       BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
+
+       todo = shiftlen;
+       from = 0;
+       to = skb_shinfo(tgt)->nr_frags;
+       fragfrom = &skb_shinfo(skb)->frags[from];
+
+       /* Actual merge is delayed until the point when we know we can
+        * commit all, so that we don't have to undo partial changes
+        */
+       if (!to ||
+           !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+               merge = -1;
+       } else {
+               merge = to - 1;
+
+               todo -= fragfrom->size;
+               if (todo < 0) {
+                       if (skb_prepare_for_shift(skb) ||
+                           skb_prepare_for_shift(tgt))
+                               return 0;
+
+                       /* All previous frag pointers might be stale! */
+                       fragfrom = &skb_shinfo(skb)->frags[from];
+                       fragto = &skb_shinfo(tgt)->frags[merge];
+
+                       fragto->size += shiftlen;
+                       fragfrom->size -= shiftlen;
+                       fragfrom->page_offset += shiftlen;
+
+                       goto onlymerged;
+               }
+
+               from++;
+       }
+
+       /* Skip full, not-fitting skb to avoid expensive operations */
+       if ((shiftlen == skb->len) &&
+           (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
+               return 0;
+
+       if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
+               return 0;
+
+       while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+               if (to == MAX_SKB_FRAGS)
+                       return 0;
+
+               fragfrom = &skb_shinfo(skb)->frags[from];
+               fragto = &skb_shinfo(tgt)->frags[to];
+
+               if (todo >= fragfrom->size) {
+                       *fragto = *fragfrom;
+                       todo -= fragfrom->size;
+                       from++;
+                       to++;
+
+               } else {
+                       get_page(fragfrom->page);
+                       fragto->page = fragfrom->page;
+                       fragto->page_offset = fragfrom->page_offset;
+                       fragto->size = todo;
+
+                       fragfrom->page_offset += todo;
+                       fragfrom->size -= todo;
+                       todo = 0;
+
+                       to++;
+                       break;
+               }
+       }
+
+       /* Ready to "commit" this state change to tgt */
+       skb_shinfo(tgt)->nr_frags = to;
+
+       if (merge >= 0) {
+               fragfrom = &skb_shinfo(skb)->frags[0];
+               fragto = &skb_shinfo(tgt)->frags[merge];
+
+               fragto->size += fragfrom->size;
+               put_page(fragfrom->page);
+       }
+
+       /* Reposition in the original skb */
+       to = 0;
+       while (from < skb_shinfo(skb)->nr_frags)
+               skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+       skb_shinfo(skb)->nr_frags = to;
+
+       BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+onlymerged:
+       /* Most likely the tgt won't ever need its checksum anymore; the
+        * skb, on the other hand, might if it has to be retransmitted.
+        */
+       tgt->ip_summed = CHECKSUM_PARTIAL;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+
+       /* Yak, is it really working this way? Some helper please? */
+       skb->len -= shiftlen;
+       skb->data_len -= shiftlen;
+       skb->truesize -= shiftlen;
+       tgt->len += shiftlen;
+       tgt->data_len += shiftlen;
+       tgt->truesize += shiftlen;
+
+       return shiftlen;
+}
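
A hedged usage sketch for skb_shift(): the caller below is hypothetical
(nothing in this patch defines it), but it shows the contract — @skb must
carry only paged data, and the caller frees it once everything has moved:

	/* try to coalesce all of skb's paged payload into prev */
	static int my_try_coalesce(struct sk_buff *prev, struct sk_buff *skb)
	{
		int shifted;

		/* skb_shift() BUGs if skb still has linear data */
		if (skb_headlen(skb))
			return 0;

		shifted = skb_shift(prev, skb, skb->len);
		if (shifted == skb->len) {
			kfree_skb(skb);		/* fully shifted: caller frees */
			return 1;
		}
		return 0;			/* partial or no shift */
	}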
 
 /**
  * skb_prepare_seq_read - Prepare a sequential read of skb data
@@ -2024,6 +2224,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
 }
+EXPORT_SYMBOL(skb_prepare_seq_read);
 
 /**
  * skb_seq_read - Sequentially read skb data
@@ -2060,10 +2261,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
                return 0;
 
 next_skb:
-       block_limit = skb_headlen(st->cur_skb);
+       block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
        if (abs_offset < block_limit) {
-               *data = st->cur_skb->data + abs_offset;
+               *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                return block_limit - abs_offset;
        }
 
@@ -2098,18 +2299,20 @@ next_skb:
                st->frag_data = NULL;
        }
 
-       if (st->cur_skb->next) {
-               st->cur_skb = st->cur_skb->next;
+       if (st->root_skb == st->cur_skb &&
+           skb_shinfo(st->root_skb)->frag_list) {
+               st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
                st->frag_idx = 0;
                goto next_skb;
-       } else if (st->root_skb == st->cur_skb &&
-                  skb_shinfo(st->root_skb)->frag_list) {
-               st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+       } else if (st->cur_skb->next) {
+               st->cur_skb = st->cur_skb->next;
+               st->frag_idx = 0;
                goto next_skb;
        }
 
        return 0;
 }
+EXPORT_SYMBOL(skb_seq_read);
 
 /**
  * skb_abort_seq_read - Abort a sequential read of skb data
@@ -2123,6 +2326,7 @@ void skb_abort_seq_read(struct skb_seq_state *st)
        if (st->frag_data)
                kunmap_skb_frag(st->frag_data);
 }
+EXPORT_SYMBOL(skb_abort_seq_read);
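
skb_prepare_seq_read(), skb_seq_read() and skb_abort_seq_read() together form
a small iterator over skb payload. A minimal consumer sketch (my_consume is a
hypothetical callback):

	/* walk every mapped chunk of skb payload in [from, to) */
	static void my_walk_payload(struct sk_buff *skb, unsigned int from,
				    unsigned int to)
	{
		struct skb_seq_state st;
		unsigned int consumed = 0, len;
		const u8 *data;

		skb_prepare_seq_read(skb, from, to, &st);
		while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
			my_consume(data, len);	/* data points at len bytes */
			consumed += len;
		}
		skb_abort_seq_read(&st);	/* unmap any frag left mapped */
	}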
 
 #define TS_SKB_CB(state)       ((struct skb_seq_state *) &((state)->cb))
 
@@ -2165,6 +2369,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
        ret = textsearch_find(config, state);
        return (ret <= to - from ? ret : UINT_MAX);
 }
+EXPORT_SYMBOL(skb_find_text);
 
 /**
  * skb_append_datato_frags: - append the user data to a skb
@@ -2237,6 +2442,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 
        return 0;
 }
+EXPORT_SYMBOL(skb_append_datato_frags);
 
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
@@ -2273,6 +2479,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
+       struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int doffset = skb->data - skb_mac_header(skb);
        unsigned int offset = doffset;
@@ -2292,7 +2499,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                struct sk_buff *nskb;
                skb_frag_t *frag;
                int hsize;
-               int k;
                int size;
 
                len = skb->len - offset;
@@ -2305,9 +2511,36 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                if (hsize > len || !sg)
                        hsize = len;
 
-               nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
-               if (unlikely(!nskb))
-                       goto err;
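+               /* No linear data left and every frag consumed: the rest
+                * of the payload sits on the frag list, so build this
+                * segment by reusing those skbs instead of copying.
+                */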
+               if (!hsize && i >= nfrags) {
+                       BUG_ON(fskb->len != len);
+
+                       pos += len;
+                       nskb = skb_clone(fskb, GFP_ATOMIC);
+                       fskb = fskb->next;
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       hsize = skb_end_pointer(nskb) - nskb->head;
+                       if (skb_cow_head(nskb, doffset + headroom)) {
+                               kfree_skb(nskb);
+                               goto err;
+                       }
+
+                       nskb->truesize += skb_end_pointer(nskb) - nskb->head -
+                                         hsize;
+                       skb_release_head_state(nskb);
+                       __skb_push(nskb, doffset);
+               } else {
+                       nskb = alloc_skb(hsize + doffset + headroom,
+                                        GFP_ATOMIC);
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       skb_reserve(nskb, headroom);
+                       __skb_put(nskb, doffset);
+               }
 
                if (segs)
                        tail->next = nskb;
@@ -2318,13 +2551,15 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               skb_reserve(nskb, headroom);
                skb_reset_mac_header(nskb);
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
                                          skb_network_header_len(skb));
-               skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
-                                         doffset);
+               skb_copy_from_linear_data(skb, nskb->data, doffset);
+
+               if (pos >= offset + len)
+                       continue;
+
                if (!sg) {
                        nskb->ip_summed = CHECKSUM_NONE;
                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
@@ -2334,14 +2569,11 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                }
 
                frag = skb_shinfo(nskb)->frags;
-               k = 0;
 
                skb_copy_from_linear_data_offset(skb, offset,
                                                 skb_put(nskb, hsize), hsize);
 
-               while (pos < offset + len) {
-                       BUG_ON(i >= nfrags);
-
+               while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
                        get_page(frag->page);
                        size = frag->size;
@@ -2351,20 +2583,39 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                                frag->size -= offset - pos;
                        }
 
-                       k++;
+                       skb_shinfo(nskb)->nr_frags++;
 
                        if (pos + size <= offset + len) {
                                i++;
                                pos += size;
                        } else {
                                frag->size -= pos + size - (offset + len);
-                               break;
+                               goto skip_fraglist;
                        }
 
                        frag++;
                }
 
-               skb_shinfo(nskb)->nr_frags = k;
+               if (pos < offset + len) {
+                       struct sk_buff *fskb2 = fskb;
+
+                       BUG_ON(pos + fskb->len != offset + len);
+
+                       pos += fskb->len;
+                       fskb = fskb->next;
+
+                       if (fskb2->next) {
+                               fskb2 = skb_clone(fskb2, GFP_ATOMIC);
+                               if (!fskb2)
+                                       goto err;
+                       } else
+                               skb_get(fskb2);
+
+                       BUG_ON(skb_shinfo(nskb)->frag_list);
+                       skb_shinfo(nskb)->frag_list = fskb2;
+               }
+
+skip_fraglist:
                nskb->data_len = len - hsize;
                nskb->len += nskb->data_len;
                nskb->truesize += nskb->data_len;
@@ -2379,9 +2630,107 @@ err:
        }
        return ERR_PTR(err);
 }
-
 EXPORT_SYMBOL_GPL(skb_segment);
 
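+/*
+ * Merge @skb into the GRO packet chain headed by *@head: append its
+ * frags to the head skb when possible, otherwise chain it onto the
+ * head's frag_list (allocating a new head skb if needed).  Returns 0
+ * and marks the flow as matched, or -E2BIG/-ENOMEM when the packets
+ * cannot be merged.
+ */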
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       struct sk_buff *p = *head;
+       struct sk_buff *nskb;
+       unsigned int headroom;
+       unsigned int len = skb_gro_len(skb);
+
+       if (p->len + len >= 65536)
+               return -E2BIG;
+
+       if (skb_shinfo(p)->frag_list)
+               goto merge;
+       else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
+               if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
+                   MAX_SKB_FRAGS)
+                       return -E2BIG;
+
+               skb_shinfo(skb)->frags[0].page_offset +=
+                       skb_gro_offset(skb) - skb_headlen(skb);
+               skb_shinfo(skb)->frags[0].size -=
+                       skb_gro_offset(skb) - skb_headlen(skb);
+
+               memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
+                      skb_shinfo(skb)->frags,
+                      skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+
+               skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+               skb_shinfo(skb)->nr_frags = 0;
+
+               skb->truesize -= skb->data_len;
+               skb->len -= skb->data_len;
+               skb->data_len = 0;
+
+               NAPI_GRO_CB(skb)->free = 1;
+               goto done;
+       }
+
+       headroom = skb_headroom(p);
+       nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+       if (unlikely(!nskb))
+               return -ENOMEM;
+
+       __copy_skb_header(nskb, p);
+       nskb->mac_len = p->mac_len;
+
+       skb_reserve(nskb, headroom);
+       __skb_put(nskb, skb_gro_offset(p));
+
+       skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
+       skb_set_network_header(nskb, skb_network_offset(p));
+       skb_set_transport_header(nskb, skb_transport_offset(p));
+
+       __skb_pull(p, skb_gro_offset(p));
+       memcpy(skb_mac_header(nskb), skb_mac_header(p),
+              p->data - skb_mac_header(p));
+
+       *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
+       skb_shinfo(nskb)->frag_list = p;
+       skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
+       skb_header_release(p);
+       nskb->prev = p;
+
+       nskb->data_len += p->len;
+       nskb->truesize += p->len;
+       nskb->len += p->len;
+
+       *head = nskb;
+       nskb->next = p->next;
+       p->next = NULL;
+
+       p = nskb;
+
+merge:
+       if (skb_gro_offset(skb) > skb_headlen(skb)) {
+               skb_shinfo(skb)->frags[0].page_offset +=
+                       skb_gro_offset(skb) - skb_headlen(skb);
+               skb_shinfo(skb)->frags[0].size -=
+                       skb_gro_offset(skb) - skb_headlen(skb);
+               skb_gro_reset_offset(skb);
+               skb_gro_pull(skb, skb_headlen(skb));
+       }
+
+       __skb_pull(skb, skb_gro_offset(skb));
+
+       p->prev->next = skb;
+       p->prev = skb;
+       skb_header_release(skb);
+
+done:
+       NAPI_GRO_CB(p)->count++;
+       p->data_len += len;
+       p->truesize += len;
+       p->len += len;
+
+       NAPI_GRO_CB(skb)->same_flow = 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive);
+
 void __init skb_init(void)
 {
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
@@ -2478,6 +2827,7 @@ int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int le
 
        return nsg;
 }
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
 
 /**
  *     skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -2587,6 +2937,45 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
        return elt;
 }
+EXPORT_SYMBOL_GPL(skb_cow_data);
+
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+               struct skb_shared_hwtstamps *hwtstamps)
+{
+       struct sock *sk = orig_skb->sk;
+       struct sock_exterr_skb *serr;
+       struct sk_buff *skb;
+       int err;
+
+       if (!sk)
+               return;
+
+       skb = skb_clone(orig_skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       if (hwtstamps) {
+               *skb_hwtstamps(skb) = *hwtstamps;
+       } else {
+               /*
+                * no hardware time stamps are available,
+                * so keep the skb_shared_tx flags and
+                * store only a software time stamp
+                */
+               skb->tstamp = ktime_get_real();
+       }
+
+       serr = SKB_EXT_ERR(skb);
+       memset(serr, 0, sizeof(*serr));
+       serr->ee.ee_errno = ENOMSG;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+       err = sock_queue_err_skb(sk, skb);
+       if (err)
+               kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_tstamp_tx);
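
A driver with hardware transmit timestamping would call skb_tstamp_tx() from
its tx-completion handler; a sketch under assumed names (my_priv and
my_read_tx_stamp are hypothetical):

	/* report a hardware tx timestamp back to the socket's error queue */
	static void my_tx_complete(struct my_priv *priv, struct sk_buff *skb)
	{
		struct skb_shared_hwtstamps hwts;

		memset(&hwts, 0, sizeof(hwts));
		hwts.hwtstamp = ns_to_ktime(my_read_tx_stamp(priv));

		skb_tstamp_tx(skb, &hwts);	/* clones skb, queues the clone */
		dev_kfree_skb_any(skb);
	}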
 
 /**
  * skb_partial_csum_set - set up and verify partial csum values for packet
@@ -2615,6 +3004,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
        skb->csum_offset = off;
        return true;
 }
+EXPORT_SYMBOL_GPL(skb_partial_csum_set);
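
Typical caller: a backend that imports packets with checksum metadata from an
untrusted peer (a virtio-style device, say) validates the offsets before
trusting them; a sketch only:

	/* apply untrusted csum_start/csum_offset metadata safely */
	static int my_apply_partial_csum(struct sk_buff *skb, u16 start, u16 off)
	{
		/* fails if start/off would point outside the linear data */
		if (!skb_partial_csum_set(skb, start, off))
			return -EINVAL;
		return 0;
	}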
 
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
@@ -2622,42 +3012,4 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
                pr_warning("%s: received packets cannot be forwarded"
                           " while LRO is enabled\n", skb->dev->name);
 }
-
-EXPORT_SYMBOL(___pskb_trim);
-EXPORT_SYMBOL(__kfree_skb);
-EXPORT_SYMBOL(kfree_skb);
-EXPORT_SYMBOL(__pskb_pull_tail);
-EXPORT_SYMBOL(__alloc_skb);
-EXPORT_SYMBOL(__netdev_alloc_skb);
-EXPORT_SYMBOL(pskb_copy);
-EXPORT_SYMBOL(pskb_expand_head);
-EXPORT_SYMBOL(skb_checksum);
-EXPORT_SYMBOL(skb_clone);
-EXPORT_SYMBOL(skb_copy);
-EXPORT_SYMBOL(skb_copy_and_csum_bits);
-EXPORT_SYMBOL(skb_copy_and_csum_dev);
-EXPORT_SYMBOL(skb_copy_bits);
-EXPORT_SYMBOL(skb_copy_expand);
-EXPORT_SYMBOL(skb_over_panic);
-EXPORT_SYMBOL(skb_pad);
-EXPORT_SYMBOL(skb_realloc_headroom);
-EXPORT_SYMBOL(skb_under_panic);
-EXPORT_SYMBOL(skb_dequeue);
-EXPORT_SYMBOL(skb_dequeue_tail);
-EXPORT_SYMBOL(skb_insert);
-EXPORT_SYMBOL(skb_queue_purge);
-EXPORT_SYMBOL(skb_queue_head);
-EXPORT_SYMBOL(skb_queue_tail);
-EXPORT_SYMBOL(skb_unlink);
-EXPORT_SYMBOL(skb_append);
-EXPORT_SYMBOL(skb_split);
-EXPORT_SYMBOL(skb_prepare_seq_read);
-EXPORT_SYMBOL(skb_seq_read);
-EXPORT_SYMBOL(skb_abort_seq_read);
-EXPORT_SYMBOL(skb_find_text);
-EXPORT_SYMBOL(skb_append_datato_frags);
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
-
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
-EXPORT_SYMBOL_GPL(skb_cow_data);
-EXPORT_SYMBOL_GPL(skb_partial_csum_set);