Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3aafb10..f8abf68 100644
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -55,6 +56,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/scatterlist.h>
+#include <linux/errqueue.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -64,6 +66,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#include <trace/events/skb.h>
 
 #include "kmap_skb.h"
 
@@ -73,17 +76,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
 {
-       struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-       kfree_skb(skb);
+       put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-       skb_get(skb);
+       get_page(buf->page);
 }
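
The switch from skb references to page references means each pipe_buffer slot now pins only the page it points at, so a spliced page can outlive the skb it came from. A minimal sketch of the ownership rule the new ops rely on (illustrative helper, not part of the patch):

	static void example_fill_slot(struct pipe_buffer *buf, struct page *page,
				      unsigned int offset, unsigned int len)
	{
		get_page(page);		/* this slot now owns one page reference */
		buf->page = page;
		buf->offset = offset;
		buf->len = len;
		/* the reference is dropped later by sock_pipe_buf_release() */
	}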
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -94,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
 
 
 /* Pipe buffer operations for a socket. */
-static struct pipe_buf_operations sock_pipe_buf_ops = {
+static const struct pipe_buf_operations sock_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
@@ -118,7 +117,7 @@ static struct pipe_buf_operations sock_pipe_buf_ops = {
  *
  *     Out of line support code for skb_put(). Not user callable.
  */
-void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -137,7 +136,7 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
  *     Out of line support code for skb_push(). Not user callable.
  */
 
-void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -147,14 +146,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
        BUG();
 }
 
-void skb_truesize_bug(struct sk_buff *skb)
-{
-       WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
-              "len=%u, sizeof(sk_buff)=%Zd\n",
-              skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
-
 /*     Allocate a new skbuff. We do this ourselves so we can fill in a few
  *     'private' fields and also do memory statistics to find all the
  *     [BEEP] leaks.
@@ -190,12 +181,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
+       prefetchw(skb);
 
        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;
+       prefetchw(data + size);
 
        /*
         * Only clear those fields we need to clear, not those that we will
@@ -209,20 +202,23 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
+       kmemcheck_annotate_bitfield(skb, flags1);
+       kmemcheck_annotate_bitfield(skb, flags2);
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->mac_header = ~0U;
+#endif
+
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
+       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
-       shinfo->nr_frags  = 0;
-       shinfo->gso_size = 0;
-       shinfo->gso_segs = 0;
-       shinfo->gso_type = 0;
-       shinfo->ip6_frag_id = 0;
-       shinfo->frag_list = NULL;
 
        if (fclone) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+               kmemcheck_annotate_bitfield(child, flags1);
+               kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);
 
@@ -235,6 +231,7 @@ nodata:
        skb = NULL;
        goto out;
 }
+EXPORT_SYMBOL(__alloc_skb);
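
Two notes on the rewritten allocation path. The prefetchw() calls warm the cache lines that are about to be written: the skb head itself and the shared info living at data + size. And the per-field zeroing of skb_shared_info is replaced by one memset() up to offsetof(..., dataref), so every field laid out before dataref is cleared automatically, including any added later. A minimal sketch of that idiom with a hypothetical struct:

	struct example_info {
		unsigned short	nr_frags;	/* cleared by the memset */
		unsigned short	gso_size;	/* cleared by the memset */
		struct sk_buff	*frag_list;	/* cleared by the memset */
		atomic_t	dataref;	/* set explicitly afterwards */
	};

	static void example_init(struct example_info *info)
	{
		memset(info, 0, offsetof(struct example_info, dataref));
		atomic_set(&info->dataref, 1);
	}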
 
 /**
  *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -262,6 +259,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        }
        return skb;
 }
+EXPORT_SYMBOL(__netdev_alloc_skb);
 
 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
 {
@@ -327,7 +325,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
        struct sk_buff *list;
 
-       for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+       skb_walk_frags(skb, list)
                skb_get(list);
 }
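
skb_walk_frags() and skb_has_frags(), used throughout this patch, are frag_list helpers introduced alongside it; paraphrased from skbuff.h, they expand to roughly:

	#define skb_walk_frags(skb, iter)	\
		for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

	static inline bool skb_has_frags(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->frag_list != NULL;
	}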
 
@@ -342,7 +340,7 @@ static void skb_release_data(struct sk_buff *skb)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }
 
-               if (skb_shinfo(skb)->frag_list)
+               if (skb_has_frags(skb))
                        skb_drop_fraglist(skb);
 
                kfree(skb->head);
@@ -385,7 +383,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 
 static void skb_release_head_state(struct sk_buff *skb)
 {
-       dst_release(skb->dst);
+       skb_dst_drop(skb);
 #ifdef CONFIG_XFRM
        secpath_put(skb->sp);
 #endif
@@ -430,6 +428,7 @@ void __kfree_skb(struct sk_buff *skb)
        skb_release_all(skb);
        kfree_skbmem(skb);
 }
+EXPORT_SYMBOL(__kfree_skb);
 
 /**
  *     kfree_skb - free an sk_buff
@@ -446,8 +445,30 @@ void kfree_skb(struct sk_buff *skb)
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
+       trace_kfree_skb(skb, __builtin_return_address(0));
+       __kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb);
+
+/**
+ *     consume_skb - free an skbuff
+ *     @skb: buffer to free
+ *
+ *     Drop a ref to the buffer and free it if the usage count has hit zero.
+ *     Functions identically to kfree_skb(), but kfree_skb() assumes the frame
+ *     is being dropped after a failure and notes that in its tracepoint.
+ */
+void consume_skb(struct sk_buff *skb)
+{
+       if (unlikely(!skb))
+               return;
+       if (likely(atomic_read(&skb->users) == 1))
+               smp_rmb();
+       else if (likely(!atomic_dec_and_test(&skb->users)))
+               return;
        __kfree_skb(skb);
 }
+EXPORT_SYMBOL(consume_skb);
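
A hedged usage sketch (hypothetical driver code): with the tracepoint now in kfree_skb(), TX-completion paths should consume successfully sent frames so drop-monitoring tools only see genuine losses.

	static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
	{
		if (sent_ok)
			consume_skb(skb);	/* normal end of life, no drop event */
		else
			kfree_skb(skb);		/* fires trace_kfree_skb() */
	}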
 
 /**
  *     skb_recycle_check - check if skb can be reused for receive
@@ -465,6 +486,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
        struct skb_shared_info *shinfo;
 
+       if (irqs_disabled())
+               return 0;
+
        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
                return 0;
 
@@ -476,14 +500,10 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
                return 0;
 
        skb_release_head_state(skb);
+
        shinfo = skb_shinfo(skb);
+       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
-       shinfo->nr_frags = 0;
-       shinfo->gso_size = 0;
-       shinfo->gso_segs = 0;
-       shinfo->gso_type = 0;
-       shinfo->ip6_frag_id = 0;
-       shinfo->frag_list = NULL;
 
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->data = skb->head + NET_SKB_PAD;
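
A sketch of the intended caller (all names hypothetical): a driver offering a TX-completed skb back to its RX ring, falling back to a fresh allocation when the check fails. The new irqs_disabled() test conservatively refuses to recycle from hard-irq context, presumably because releasing head state (destructors, dst) is not safe there.

	static struct sk_buff *example_rx_refill(struct example_priv *priv,
						 struct sk_buff *skb)
	{
		if (skb && skb_recycle_check(skb, priv->rx_buf_size))
			return skb;	/* reset and ready for RX reuse */

		dev_kfree_skb_any(skb);
		return netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
	}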
@@ -500,13 +520,13 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->transport_header   = old->transport_header;
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
-       new->dst                = dst_clone(old->dst);
+       skb_dst_copy(new, old);
+       new->rxhash             = old->rxhash;
 #ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
 #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
-       new->csum_start         = old->csum_start;
-       new->csum_offset        = old->csum_offset;
+       new->csum               = old->csum;
        new->local_df           = old->local_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
@@ -517,6 +537,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
        new->protocol           = old->protocol;
        new->mark               = old->mark;
+       new->skb_iif            = old->skb_iif;
        __nf_copy(new, old);
 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
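
Copying new->csum in a single assignment covers the old csum_start/csum_offset pair because the three fields overlay each other in struct sk_buff; the relevant layout is roughly:

	union {
		__wsum	csum;
		struct {
			__u16	csum_start;	/* offset from skb->head */
			__u16	csum_offset;	/* offset from csum_start */
		};
	};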
@@ -533,6 +554,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        skb_copy_secmark(new, old);
 }
 
+/*
+ * You should not add any new code to this function.  Add it to
+ * __copy_skb_header above instead.
+ */
 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 {
 #define C(x) n->x = skb->x
@@ -544,20 +569,16 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        C(len);
        C(data_len);
        C(mac_len);
+       C(rxhash);
        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
        n->cloned = 1;
        n->nohdr = 0;
        n->destructor = NULL;
-       C(iif);
        C(tail);
        C(end);
        C(head);
        C(data);
        C(truesize);
-#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
-       C(do_not_encrypt);
-       C(requeue);
-#endif
        atomic_set(&n->users, 1);
 
        atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -612,11 +633,15 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;
+
+               kmemcheck_annotate_bitfield(n, flags1);
+               kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }
 
        return __skb_clone(n, skb);
 }
+EXPORT_SYMBOL(skb_clone);
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
@@ -633,7 +658,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        /* {transport,network,mac}_header are relative to skb->head */
        new->transport_header += offset;
        new->network_header   += offset;
-       new->mac_header       += offset;
+       if (skb_mac_header_was_set(new))
+               new->mac_header       += offset;
 #endif
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
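
skb_mac_header_was_set() pairs with the mac_header = ~0U initialization added in __alloc_skb() above; under NET_SKBUFF_DATA_USES_OFFSET it is roughly:

	static inline int skb_mac_header_was_set(const struct sk_buff *skb)
	{
		return skb->mac_header != ~0U;	/* ~0U means "never set" */
	}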
@@ -683,7 +709,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        copy_skb_header(n, skb);
        return n;
 }
-
+EXPORT_SYMBOL(skb_copy);
 
 /**
  *     pskb_copy       -       create copy of an sk_buff with private head.
@@ -733,7 +759,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
                skb_shinfo(n)->nr_frags = i;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
+       if (skb_has_frags(skb)) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }
@@ -742,6 +768,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
        return n;
 }
+EXPORT_SYMBOL(pskb_copy);
 
 /**
  *     pskb_expand_head - reallocate header of &sk_buff
@@ -795,7 +822,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);
 
-       if (skb_shinfo(skb)->frag_list)
+       if (skb_has_frags(skb))
                skb_clone_fraglist(skb);
 
        skb_release_data(skb);
@@ -814,7 +841,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        skb->tail             += off;
        skb->transport_header += off;
        skb->network_header   += off;
-       skb->mac_header       += off;
+       if (skb_mac_header_was_set(skb))
+               skb->mac_header += off;
        skb->csum_start       += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
@@ -825,6 +853,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 nodata:
        return -ENOMEM;
 }
+EXPORT_SYMBOL(pskb_expand_head);
 
 /* Make private copy of skb with writable head and some headroom */
 
@@ -845,7 +874,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
        }
        return skb2;
 }
-
+EXPORT_SYMBOL(skb_realloc_headroom);
 
 /**
  *     skb_copy_expand -       copy and expand sk_buff
@@ -905,11 +934,13 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
        n->transport_header += off;
        n->network_header   += off;
-       n->mac_header       += off;
+       if (skb_mac_header_was_set(skb))
+               n->mac_header += off;
 #endif
 
        return n;
 }
+EXPORT_SYMBOL(skb_copy_expand);
 
 /**
  *     skb_pad                 -       zero pad the tail of an skb
@@ -955,6 +986,7 @@ free_skb:
        kfree_skb(skb);
        return err;
 }
+EXPORT_SYMBOL(skb_pad);
 
 /**
  *     skb_put - add data to a buffer
@@ -1008,7 +1040,7 @@ EXPORT_SYMBOL(skb_push);
  */
 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
 {
-       return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+       return skb_pull_inline(skb, len);
 }
 EXPORT_SYMBOL(skb_pull);
 
@@ -1064,7 +1096,7 @@ drop_pages:
                for (; i < nfrags; i++)
                        put_page(skb_shinfo(skb)->frags[i].page);
 
-               if (skb_shinfo(skb)->frag_list)
+               if (skb_has_frags(skb))
                        skb_drop_fraglist(skb);
                goto done;
        }
@@ -1112,6 +1144,7 @@ done:
 
        return 0;
 }
+EXPORT_SYMBOL(___pskb_trim);
 
 /**
  *     __pskb_pull_tail - advance tail of skb header
@@ -1158,7 +1191,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
        /* Optimization: no fragments, no reasons to preestimate
         * size of pulled pages. Superb.
         */
-       if (!skb_shinfo(skb)->frag_list)
+       if (!skb_has_frags(skb))
                goto pull_pages;
 
        /* Estimate size of pulled pages. */
@@ -1205,8 +1238,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
-                                       if (clone)
-                                               kfree_skb(clone);
+                                       kfree_skb(clone);
                                        return NULL;
                                }
                                break;
@@ -1250,13 +1282,15 @@ pull_pages:
 
        return skb_tail_pointer(skb);
 }
+EXPORT_SYMBOL(__pskb_pull_tail);
 
 /* Copy some data bits from skb to kernel buffer. */
 
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
-       int i, copy;
        int start = skb_headlen(skb);
+       struct sk_buff *frag_iter;
+       int i, copy;
 
        if (offset > (int)skb->len - len)
                goto fault;
@@ -1298,28 +1332,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-               for (; list; list = list->next) {
-                       int end;
-
-                       WARN_ON(start > offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               if (skb_copy_bits(list, offset - start,
-                                                 to, copy))
-                                       goto fault;
-                               if ((len -= copy) == 0)
-                                       return 0;
-                               offset += copy;
-                               to     += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       if (skb_copy_bits(frag_iter, offset - start, to, copy))
+                               goto fault;
+                       if ((len -= copy) == 0)
+                               return 0;
+                       offset += copy;
+                       to     += copy;
                }
+               start = end;
        }
        if (!len)
                return 0;
@@ -1327,6 +1356,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 fault:
        return -EFAULT;
 }
+EXPORT_SYMBOL(skb_copy_bits);
 
 /*
  * Callback from splice_to_pipe(), if we need to release some pages
@@ -1334,34 +1364,82 @@ fault:
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-       struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+       put_page(spd->pages[i]);
+}
 
-       kfree_skb(skb);
+static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+                                         unsigned int *offset,
+                                         struct sk_buff *skb, struct sock *sk)
+{
+       struct page *p = sk->sk_sndmsg_page;
+       unsigned int off;
+
+       if (!p) {
+new_page:
+               p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
+               if (!p)
+                       return NULL;
+
+               off = sk->sk_sndmsg_off = 0;
+               /* hold one ref to this page until it's full */
+       } else {
+               unsigned int mlen;
+
+               off = sk->sk_sndmsg_off;
+               mlen = PAGE_SIZE - off;
+               if (mlen < 64 && mlen < *len) {
+                       put_page(p);
+                       goto new_page;
+               }
+
+               *len = min_t(unsigned int, *len, mlen);
+       }
+
+       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+       sk->sk_sndmsg_off += *len;
+       *offset = off;
+       get_page(p);
+
+       return p;
 }
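
A worked sketch of the sk_sndmsg_page cache above (hypothetical sizes): three spliced 100-byte linear chunks are packed into one shared page instead of pinning three skbs.

	/*
	 * call 1: page allocated, off = 0,   copy 100 bytes, sk_sndmsg_off = 100
	 * call 2: same page,      off = 100, copy 100 bytes, sk_sndmsg_off = 200
	 * call 3: same page,      off = 200, copy 100 bytes, sk_sndmsg_off = 300
	 *
	 * Each call returns the page with its own get_page() reference, so
	 * every pipe_buffer slot can be released independently via put_page().
	 */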
 
 /*
  * Fill page/offset/length into spd, if it can hold more pages.
  */
-static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
-                               unsigned int len, unsigned int offset,
-                               struct sk_buff *skb)
+static inline int spd_fill_page(struct splice_pipe_desc *spd,
+                               struct pipe_inode_info *pipe, struct page *page,
+                               unsigned int *len, unsigned int offset,
+                               struct sk_buff *skb, int linear,
+                               struct sock *sk)
 {
-       if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+       if (unlikely(spd->nr_pages == pipe->buffers))
                return 1;
 
+       if (linear) {
+               page = linear_to_page(page, len, &offset, skb, sk);
+               if (!page)
+                       return 1;
+       } else
+               get_page(page);
+
        spd->pages[spd->nr_pages] = page;
-       spd->partial[spd->nr_pages].len = len;
+       spd->partial[spd->nr_pages].len = *len;
        spd->partial[spd->nr_pages].offset = offset;
-       spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
        spd->nr_pages++;
+
        return 0;
 }
 
 static inline void __segment_seek(struct page **page, unsigned int *poff,
                                  unsigned int *plen, unsigned int off)
 {
+       unsigned long n;
+
        *poff += off;
-       *page += *poff / PAGE_SIZE;
+       n = *poff / PAGE_SIZE;
+       if (n)
+               *page = nth_page(*page, n);
+
        *poff = *poff % PAGE_SIZE;
        *plen -= off;
 }
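
The old code stepped the struct page pointer directly, which is only valid when mem_map is contiguous; nth_page() goes through the pfn instead, so it stays correct with sparse memory layouts. Its definition is roughly:

	#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))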
@@ -1369,7 +1447,9 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
                                   unsigned int plen, unsigned int *off,
                                   unsigned int *len, struct sk_buff *skb,
-                                  struct splice_pipe_desc *spd)
+                                  struct splice_pipe_desc *spd, int linear,
+                                  struct sock *sk,
+                                  struct pipe_inode_info *pipe)
 {
        if (!*len)
                return 1;
@@ -1392,7 +1472,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
                /* the linear region may spread across several pages  */
                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-               if (spd_fill_page(spd, page, flen, poff, skb))
+               if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
                        return 1;
 
                __segment_seek(&page, &poff, &plen, flen);
@@ -1407,9 +1487,9 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * Map linear and fragment data from the skb to spd. It reports failure if the
  * pipe is full or if we already spliced the requested length.
  */
-static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-                     unsigned int *len,
-                     struct splice_pipe_desc *spd)
+static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+                            unsigned int *offset, unsigned int *len,
+                            struct splice_pipe_desc *spd, struct sock *sk)
 {
        int seg;
 
@@ -1419,7 +1499,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
        if (__splice_segment(virt_to_page(skb->data),
                             (unsigned long) skb->data & (PAGE_SIZE - 1),
                             skb_headlen(skb),
-                            offset, len, skb, spd))
+                            offset, len, skb, spd, 1, sk, pipe))
                return 1;
 
        /*
@@ -1429,7 +1509,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
                if (__splice_segment(f->page, f->page_offset, f->size,
-                                    offset, len, skb, spd))
+                                    offset, len, skb, spd, 0, sk, pipe))
                        return 1;
        }
 
@@ -1442,12 +1522,12 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                    struct pipe_inode_info *pipe, unsigned int tlen,
                    unsigned int flags)
 {
-       struct partial_page partial[PIPE_BUFFERS];
-       struct page *pages[PIPE_BUFFERS];
+       struct partial_page partial[PIPE_DEF_BUFFERS];
+       struct page *pages[PIPE_DEF_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
@@ -1455,22 +1535,18 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
        };
-       struct sk_buff *skb;
+       struct sk_buff *frag_iter;
+       struct sock *sk = skb->sk;
+       int ret = 0;
 
-       /*
-        * I'd love to avoid the clone here, but tcp_read_sock()
-        * ignores reference counts and unconditonally kills the sk_buff
-        * on return from the actor.
-        */
-       skb = skb_clone(__skb, GFP_KERNEL);
-       if (unlikely(!skb))
+       if (splice_grow_spd(pipe, &spd))
                return -ENOMEM;
 
        /*
         * __skb_splice_bits() only fails if the output has no room left,
         * so no point in going over the frag_list for the error case.
         */
-       if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+       if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
                goto done;
        else if (!tlen)
                goto done;
@@ -1478,26 +1554,15 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
        /*
         * now see if we have a frag_list to map
         */
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-               for (; list && tlen; list = list->next) {
-                       if (__skb_splice_bits(list, &offset, &tlen, &spd))
-                               break;
-               }
+       skb_walk_frags(skb, frag_iter) {
+               if (!tlen)
+                       break;
+               if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
+                       break;
        }
 
 done:
-       /*
-        * drop our reference to the clone, the pipe consumption will
-        * drop the rest.
-        */
-       kfree_skb(skb);
-
        if (spd.nr_pages) {
-               int ret;
-               struct sock *sk = __skb->sk;
-
                /*
                 * Drop the socket lock, otherwise we have reverse
                 * locking dependencies between sk_lock and i_mutex
@@ -1510,10 +1575,10 @@ done:
                release_sock(sk);
                ret = splice_to_pipe(pipe, &spd);
                lock_sock(sk);
-               return ret;
        }
 
-       return 0;
+       splice_shrink_spd(pipe, &spd);
+       return ret;
 }
 
 /**
@@ -1530,8 +1595,9 @@ done:
 
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-       int i, copy;
        int start = skb_headlen(skb);
+       struct sk_buff *frag_iter;
+       int i, copy;
 
        if (offset > (int)skb->len - len)
                goto fault;
@@ -1572,28 +1638,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-               for (; list; list = list->next) {
-                       int end;
-
-                       WARN_ON(start > offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               if (skb_store_bits(list, offset - start,
-                                                  from, copy))
-                                       goto fault;
-                               if ((len -= copy) == 0)
-                                       return 0;
-                               offset += copy;
-                               from += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       if (skb_store_bits(frag_iter, offset - start,
+                                          from, copy))
+                               goto fault;
+                       if ((len -= copy) == 0)
+                               return 0;
+                       offset += copy;
+                       from += copy;
                }
+               start = end;
        }
        if (!len)
                return 0;
@@ -1601,7 +1663,6 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 fault:
        return -EFAULT;
 }
-
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
@@ -1611,6 +1672,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
        int start = skb_headlen(skb);
        int i, copy = start - offset;
+       struct sk_buff *frag_iter;
        int pos = 0;
 
        /* Checksum header. */
@@ -1650,34 +1712,31 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-               for (; list; list = list->next) {
-                       int end;
-
-                       WARN_ON(start > offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               __wsum csum2;
-                               if (copy > len)
-                                       copy = len;
-                               csum2 = skb_checksum(list, offset - start,
-                                                    copy, 0);
-                               csum = csum_block_add(csum, csum2, pos);
-                               if ((len -= copy) == 0)
-                                       return csum;
-                               offset += copy;
-                               pos    += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       __wsum csum2;
+                       if (copy > len)
+                               copy = len;
+                       csum2 = skb_checksum(frag_iter, offset - start,
+                                            copy, 0);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if ((len -= copy) == 0)
+                               return csum;
+                       offset += copy;
+                       pos    += copy;
                }
+               start = end;
        }
        BUG_ON(len);
 
        return csum;
 }
+EXPORT_SYMBOL(skb_checksum);
 
 /* Both of above in one bottle. */
 
@@ -1686,6 +1745,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
        int start = skb_headlen(skb);
        int i, copy = start - offset;
+       struct sk_buff *frag_iter;
        int pos = 0;
 
        /* Copy header. */
@@ -1730,35 +1790,32 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+       skb_walk_frags(skb, frag_iter) {
+               __wsum csum2;
+               int end;
 
-               for (; list; list = list->next) {
-                       __wsum csum2;
-                       int end;
-
-                       WARN_ON(start > offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               csum2 = skb_copy_and_csum_bits(list,
-                                                              offset - start,
-                                                              to, copy, 0);
-                               csum = csum_block_add(csum, csum2, pos);
-                               if ((len -= copy) == 0)
-                                       return csum;
-                               offset += copy;
-                               to     += copy;
-                               pos    += copy;
-                       }
-                       start = end;
+               WARN_ON(start > offset + len);
+
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       csum2 = skb_copy_and_csum_bits(frag_iter,
+                                                      offset - start,
+                                                      to, copy, 0);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if ((len -= copy) == 0)
+                               return csum;
+                       offset += copy;
+                       to     += copy;
+                       pos    += copy;
                }
+               start = end;
        }
        BUG_ON(len);
        return csum;
 }
+EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
@@ -1785,6 +1842,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
        }
 }
+EXPORT_SYMBOL(skb_copy_and_csum_dev);
 
 /**
  *     skb_dequeue - remove from the head of the queue
@@ -1805,6 +1863,7 @@ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
 }
+EXPORT_SYMBOL(skb_dequeue);
 
 /**
  *     skb_dequeue_tail - remove from the tail of the queue
@@ -1824,6 +1883,7 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
 }
+EXPORT_SYMBOL(skb_dequeue_tail);
 
 /**
  *     skb_queue_purge - empty a list
@@ -1839,6 +1899,7 @@ void skb_queue_purge(struct sk_buff_head *list)
        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
 }
+EXPORT_SYMBOL(skb_queue_purge);
 
 /**
  *     skb_queue_head - queue a buffer at the list head
@@ -1859,6 +1920,7 @@ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_queue_head);
 
 /**
  *     skb_queue_tail - queue a buffer at the list tail
@@ -1879,6 +1941,7 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_queue_tail);
 
 /**
  *     skb_unlink      -       remove a buffer from a list
@@ -1898,6 +1961,7 @@ void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        __skb_unlink(skb, list);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_unlink);
 
 /**
  *     skb_append      -       append a buffer
@@ -1917,7 +1981,7 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        __skb_queue_after(list, old, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
-
+EXPORT_SYMBOL(skb_append);
 
 /**
  *     skb_insert      -       insert a buffer
@@ -1939,6 +2003,7 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        __skb_insert(newsk, old->prev, old, list);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+EXPORT_SYMBOL(skb_insert);
 
 static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
@@ -2017,6 +2082,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
        else            /* Second chunk has no header, nothing to copy. */
                skb_split_no_header(skb, skb1, len, pos);
 }
+EXPORT_SYMBOL(skb_split);
 
 /* Shifting from/to a cloned skb is a no-go.
  *
@@ -2179,6 +2245,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
 }
+EXPORT_SYMBOL(skb_prepare_seq_read);
 
 /**
  * skb_seq_read - Sequentially read skb data
@@ -2215,10 +2282,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
                return 0;
 
 next_skb:
-       block_limit = skb_headlen(st->cur_skb);
+       block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-       if (abs_offset < block_limit) {
-               *data = st->cur_skb->data + abs_offset;
+       if (abs_offset < block_limit && !st->frag_data) {
+               *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                return block_limit - abs_offset;
        }
 
@@ -2253,18 +2320,19 @@ next_skb:
                st->frag_data = NULL;
        }
 
-       if (st->cur_skb->next) {
-               st->cur_skb = st->cur_skb->next;
+       if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
+               st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
                st->frag_idx = 0;
                goto next_skb;
-       } else if (st->root_skb == st->cur_skb &&
-                  skb_shinfo(st->root_skb)->frag_list) {
-               st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+       } else if (st->cur_skb->next) {
+               st->cur_skb = st->cur_skb->next;
+               st->frag_idx = 0;
                goto next_skb;
        }
 
        return 0;
 }
+EXPORT_SYMBOL(skb_seq_read);
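
A hedged usage sketch of the sequential-read API whose frag_list ordering is fixed above (hypothetical consumer): iterate every byte of an skb regardless of whether it lives in the linear area, the page frags, or the frag_list.

	static void example_scan(struct sk_buff *skb)
	{
		struct skb_seq_state st;
		unsigned int consumed = 0, avail;
		const u8 *data;

		skb_prepare_seq_read(skb, 0, skb->len, &st);
		while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
			/* process 'avail' bytes at 'data' here */
			consumed += avail;
		}
		/* use skb_abort_seq_read(&st) instead when stopping early */
	}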
 
 /**
  * skb_abort_seq_read - Abort a sequential read of skb data
@@ -2278,6 +2346,7 @@ void skb_abort_seq_read(struct skb_seq_state *st)
        if (st->frag_data)
                kunmap_skb_frag(st->frag_data);
 }
+EXPORT_SYMBOL(skb_abort_seq_read);
 
 #define TS_SKB_CB(state)       ((struct skb_seq_state *) &((state)->cb))
 
@@ -2320,6 +2389,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
        ret = textsearch_find(config, state);
        return (ret <= to - from ? ret : UINT_MAX);
 }
+EXPORT_SYMBOL(skb_find_text);
 
 /**
  * skb_append_datato_frags: - append the user data to a skb
@@ -2392,6 +2462,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 
        return 0;
 }
+EXPORT_SYMBOL(skb_append_datato_frags);
 
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
@@ -2506,7 +2577,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                                          skb_network_header_len(skb));
                skb_copy_from_linear_data(skb, nskb->data, doffset);
 
-               if (pos >= offset + len)
+               if (fskb != skb_shinfo(skb)->frag_list)
                        continue;
 
                if (!sg) {
@@ -2560,7 +2631,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                        } else
                                skb_get(fskb2);
 
-                       BUG_ON(skb_shinfo(nskb)->frag_list);
+                       SKB_FRAG_ASSERT(nskb);
                        skb_shinfo(nskb)->frag_list = fskb2;
                }
 
@@ -2579,24 +2650,58 @@ err:
        }
        return ERR_PTR(err);
 }
-
 EXPORT_SYMBOL_GPL(skb_segment);
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
        struct sk_buff *p = *head;
        struct sk_buff *nskb;
+       struct skb_shared_info *skbinfo = skb_shinfo(skb);
+       struct skb_shared_info *pinfo = skb_shinfo(p);
        unsigned int headroom;
-       unsigned int hlen = p->data - skb_mac_header(p);
+       unsigned int len = skb_gro_len(skb);
+       unsigned int offset = skb_gro_offset(skb);
+       unsigned int headlen = skb_headlen(skb);
 
-       if (hlen + p->len + skb->len >= 65536)
+       if (p->len + len >= 65536)
                return -E2BIG;
 
-       if (skb_shinfo(p)->frag_list)
+       if (pinfo->frag_list)
                goto merge;
+       else if (headlen <= offset) {
+               skb_frag_t *frag;
+               skb_frag_t *frag2;
+               int i = skbinfo->nr_frags;
+               int nr_frags = pinfo->nr_frags + i;
+
+               offset -= headlen;
+
+               if (nr_frags > MAX_SKB_FRAGS)
+                       return -E2BIG;
+
+               pinfo->nr_frags = nr_frags;
+               skbinfo->nr_frags = 0;
+
+               frag = pinfo->frags + nr_frags;
+               frag2 = skbinfo->frags + i;
+               do {
+                       *--frag = *--frag2;
+               } while (--i);
+
+               frag->page_offset += offset;
+               frag->size -= offset;
+
+               skb->truesize -= skb->data_len;
+               skb->len -= skb->data_len;
+               skb->data_len = 0;
+
+               NAPI_GRO_CB(skb)->free = 1;
+               goto done;
+       } else if (skb_gro_len(p) != pinfo->gso_size)
+               return -E2BIG;
 
        headroom = skb_headroom(p);
-       nskb = netdev_alloc_skb(p->dev, headroom);
+       nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
        if (unlikely(!nskb))
                return -ENOMEM;
 
@@ -2604,16 +2709,20 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        nskb->mac_len = p->mac_len;
 
        skb_reserve(nskb, headroom);
+       __skb_put(nskb, skb_gro_offset(p));
 
-       skb_set_mac_header(nskb, -hlen);
+       skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
        skb_set_network_header(nskb, skb_network_offset(p));
        skb_set_transport_header(nskb, skb_transport_offset(p));
 
-       memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);
+       __skb_pull(p, skb_gro_offset(p));
+       memcpy(skb_mac_header(nskb), skb_mac_header(p),
+              p->data - skb_mac_header(p));
 
        *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
        skb_shinfo(nskb)->frag_list = p;
-       skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
+       skb_shinfo(nskb)->gso_size = pinfo->gso_size;
+       pinfo->gso_size = 0;
        skb_header_release(p);
        nskb->prev = p;
 
@@ -2628,14 +2737,23 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        p = nskb;
 
 merge:
-       NAPI_GRO_CB(p)->count++;
+       if (offset > headlen) {
+               skbinfo->frags[0].page_offset += offset - headlen;
+               skbinfo->frags[0].size -= offset - headlen;
+               offset = headlen;
+       }
+
+       __skb_pull(skb, offset);
+
        p->prev->next = skb;
        p->prev = skb;
        skb_header_release(skb);
 
-       p->data_len += skb->len;
-       p->truesize += skb->len;
-       p->len += skb->len;
+done:
+       NAPI_GRO_CB(p)->count++;
+       p->data_len += len;
+       p->truesize += len;
+       p->len += len;
 
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
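
A worked sketch of the new fast path (hypothetical sizes): when the incoming skb carries all payload in page frags (say headlen 66 and GRO offset 66), headlen <= offset holds, so its frags are appended to p's frag array and the skb itself is flagged for recycling instead of being chained on a frag_list:

	/*
	 * before: p   = [hdr | frags: f0 f1]    skb = [hdr | frags: f2 f3]
	 * after:  p   = [hdr | frags: f0 f1 f2 f3]
	 *         skb:  nr_frags = 0, data_len = 0, NAPI_GRO_CB(skb)->free = 1
	 */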
@@ -2672,6 +2790,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
        int start = skb_headlen(skb);
        int i, copy = start - offset;
+       struct sk_buff *frag_iter;
        int elt = 0;
 
        if (copy > 0) {
@@ -2705,26 +2824,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
                start = end;
        }
 
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-               for (; list; list = list->next) {
-                       int end;
+       skb_walk_frags(skb, frag_iter) {
+               int end;
 
-                       WARN_ON(start > offset + len);
+               WARN_ON(start > offset + len);
 
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               elt += __skb_to_sgvec(list, sg+elt, offset - start,
-                                                     copy);
-                               if ((len -= copy) == 0)
-                                       return elt;
-                               offset += copy;
-                       }
-                       start = end;
+               end = start + frag_iter->len;
+               if ((copy = end - offset) > 0) {
+                       if (copy > len)
+                               copy = len;
+                       elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+                                             copy);
+                       if ((len -= copy) == 0)
+                               return elt;
+                       offset += copy;
                }
+               start = end;
        }
        BUG_ON(len);
        return elt;
@@ -2738,6 +2853,7 @@ int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int le
 
        return nsg;
 }
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
 
 /**
  *     skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -2771,7 +2887,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
                return -ENOMEM;
 
        /* Easy case. Most of packets will go this way. */
-       if (!skb_shinfo(skb)->frag_list) {
+       if (!skb_has_frags(skb)) {
                /* A little of trouble, not enough of space for trailer.
                 * This should not happen, when stack is tuned to generate
                 * good frames. OK, on miss we reallocate and reserve even more
@@ -2806,7 +2922,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
                if (skb1->next == NULL && tailbits) {
                        if (skb_shinfo(skb1)->nr_frags ||
-                           skb_shinfo(skb1)->frag_list ||
+                           skb_has_frags(skb1) ||
                            skb_tailroom(skb1) < tailbits)
                                ntail = tailbits + 128;
                }
@@ -2815,7 +2931,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
                    skb_cloned(skb1) ||
                    ntail ||
                    skb_shinfo(skb1)->nr_frags ||
-                   skb_shinfo(skb1)->frag_list) {
+                   skb_has_frags(skb1)) {
                        struct sk_buff *skb2;
 
                        /* Fuck, we are miserable poor guys... */
@@ -2847,6 +2963,45 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
        return elt;
 }
+EXPORT_SYMBOL_GPL(skb_cow_data);
+
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+               struct skb_shared_hwtstamps *hwtstamps)
+{
+       struct sock *sk = orig_skb->sk;
+       struct sock_exterr_skb *serr;
+       struct sk_buff *skb;
+       int err;
+
+       if (!sk)
+               return;
+
+       skb = skb_clone(orig_skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       if (hwtstamps) {
+               *skb_hwtstamps(skb) = *hwtstamps;
+       } else {
+               /*
+                * No hardware time stamp is available, so keep the
+                * skb_shared_tx flags and store a software time stamp
+                * only.
+                */
+               skb->tstamp = ktime_get_real();
+       }
+
+       serr = SKB_EXT_ERR(skb);
+       memset(serr, 0, sizeof(*serr));
+       serr->ee.ee_errno = ENOMSG;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+       err = sock_queue_err_skb(sk, skb);
+       if (err)
+               kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_tstamp_tx);
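
A hedged driver-side sketch (names hypothetical): reporting a hardware TX timestamp once the NIC signals completion. The clone built by skb_tstamp_tx() lands on the socket error queue, where the application reads it with MSG_ERRQUEUE.

	static void example_report_hw_tstamp(struct sk_buff *skb, u64 hw_ns)
	{
		struct skb_shared_hwtstamps hwts;

		memset(&hwts, 0, sizeof(hwts));
		hwts.hwtstamp = ns_to_ktime(hw_ns);
		skb_tstamp_tx(skb, &hwts);
	}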
+
 
 /**
  * skb_partial_csum_set - set up and verify partial csum values for packet
@@ -2862,12 +3017,12 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
  */
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 {
-       if (unlikely(start > skb->len - 2) ||
-           unlikely((int)start + off > skb->len - 2)) {
+       if (unlikely(start > skb_headlen(skb)) ||
+           unlikely((int)start + off > skb_headlen(skb) - 2)) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "bad partial csum: csum=%u/%u len=%u\n",
-                              start, off, skb->len);
+                              start, off, skb_headlen(skb));
                return false;
        }
        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -2875,6 +3030,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
        skb->csum_offset = off;
        return true;
 }
+EXPORT_SYMBOL_GPL(skb_partial_csum_set);
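
A hedged caller sketch (simplified, in the style of virtio_net): validating peer-supplied checksum metadata. The tightened check bounds both offsets by skb_headlen() rather than skb->len, since the checksum fields must live in the linear area:

	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
		kfree_skb(skb);		/* metadata points outside the linear data */
		return -EINVAL;
	}
	/* csum will later be folded in at skb->data + csum_start + csum_offset */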
 
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
@@ -2882,42 +3038,4 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
                pr_warning("%s: received packets cannot be forwarded"
                           " while LRO is enabled\n", skb->dev->name);
 }
-
-EXPORT_SYMBOL(___pskb_trim);
-EXPORT_SYMBOL(__kfree_skb);
-EXPORT_SYMBOL(kfree_skb);
-EXPORT_SYMBOL(__pskb_pull_tail);
-EXPORT_SYMBOL(__alloc_skb);
-EXPORT_SYMBOL(__netdev_alloc_skb);
-EXPORT_SYMBOL(pskb_copy);
-EXPORT_SYMBOL(pskb_expand_head);
-EXPORT_SYMBOL(skb_checksum);
-EXPORT_SYMBOL(skb_clone);
-EXPORT_SYMBOL(skb_copy);
-EXPORT_SYMBOL(skb_copy_and_csum_bits);
-EXPORT_SYMBOL(skb_copy_and_csum_dev);
-EXPORT_SYMBOL(skb_copy_bits);
-EXPORT_SYMBOL(skb_copy_expand);
-EXPORT_SYMBOL(skb_over_panic);
-EXPORT_SYMBOL(skb_pad);
-EXPORT_SYMBOL(skb_realloc_headroom);
-EXPORT_SYMBOL(skb_under_panic);
-EXPORT_SYMBOL(skb_dequeue);
-EXPORT_SYMBOL(skb_dequeue_tail);
-EXPORT_SYMBOL(skb_insert);
-EXPORT_SYMBOL(skb_queue_purge);
-EXPORT_SYMBOL(skb_queue_head);
-EXPORT_SYMBOL(skb_queue_tail);
-EXPORT_SYMBOL(skb_unlink);
-EXPORT_SYMBOL(skb_append);
-EXPORT_SYMBOL(skb_split);
-EXPORT_SYMBOL(skb_prepare_seq_read);
-EXPORT_SYMBOL(skb_seq_read);
-EXPORT_SYMBOL(skb_abort_seq_read);
-EXPORT_SYMBOL(skb_find_text);
-EXPORT_SYMBOL(skb_append_datato_frags);
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
-
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
-EXPORT_SYMBOL_GPL(skb_cow_data);
-EXPORT_SYMBOL_GPL(skb_partial_csum_set);