cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: net
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2c218a0..b8d0abb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1,7 +1,7 @@
 /*
  *     Routines having to do with the 'struct sk_buff' memory handlers.
  *
- *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
+ *     Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
  *                     Florian La Roche <rzsfl@rz.uni-sb.de>
  *
  *     Fixes:
@@ -149,7 +149,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 
 void skb_truesize_bug(struct sk_buff *skb)
 {
-       printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
+       WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
               "len=%u, sizeof(sk_buff)=%Zd\n",
               skb->truesize, skb->len, sizeof(struct sk_buff));
 }
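
The hunk above replaces an unconditional printk() with a WARN() gated on net_ratelimit(), so the stack-dumping warning fires only as often as the net ratelimit allows. A minimal userspace sketch of the same gate-then-warn pattern, assuming a toy one-message-per-second limiter (ratelimit() and WARN_RATELIMITED() are hypothetical stand-ins, not kernel APIs):

    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for net_ratelimit(): allow at most one
     * message per second; everything else is silently suppressed. */
    static int ratelimit(void)
    {
    	static time_t last;
    	time_t now = time(NULL);

    	if (now == last)
    		return 0;	/* suppressed */
    	last = now;
    	return 1;		/* allowed */
    }

    #define WARN_RATELIMITED(...) \
    	do { if (ratelimit()) fprintf(stderr, __VA_ARGS__); } while (0)

    int main(void)
    {
    	/* A million call sites, but at most a couple of lines printed. */
    	for (int i = 0; i < 1000000; i++)
    		WARN_RATELIMITED("SKB BUG: invalid truesize (%d)\n", i);
    	return 0;
    }
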
@@ -263,6 +263,26 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        return skb;
 }
 
+struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+       int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+       struct page *page;
+
+       page = alloc_pages_node(node, gfp_mask, 0);
+       return page;
+}
+EXPORT_SYMBOL(__netdev_alloc_page);
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+               int size)
+{
+       skb_fill_page_desc(skb, i, page, off, size);
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += size;
+}
+EXPORT_SYMBOL(skb_add_rx_frag);
+
 /**
  *     dev_alloc_skb - allocate an skbuff for receiving
  *     @length: length to allocate
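
The two helpers added in this hunk give drivers a page allocator that honors the device's NUMA node and a one-call way to append a received page fragment while keeping len (total data), data_len (paged data only), and truesize (accounted memory) consistent. A minimal userspace model of skb_add_rx_frag's bookkeeping, with simplified, hypothetical structs standing in for struct sk_buff and its frag array:

    #include <assert.h>
    #include <stddef.h>

    struct frag { void *page; int off; int size; };

    struct skb_model {
    	int len;	/* total bytes, linear + paged */
    	int data_len;	/* paged bytes only */
    	int truesize;	/* memory accounted to the socket */
    	int nr_frags;
    	struct frag frags[18];
    };

    /* Mirror of skb_add_rx_frag's bookkeeping: record the fragment,
     * then grow all three counters by the fragment size. */
    static void add_rx_frag(struct skb_model *skb, int i, void *page,
    			int off, int size)
    {
    	skb->frags[i] = (struct frag){ page, off, size };
    	skb->nr_frags = i + 1;
    	skb->len += size;
    	skb->data_len += size;
    	skb->truesize += size;
    }

    int main(void)
    {
    	static char page[4096];
    	struct skb_model skb = { .truesize = 256 };	/* head already accounted */

    	add_rx_frag(&skb, 0, page, 0, 1500);
    	assert(skb.len == 1500 && skb.data_len == 1500);
    	assert(skb.truesize == 256 + 1500);
    	return 0;
    }
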
@@ -429,6 +449,18 @@ void kfree_skb(struct sk_buff *skb)
        __kfree_skb(skb);
 }
 
+/**
+ *     skb_recycle_check - check if skb can be reused for receive
+ *     @skb: buffer
+ *     @skb_size: minimum receive buffer size
+ *
+ *     Checks that the skb passed in is not shared or cloned, that it
+ *     is linear, and that its head portion is at least as large as
+ *     @skb_size, so that it can be recycled as a receive buffer.
+ *     If these conditions are met, this function does any necessary
+ *     reference count dropping and cleans up the skbuff as if it
+ *     just came from __alloc_skb().
+ */
 int skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
        struct skb_shared_info *shinfo;
@@ -454,8 +486,8 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
        shinfo->frag_list = NULL;
 
        memset(skb, 0, offsetof(struct sk_buff, tail));
-       skb_reset_tail_pointer(skb);
        skb->data = skb->head + NET_SKB_PAD;
+       skb_reset_tail_pointer(skb);
 
        return 1;
 }
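
The swap in the hunk above fixes an ordering bug: skb_reset_tail_pointer() derives the tail pointer from skb->data, so it must run after data has been moved back to head + NET_SKB_PAD; in the old order, tail was computed from the stale data pointer. A tiny sketch of the dependency, using a simplified, hypothetical skb model:

    #include <assert.h>

    #define NET_SKB_PAD 32

    struct skb_model {
    	char *head, *data, *tail;
    };

    static void reset_tail_pointer(struct skb_model *skb)
    {
    	skb->tail = skb->data;	/* tail is defined relative to data */
    }

    int main(void)
    {
    	static char buf[256];
    	struct skb_model skb = { buf, buf + 100, buf + 150 };

    	/* Fixed order: move data back first, then derive tail from it.
    	 * Resetting tail before moving data would leave it at buf + 100. */
    	skb.data = skb.head + NET_SKB_PAD;
    	reset_tail_pointer(&skb);
    	assert(skb.tail == skb.head + NET_SKB_PAD);
    	return 0;
    }
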
@@ -469,7 +501,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        new->dst                = dst_clone(old->dst);
-#ifdef CONFIG_INET
+#ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
 #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
@@ -524,6 +556,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        C(truesize);
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
        C(do_not_encrypt);
+       C(requeue);
 #endif
        atomic_set(&n->users, 1);
 
@@ -738,6 +771,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
        long off;
 
+       BUG_ON(nhead < 0);
+
        if (skb_shared(skb))
                BUG();
 
@@ -1983,6 +2018,148 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
                skb_split_no_header(skb, skb1, len, pos);
 }
 
+/* Shifting from/to a cloned skb is a no-go.
+ *
+ * Caller cannot keep skb_shinfo related pointers past calling here!
+ */
+static int skb_prepare_for_shift(struct sk_buff *skb)
+{
+       return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/**
+ * skb_shift - Shifts paged data partially from skb to another
+ * @tgt: buffer into which tail data gets added
+ * @skb: buffer from which the paged data comes from
+ * @shiftlen: shift up to this many bytes
+ *
+ * Attempts to shift up to @shiftlen worth of bytes, which may be less than
+ * the length of the skb, from @skb to @tgt. Returns the number of bytes
+ * shifted. It's up to the caller to free @skb if everything was shifted.
+ *
+ * If @tgt runs out of frags, the whole operation is aborted.
+ *
+ * @skb may contain nothing but paged data, while @tgt is allowed
+ * to have non-paged data as well.
+ *
+ * TODO: a full-sized shift could be optimized, but that would need a
+ * specialized skb free'er to handle frags without up-to-date nr_frags.
+ */
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
+{
+       int from, to, merge, todo;
+       struct skb_frag_struct *fragfrom, *fragto;
+
+       BUG_ON(shiftlen > skb->len);
+       BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
+
+       todo = shiftlen;
+       from = 0;
+       to = skb_shinfo(tgt)->nr_frags;
+       fragfrom = &skb_shinfo(skb)->frags[from];
+
+       /* Actual merge is delayed until the point when we know we can
+        * commit all, so that we don't have to undo partial changes
+        */
+       if (!to ||
+           !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+               merge = -1;
+       } else {
+               merge = to - 1;
+
+               todo -= fragfrom->size;
+               if (todo < 0) {
+                       if (skb_prepare_for_shift(skb) ||
+                           skb_prepare_for_shift(tgt))
+                               return 0;
+
+                       /* All previous frag pointers might be stale! */
+                       fragfrom = &skb_shinfo(skb)->frags[from];
+                       fragto = &skb_shinfo(tgt)->frags[merge];
+
+                       fragto->size += shiftlen;
+                       fragfrom->size -= shiftlen;
+                       fragfrom->page_offset += shiftlen;
+
+                       goto onlymerged;
+               }
+
+               from++;
+       }
+
+       /* Skip full, not-fitting skb to avoid expensive operations */
+       if ((shiftlen == skb->len) &&
+           (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
+               return 0;
+
+       if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
+               return 0;
+
+       while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+               if (to == MAX_SKB_FRAGS)
+                       return 0;
+
+               fragfrom = &skb_shinfo(skb)->frags[from];
+               fragto = &skb_shinfo(tgt)->frags[to];
+
+               if (todo >= fragfrom->size) {
+                       *fragto = *fragfrom;
+                       todo -= fragfrom->size;
+                       from++;
+                       to++;
+
+               } else {
+                       get_page(fragfrom->page);
+                       fragto->page = fragfrom->page;
+                       fragto->page_offset = fragfrom->page_offset;
+                       fragto->size = todo;
+
+                       fragfrom->page_offset += todo;
+                       fragfrom->size -= todo;
+                       todo = 0;
+
+                       to++;
+                       break;
+               }
+       }
+
+       /* Ready to "commit" this state change to tgt */
+       skb_shinfo(tgt)->nr_frags = to;
+
+       if (merge >= 0) {
+               fragfrom = &skb_shinfo(skb)->frags[0];
+               fragto = &skb_shinfo(tgt)->frags[merge];
+
+               fragto->size += fragfrom->size;
+               put_page(fragfrom->page);
+       }
+
+       /* Reposition in the original skb */
+       to = 0;
+       while (from < skb_shinfo(skb)->nr_frags)
+               skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+       skb_shinfo(skb)->nr_frags = to;
+
+       BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+onlymerged:
+       /* Most likely the tgt won't ever need its checksum anymore; skb, on
+        * the other hand, might need it again if it is retransmitted
+        */
+       tgt->ip_summed = CHECKSUM_PARTIAL;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+
+       /* Yak, is it really working this way? Some helper please? */
+       skb->len -= shiftlen;
+       skb->data_len -= shiftlen;
+       skb->truesize -= shiftlen;
+       tgt->len += shiftlen;
+       tgt->data_len += shiftlen;
+       tgt->truesize += shiftlen;
+
+       return shiftlen;
+}
+
 /**
  * skb_prepare_seq_read - Prepare a sequential read of skb data
  * @skb: the buffer to read
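
skb_shift() above is essentially frag-array surgery: when possible it grows the last frag of @tgt in place (the merge path), otherwise it moves whole frags across and splits the final one so exactly shiftlen bytes change hands, committing nr_frags only once everything is known to fit. A much-simplified userspace model of the move-and-split loop follows; it omits the coalesce/merge path, page refcounting, and the len/data_len/truesize split, and all names are hypothetical:

    #include <assert.h>

    #define MAX_FRAGS 18

    struct frag { int off, size; };

    struct skbf {
    	int nr;
    	int len;
    	struct frag frags[MAX_FRAGS];
    };

    /* Move up to shiftlen paged bytes from src to dst: whole frags
     * first, splitting the final frag if only part of it fits. */
    static int shift_frags(struct skbf *dst, struct skbf *src, int shiftlen)
    {
    	int todo = shiftlen, from = 0, to = dst->nr, keep = 0;

    	while (todo > 0 && from < src->nr) {
    		struct frag *f = &src->frags[from];

    		if (to == MAX_FRAGS)
    			return 0;	/* dst out of slots: abort, nothing committed */
    		if (todo >= f->size) {
    			/* whole frag moves across */
    			dst->frags[to++] = *f;
    			todo -= f->size;
    			from++;
    		} else {
    			/* split: dst takes the first todo bytes,
    			 * src keeps the tail of this frag */
    			dst->frags[to++] = (struct frag){ f->off, todo };
    			f->off += todo;
    			f->size -= todo;
    			todo = 0;
    		}
    	}

    	/* commit: adopt the new count in dst, close the gap in src */
    	dst->nr = to;
    	while (from < src->nr)
    		src->frags[keep++] = src->frags[from++];
    	src->nr = keep;

    	src->len -= shiftlen - todo;
    	dst->len += shiftlen - todo;
    	return shiftlen - todo;
    }

    int main(void)
    {
    	struct skbf src = { .nr = 2, .len = 2000,
    			    .frags = { { 0, 1000 }, { 0, 1000 } } };
    	struct skbf dst = { 0 };

    	assert(shift_frags(&dst, &src, 1500) == 1500);
    	assert(dst.nr == 2 && dst.frags[1].size == 500);
    	assert(src.nr == 1 && src.frags[0].off == 500 && src.len == 500);
    	return 0;
    }
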
@@ -2251,6 +2428,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
+       struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int doffset = skb->data - skb_mac_header(skb);
        unsigned int offset = doffset;
@@ -2270,7 +2448,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                struct sk_buff *nskb;
                skb_frag_t *frag;
                int hsize;
-               int k;
                int size;
 
                len = skb->len - offset;
@@ -2283,9 +2460,36 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                if (hsize > len || !sg)
                        hsize = len;
 
-               nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
-               if (unlikely(!nskb))
-                       goto err;
+               if (!hsize && i >= nfrags) {
+                       BUG_ON(fskb->len != len);
+
+                       pos += len;
+                       nskb = skb_clone(fskb, GFP_ATOMIC);
+                       fskb = fskb->next;
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       hsize = skb_end_pointer(nskb) - nskb->head;
+                       if (skb_cow_head(nskb, doffset + headroom)) {
+                               kfree_skb(nskb);
+                               goto err;
+                       }
+
+                       nskb->truesize += skb_end_pointer(nskb) - nskb->head -
+                                         hsize;
+                       skb_release_head_state(nskb);
+                       __skb_push(nskb, doffset);
+               } else {
+                       nskb = alloc_skb(hsize + doffset + headroom,
+                                        GFP_ATOMIC);
+
+                       if (unlikely(!nskb))
+                               goto err;
+
+                       skb_reserve(nskb, headroom);
+                       __skb_put(nskb, doffset);
+               }
 
                if (segs)
                        tail->next = nskb;
@@ -2296,13 +2500,15 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               skb_reserve(nskb, headroom);
                skb_reset_mac_header(nskb);
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
                                          skb_network_header_len(skb));
-               skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
-                                         doffset);
+               skb_copy_from_linear_data(skb, nskb->data, doffset);
+
+               if (pos >= offset + len)
+                       continue;
+
                if (!sg) {
                        nskb->ip_summed = CHECKSUM_NONE;
                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
@@ -2312,14 +2518,11 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                }
 
                frag = skb_shinfo(nskb)->frags;
-               k = 0;
 
                skb_copy_from_linear_data_offset(skb, offset,
                                                 skb_put(nskb, hsize), hsize);
 
-               while (pos < offset + len) {
-                       BUG_ON(i >= nfrags);
-
+               while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
                        get_page(frag->page);
                        size = frag->size;
@@ -2329,20 +2532,39 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                                frag->size -= offset - pos;
                        }
 
-                       k++;
+                       skb_shinfo(nskb)->nr_frags++;
 
                        if (pos + size <= offset + len) {
                                i++;
                                pos += size;
                        } else {
                                frag->size -= pos + size - (offset + len);
-                               break;
+                               goto skip_fraglist;
                        }
 
                        frag++;
                }
 
-               skb_shinfo(nskb)->nr_frags = k;
+               if (pos < offset + len) {
+                       struct sk_buff *fskb2 = fskb;
+
+                       BUG_ON(pos + fskb->len != offset + len);
+
+                       pos += fskb->len;
+                       fskb = fskb->next;
+
+                       if (fskb2->next) {
+                               fskb2 = skb_clone(fskb2, GFP_ATOMIC);
+                               if (!fskb2)
+                                       goto err;
+                       } else
+                               skb_get(fskb2);
+
+                       BUG_ON(skb_shinfo(nskb)->frag_list);
+                       skb_shinfo(nskb)->frag_list = fskb2;
+               }
+
+skip_fraglist:
                nskb->data_len = len - hsize;
                nskb->len += nskb->data_len;
                nskb->truesize += nskb->data_len;
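
The skb_segment() changes above let the segmenter reuse buffers already queued on the original skb's frag_list instead of always allocating fresh heads, and track the per-segment frag count in skb_shinfo(nskb)->nr_frags directly rather than in a local k. The baseline operation is unchanged: cut the payload at mss boundaries and prefix each piece with a copy of the original headers. A minimal userspace sketch of that baseline, using flat buffers instead of skbs (segment() and struct seg are hypothetical):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct seg {
    	char *buf;
    	size_t len;
    	struct seg *next;
    };

    /* Split the payload into mss-sized chunks, prefixing each chunk
     * with a copy of the doffset header bytes, as skb_segment does
     * for the linear part of a GSO skb. */
    static struct seg *segment(const char *pkt, size_t doffset,
    			   size_t payload_len, size_t mss)
    {
    	struct seg *head = NULL, **tail = &head;

    	for (size_t off = 0; off < payload_len; off += mss) {
    		size_t chunk = payload_len - off < mss ?
    			       payload_len - off : mss;
    		struct seg *s = malloc(sizeof(*s));

    		s->buf = malloc(doffset + chunk);
    		memcpy(s->buf, pkt, doffset);		/* replicate headers */
    		memcpy(s->buf + doffset, pkt + doffset + off, chunk);
    		s->len = doffset + chunk;
    		s->next = NULL;
    		*tail = s;
    		tail = &s->next;
    	}
    	return head;
    }

    int main(void)
    {
    	char pkt[14 + 10];	/* 14-byte "header" + 10-byte payload */

    	memset(pkt, 'H', 14);
    	memcpy(pkt + 14, "0123456789", 10);

    	struct seg *s = segment(pkt, 14, 10, 4);	/* chunks of 4, 4, 2 */
    	size_t n = 0, last = 0;

    	while (s) {
    		struct seg *next = s->next;
    		last = s->len;
    		n++;
    		free(s->buf);
    		free(s);
    		s = next;
    	}
    	assert(n == 3 && last == 14 + 2);
    	return 0;
    }
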
@@ -2360,6 +2582,65 @@ err:
 
 EXPORT_SYMBOL_GPL(skb_segment);
 
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       struct sk_buff *p = *head;
+       struct sk_buff *nskb;
+       unsigned int headroom;
+       unsigned int hlen = p->data - skb_mac_header(p);
+
+       if (hlen + p->len + skb->len >= 65536)
+               return -E2BIG;
+
+       if (skb_shinfo(p)->frag_list)
+               goto merge;
+
+       headroom = skb_headroom(p);
+       nskb = netdev_alloc_skb(p->dev, headroom);
+       if (unlikely(!nskb))
+               return -ENOMEM;
+
+       __copy_skb_header(nskb, p);
+       nskb->mac_len = p->mac_len;
+
+       skb_reserve(nskb, headroom);
+
+       skb_set_mac_header(nskb, -hlen);
+       skb_set_network_header(nskb, skb_network_offset(p));
+       skb_set_transport_header(nskb, skb_transport_offset(p));
+
+       memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);
+
+       *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
+       skb_shinfo(nskb)->frag_list = p;
+       skb_header_release(p);
+       nskb->prev = p;
+
+       nskb->data_len += p->len;
+       nskb->truesize += p->len;
+       nskb->len += p->len;
+
+       *head = nskb;
+       nskb->next = p->next;
+       p->next = NULL;
+
+       p = nskb;
+
+merge:
+       NAPI_GRO_CB(p)->count++;
+       p->prev->next = skb;
+       p->prev = skb;
+       skb_header_release(skb);
+
+       p->data_len += skb->len;
+       p->truesize += skb->len;
+       p->len += skb->len;
+
+       NAPI_GRO_CB(skb)->same_flow = 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive);
+
 void __init skb_init(void)
 {
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
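
skb_gro_receive() above merges a same-flow packet into an aggregate: the first merge allocates a header-only skb and parks the old head on its frag_list, later merges append via the prev/next chain, and each merge grows len, data_len, and truesize by the merged packet's length, with the total capped below 64 KiB. A simplified userspace model of the append-and-account pattern (it chains through a frag_list/tail pair instead of the kernel's prev/next bookkeeping; all names are hypothetical):

    #include <assert.h>
    #include <stddef.h>

    struct pkt {
    	int len;
    	int data_len;
    	struct pkt *frag_list;	/* chain of merged packets */
    	struct pkt *tail;	/* last packet on the chain */
    	int gro_count;		/* packets merged so far */
    };

    /* Append skb to head's frag_list and fold its length into the
     * aggregate, mirroring the "merge:" tail of skb_gro_receive. */
    static int gro_merge(struct pkt *head, struct pkt *skb)
    {
    	if (head->len + skb->len >= 65536)
    		return -1;	/* would exceed the max IP packet size */

    	skb->frag_list = NULL;
    	if (head->tail)
    		head->tail->frag_list = skb;
    	else
    		head->frag_list = skb;
    	head->tail = skb;

    	head->gro_count++;
    	head->len += skb->len;
    	head->data_len += skb->len;
    	return 0;
    }

    int main(void)
    {
    	struct pkt head = { .len = 1448 };
    	struct pkt p2 = { .len = 1448 }, p3 = { .len = 1448 };

    	assert(gro_merge(&head, &p2) == 0);
    	assert(gro_merge(&head, &p3) == 0);
    	assert(head.len == 3 * 1448 && head.gro_count == 2);
    	assert(head.frag_list == &p2 && p2.frag_list == &p3);
    	return 0;
    }
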