diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 17b3f70..9dcf956 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -108,9 +108,6 @@ struct nf_bridge_info {
 	atomic_t use;
 	struct net_device *physindev;
 	struct net_device *physoutdev;
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-	struct net_device *netoutdev;
-#endif
 	unsigned int mask;
 	unsigned long data[32 / sizeof(unsigned long)];
 };
@@ -149,8 +146,14 @@ struct skb_shared_info {
 	unsigned short	gso_segs;
 	unsigned short	gso_type;
 	__be32		ip6_frag_id;
+#ifdef CONFIG_HAS_DMA
+	unsigned int	num_dma_maps;
+#endif
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
+#ifdef CONFIG_HAS_DMA
+	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
+#endif
 };
 
 /* We divide dataref into two halves.  The higher 16 bits hold references
@@ -235,6 +238,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@mark: Generic packet mark
  *	@nfct: Associated connection, if any
  *	@ipvs_property: skbuff is owned by ipvs
+ *	@peeked: this packet has been seen already, so stats have been
+ *		done for it, don't do them again
  *	@nf_trace: netfilter packet trace flag
  *	@nfctinfo: Relationship of this skb to the connection
  *	@nfct_reasm: netfilter conntrack re-assembly pointer
@@ -243,9 +248,15 @@ typedef unsigned char *sk_buff_data_t;
  *	@queue_mapping: Queue mapping for multiqueue devices
  *	@tc_index: Traffic control index
  *	@tc_verd: traffic control verdict
+ *	@ndisc_nodetype: router type (from link layer)
+ *	@do_not_encrypt: set to prevent encryption of this frame
+ *	@requeue: set to indicate that the wireless core should attempt
+ *		a software retry on this frame if we failed to
+ *		receive an ACK for it
  *	@dma_cookie: a cookie to one of several possible DMA operations
  *		done by skb DMA functions
  *	@secmark: security marking
+ *	@vlan_tci: vlan tag control information
  */
 
 struct sk_buff {
@@ -257,9 +268,13 @@ struct sk_buff {
 	ktime_t			tstamp;
 	struct net_device	*dev;
 
-	struct  dst_entry	*dst;
+	union {
+		struct  dst_entry	*dst;
+		struct  rtable		*rtable;
+	};
+#ifdef CONFIG_XFRM
 	struct	sec_path	*sp;
-
+#endif
 	/*
 	 * This is the control buffer. It is free to use for every
 	 * layer. Please put your private variables there. If you
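With the anonymous union above, skb->rtable aliases skb->dst, so routing code no longer has to open-code the cast. A hedged illustration of what callers gain; the helper name is hypothetical and not part of this patch:

	/* Both union members read the same pointer, so for an skb whose dst
	 * really is a routing-cache entry these two expressions are equal. */
	static inline struct rtable *example_skb_rtable(const struct sk_buff *skb)
	{
		return skb->rtable;	/* previously: (struct rtable *)skb->dst */
	}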
@@ -288,6 +303,7 @@ struct sk_buff {
 	__u8			pkt_type:3,
 				fclone:2,
 				ipvs_property:1,
+				peeked:1,
 				nf_trace:1;
 	__be16			protocol;
 
@@ -301,16 +317,21 @@ struct sk_buff {
 #endif
 
 	int			iif;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	__u16			queue_mapping;
-#endif
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
 	__u16			tc_verd;	/* traffic control verdict */
 #endif
 #endif
-	/* 2 byte hole */
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+	__u8			ndisc_nodetype:2;
+#endif
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+	__u8			do_not_encrypt:1;
+	__u8			requeue:1;
+#endif
+	/* 0/13/14 bit hole */
 
 #ifdef CONFIG_NET_DMA
 	dma_cookie_t		dma_cookie;
@@ -321,6 +342,8 @@ struct sk_buff {
 
 	__u32			mark;
 
+	__u16			vlan_tci;
+
 	sk_buff_data_t		transport_header;
 	sk_buff_data_t		network_header;
 	sk_buff_data_t		mac_header;
@@ -341,6 +364,14 @@ struct sk_buff {
 
 #include <asm/system.h>
 
+#ifdef CONFIG_HAS_DMA
+#include <linux/dma-mapping.h>
+extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
+		       enum dma_data_direction dir);
+extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+			  enum dma_data_direction dir);
+#endif
+
 extern void kfree_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
@@ -357,6 +388,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, -1);
 }
 
+extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
@@ -383,15 +416,6 @@ extern void skb_over_panic(struct sk_buff *skb, int len,
 			   void *here);
 extern void skb_under_panic(struct sk_buff *skb, int len,
 			    void *here);
-extern void skb_truesize_bug(struct sk_buff *skb);
-
-static inline void skb_truesize_check(struct sk_buff *skb)
-{
-	int len = sizeof(struct sk_buff) + skb->len;
-
-	if (unlikely((int)skb->truesize < len))
-		skb_truesize_bug(skb);
-}
 
 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 			int getfrag(void *from, char *to, int offset,
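As a reference for the new CONFIG_HAS_DMA helpers declared above, a hedged sketch of a driver transmit path follows. The driver and the ring-posting step are hypothetical, and a real driver would unmap from its TX-completion handler rather than inline:

	/* Sketch: skb_dma_map() fills skb_shinfo(skb)->dma_maps[], slot 0 for
	 * the linear data and slots 1..nr_frags for the paged fragments, and
	 * sets num_dma_maps accordingly. */
	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct device *d = dev->dev.parent;	/* underlying bus device */

		if (skb_dma_map(d, skb, DMA_TO_DEVICE))
			return NETDEV_TX_BUSY;		/* could not map, retry later */

		/* ... post skb_shinfo(skb)->dma_maps[] to the hardware ring ... */

		skb_dma_unmap(d, skb, DMA_TO_DEVICE);	/* normally at TX completion */
		return NETDEV_TX_OK;
	}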
@@ -447,6 +471,68 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }
 
 /**
+ *	skb_queue_is_last - check if skb is the last entry in the queue
+ *	@list: queue head
+ *	@skb: buffer
+ *
+ *	Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+				     const struct sk_buff *skb)
+{
+	return (skb->next == (struct sk_buff *) list);
+}
+
+/**
+ *	skb_queue_is_first - check if skb is the first entry in the queue
+ *	@list: queue head
+ *	@skb: buffer
+ *
+ *	Returns true if @skb is the first buffer on the list.
+ */
+static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+				      const struct sk_buff *skb)
+{
+	return (skb->prev == (struct sk_buff *) list);
+}
+
+/**
+ *	skb_queue_next - return the next packet in the queue
+ *	@list: queue head
+ *	@skb: current buffer
+ *
+ *	Return the next packet in @list after @skb.  It is only valid to
+ *	call this if skb_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
+					     const struct sk_buff *skb)
+{
+	/* This BUG_ON may seem severe, but if we just return then we
+	 * are going to dereference garbage.
+	 */
+	BUG_ON(skb_queue_is_last(list, skb));
+	return skb->next;
+}
+
+/**
+ *	skb_queue_prev - return the prev packet in the queue
+ *	@list: queue head
+ *	@skb: current buffer
+ *
+ *	Return the prev packet in @list before @skb.  It is only valid to
+ *	call this if skb_queue_is_first() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
+					     const struct sk_buff *skb)
+{
+	/* This BUG_ON may seem severe, but if we just return then we
+	 * are going to dereference garbage.
+	 */
+	BUG_ON(skb_queue_is_first(list, skb));
+	return skb->prev;
+}
+
+/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
@@ -634,6 +720,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 	return list_->qlen;
 }
 
+/**
+ *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ *	@list: queue to initialize
+ *
+ *	This initializes only the list and queue length aspects of
+ *	an sk_buff_head object.  This allows to initialize the list
+ *	aspects of an sk_buff_head without reinitializing things like
+ *	the spinlock.  It can also be used for on-stack sk_buff_head
+ *	objects where the spinlock is known to not be used.
+ */
+static inline void __skb_queue_head_init(struct sk_buff_head *list)
+{
+	list->prev = list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+
 /*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
@@ -645,8 +747,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
-	list->prev = list->next = (struct sk_buff *)list;
-	list->qlen = 0;
+	__skb_queue_head_init(list);
 }
 
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
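The four accessors above make it possible to walk a queue without reaching into sk_buff_head internals. A hedged sketch of the intended pattern (hypothetical function; the caller is assumed to hold the queue lock):

	static void example_walk(struct sk_buff_head *list)
	{
		struct sk_buff *skb = skb_peek(list);	/* NULL if the queue is empty */

		while (skb) {
			/* ... examine skb in place, without unlinking it ... */
			if (skb_queue_is_last(list, skb))
				break;
			skb = skb_queue_next(list, skb);	/* safe: not last */
		}
	}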
@@ -657,11 +758,98 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 }
 
 /*
- *	Insert an sk_buff at the start of a list.
+ *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_insert(struct sk_buff *newsk,
+				struct sk_buff *prev, struct sk_buff *next,
+				struct sk_buff_head *list)
+{
+	newsk->next = next;
+	newsk->prev = prev;
+	next->prev  = prev->next = newsk;
+	list->qlen++;
+}
+
+static inline void __skb_queue_splice(const struct sk_buff_head *list,
+				      struct sk_buff *prev,
+				      struct sk_buff *next)
+{
+	struct sk_buff *first = list->next;
+	struct sk_buff *last = list->prev;
+
+	first->prev = prev;
+	prev->next = first;
+
+	last->next = next;
+	next->prev = last;
+}
+
+/**
+ *	skb_queue_splice - join two skb lists, this is designed for stacks
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ */
+static inline void skb_queue_splice(const struct sk_buff_head *list,
+				    struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
+		head->qlen += list->qlen;
+	}
+}
+
+/**
+ *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ *
+ *	The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_init(struct sk_buff_head *list,
+					 struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
+		head->qlen += list->qlen;
+		__skb_queue_head_init(list);
+	}
+}
+
+/**
+ *	skb_queue_splice_tail - join two skb lists, each list being a queue
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ */
+static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
+					 struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+		head->qlen += list->qlen;
+	}
+}
+
+/**
+ *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ *
+ *	Each of the lists is a queue.
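The splice helpers target a batching pattern: buffers are collected on an on-stack list, for which __skb_queue_head_init() suffices because the spinlock is never touched, then merged into a shared queue in O(1) under a single lock acquisition. A hedged sketch (hypothetical function, placed after the complete hunk above):

	static void example_flush_batch(struct sk_buff_head *target)
	{
		struct sk_buff_head batch;
		unsigned long flags;

		__skb_queue_head_init(&batch);	/* on-stack: no spin_lock_init() needed */

		/* ... __skb_queue_tail(&batch, skb) for each collected buffer ... */

		spin_lock_irqsave(&target->lock, flags);
		skb_queue_splice_tail_init(&batch, target);	/* batch is left empty */
		spin_unlock_irqrestore(&target->lock, flags);
	}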
+ *	The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
+					      struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+		head->qlen += list->qlen;
+		__skb_queue_head_init(list);
+	}
+}
 
 /**
 *	__skb_queue_after - queue a buffer at the list head
@@ -678,13 +866,17 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
 				     struct sk_buff *prev,
 				     struct sk_buff *newsk)
 {
-	struct sk_buff *next;
-	list->qlen++;
+	__skb_insert(newsk, prev, prev->next, list);
+}
 
-	next = prev->next;
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+		       struct sk_buff_head *list);
+
+static inline void __skb_queue_before(struct sk_buff_head *list,
+				      struct sk_buff *next,
+				      struct sk_buff *newsk)
+{
+	__skb_insert(newsk, next->prev, next, list);
 }
 
 /**
@@ -718,66 +910,7 @@ extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				   struct sk_buff *newsk)
 {
-	struct sk_buff *prev, *next;
-
-	list->qlen++;
-	next = (struct sk_buff *)list;
-	prev = next->prev;
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
-}
-
-
-/**
- *	__skb_dequeue - remove from the head of the queue
- *	@list: list to dequeue from
- *
- *	Remove the head of the list. This function does not take any locks
- *	so must be used with appropriate locks held only. The head item is
- *	returned or %NULL if the list is empty.
- */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
-{
-	struct sk_buff *next, *prev, *result;
-
-	prev = (struct sk_buff *) list;
-	next = prev->next;
-	result = NULL;
-	if (next != prev) {
-		result	     = next;
-		next	     = next->next;
-		list->qlen--;
-		next->prev   = prev;
-		prev->next   = next;
-		result->next = result->prev = NULL;
-	}
-	return result;
-}
-
-
-/*
- *	Insert a packet on a list.
- */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_insert(struct sk_buff *newsk,
-				struct sk_buff *prev, struct sk_buff *next,
-				struct sk_buff_head *list)
-{
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev  = prev->next = newsk;
-	list->qlen++;
-}
-
-/*
- *	Place a packet after a given packet in a list.
- */
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
-	__skb_insert(newsk, old, old->next, list);
+	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 }
 
 /*
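After this change every enqueue variant funnels into __skb_insert(); appending at the tail is simply inserting before the head sentinel. A hedged restatement of that equivalence (illustrative only, not part of the patch):

	static void example_enqueue_tail(struct sk_buff_head *list, struct sk_buff *skb)
	{
		/* __skb_queue_tail(list, skb) now expands to exactly this: the
		 * sk_buff_head itself acts as the sentinel "next" element. */
		__skb_queue_before(list, (struct sk_buff *)list, skb);
	}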
@@ -797,8 +930,22 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	prev->next = next;
 }
 
-
-/* XXX: more streamlined implementation */
+/**
+ *	__skb_dequeue - remove from the head of the queue
+ *	@list: list to dequeue from
+ *
+ *	Remove the head of the list. This function does not take any locks
+ *	so must be used with appropriate locks held only. The head item is
+ *	returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+	struct sk_buff *skb = skb_peek(list);
+	if (skb)
+		__skb_unlink(skb, list);
+	return skb;
+}
 
 /**
 *	__skb_dequeue_tail - remove from the tail of the queue
@@ -848,6 +995,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 	skb_shinfo(skb)->nr_frags = i + 1;
 }
 
+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+			    int off, int size);
+
 #define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
 #define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))
@@ -889,6 +1039,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
 *	Add data to an sk_buff
 */
+extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
 	unsigned char *tmp = skb_tail_pointer(skb);
@@ -898,26 +1049,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 	return tmp;
 }
 
-/**
- *	skb_put - add data to a buffer
- *	@skb: buffer to use
- *	@len: amount of data to add
- *
- *	This function extends the used data area of the buffer. If this would
- *	exceed the total buffer size the kernel will panic. A pointer to the
- *	first byte of the extra data is returned.
- */
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
-{
-	unsigned char *tmp = skb_tail_pointer(skb);
-	SKB_LINEAR_ASSERT(skb);
-	skb->tail += len;
-	skb->len  += len;
-	if (unlikely(skb->tail > skb->end))
-		skb_over_panic(skb, len, current_text_addr());
-	return tmp;
-}
-
+extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data -= len;
@@ -925,24 +1057,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 	return skb->data;
 }
 
-/**
- *	skb_push - add data to the start of a buffer
- *	@skb: buffer to use
- *	@len: amount of data to add
- *
- *	This function extends the used data area of the buffer at the buffer
- *	start. If this would exceed the total buffer headroom the kernel will
- *	panic. A pointer to the first byte of the extra data is returned.
- */
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
-{
-	skb->data -= len;
-	skb->len  += len;
-	if (unlikely(skb->data < skb->head))
-		skb_under_panic(skb, len, current_text_addr());
-	return skb->data;
-}
-
+extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
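skb_put(), skb_push() and skb_pull() keep their semantics but move out of line (their uninlined definitions live in net/core/skbuff.c). As a reminder of the usage they bound-check, a hedged sketch with made-up sizes:

	static struct sk_buff *example_build_frame(unsigned int payload_len)
	{
		struct sk_buff *skb = alloc_skb(ETH_HLEN + payload_len, GFP_ATOMIC);

		if (!skb)
			return NULL;
		skb_reserve(skb, ETH_HLEN);	/* leave headroom for the header */
		memset(skb_put(skb, payload_len), 0, payload_len);	/* grow tail */
		skb_push(skb, ETH_HLEN);	/* claim headroom back for the header */
		return skb;
	}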
@@ -950,27 +1065,12 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 	return skb->data += len;
 }
 
-/**
- *	skb_pull - remove data from the start of a buffer
- *	@skb: buffer to use
- *	@len: amount of data to remove
- *
- *	This function removes data from the start of a buffer, returning
- *	the memory to the headroom. A pointer to the next data in the buffer
- *	is returned. Once the data has been pulled future pushes will overwrite
- *	the old data.
- */
-static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
-{
-	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
-}
-
 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb_headlen(skb) &&
-	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
 		return NULL;
 	skb->len -= len;
 	return skb->data += len;
 }
@@ -987,7 +1087,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
 		return 1;
 	if (unlikely(len > skb->len))
 		return 0;
-	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }
 
 /**
@@ -1205,21 +1305,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 	skb_set_tail_pointer(skb, len);
 }
 
-/**
- *	skb_trim - remove end from a buffer
- *	@skb: buffer to alter
- *	@len: new length
- *
- *	Cut the length of a buffer down by removing data from the tail. If
- *	the buffer is already under the length specified it is not modified.
- *	The skb must be linear.
- */
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
-	if (skb->len > len)
-		__skb_trim(skb, len);
-}
-
+extern void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1302,22 +1388,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 	return skb;
 }
 
-/**
- *	dev_alloc_skb - allocate an skbuff for receiving
- *	@length: length to allocate
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory. Although this function
- *	allocates memory it can be called from an interrupt.
- */
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
-{
-	return __dev_alloc_skb(length, GFP_ATOMIC);
-}
+extern struct sk_buff *dev_alloc_skb(unsigned int length);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask);
@@ -1341,6 +1412,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }
 
+extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+
+/**
+ *	netdev_alloc_page - allocate a page for ps-rx on a specific device
+ *	@dev: network device to receive on
+ *
+ *	Allocate a new page node local to the specified device.
+ *
+ *	%NULL is returned if there is no free memory.
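pskb_may_pull() is untouched apart from spacing; for context, a hedged sketch of the canonical parsing idiom it exists for, namely guaranteeing a header lies in the linear area before it is dereferenced:

	static int example_udp_len(struct sk_buff *skb)
	{
		struct udphdr *uh;

		if (!pskb_may_pull(skb, sizeof(*uh)))	/* pulls from frags if needed */
			return -EINVAL;			/* truncated packet */

		uh = (struct udphdr *)skb->data;	/* assumes data points at UDP */
		return ntohs(uh->len);
	}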
+ */
+static inline struct page *netdev_alloc_page(struct net_device *dev)
+{
+	return __netdev_alloc_page(dev, GFP_ATOMIC);
+}
+
+static inline void netdev_free_page(struct net_device *dev, struct page *page)
+{
+	__free_page(page);
+}
+
 /**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
@@ -1419,7 +1510,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
 	unsigned int size = skb->len;
 	if (likely(size >= len))
 		return 0;
-	return skb_pad(skb, len-size);
+	return skb_pad(skb, len - size);
 }
 
 static inline int skb_add_data(struct sk_buff *skb,
@@ -1532,12 +1623,23 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 		skb != (struct sk_buff *)(queue);				\
 		skb = tmp, tmp = skb->next)
 
+#define skb_queue_walk_from(queue, skb)						\
+		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
+		     skb = skb->next)
+
+#define skb_queue_walk_from_safe(queue, skb, tmp)				\
+		for (tmp = skb->next;						\
+		     skb != (struct sk_buff *)(queue);				\
+		     skb = tmp, tmp = skb->next)
+
 #define skb_queue_reverse_walk(queue, skb) \
 		for (skb = (queue)->prev;					\
 		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
 		     skb = skb->prev)
 
+extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+					   int *peeked, int *err);
 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
 					 int noblock, int *err);
 extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
@@ -1548,6 +1650,10 @@ extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 							int hlen,
 							struct iovec *iov);
+extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
+						    int offset,
+						    struct iovec *from,
+						    int len);
 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
 extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
 					 unsigned int flags);
@@ -1568,8 +1674,12 @@ extern int	       skb_splice_bits(struct sk_buff *skb,
 extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 extern void	       skb_split(struct sk_buff *skb,
 				 struct sk_buff *skb1, const u32 len);
+extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+				 int shiftlen);
 
 extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern int	       skb_gro_receive(struct sk_buff **head,
+				       struct sk_buff *skb);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
@@ -1772,27 +1882,31 @@ static inline void skb_init_secmark(struct sk_buff *skb)
 
 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	skb->queue_mapping = queue_mapping;
-#endif
 }
 
 static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	return skb->queue_mapping;
-#else
-	return 0;
-#endif
 }
 
 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	to->queue_mapping = from->queue_mapping;
-#endif
 }
 
+#ifdef CONFIG_XFRM
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+	return skb->sp;
+}
+#else
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+	return NULL;
+}
+#endif
+
 static inline int skb_is_gso(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->gso_size;
@@ -1803,6 +1917,20 @@ static inline int skb_is_gso_v6(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
+extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
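A hedged sketch of how a ps-rx (packet-split receive) driver might use the new page helpers when refilling its ring; the ring structure is invented for the example:

	struct example_rx_ring {		/* hypothetical driver state */
		struct page *pages[256];
		unsigned int next;
	};

	static int example_refill(struct net_device *dev, struct example_rx_ring *ring)
	{
		struct page *page = netdev_alloc_page(dev);	/* GFP_ATOMIC, node-local */

		if (!page)
			return -ENOMEM;
		/* ... DMA-map the page and hand it to the hardware ... */
		ring->pages[ring->next] = page;
		return 0;
	}

On teardown each page would go back through netdev_free_page(dev, page).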
+
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+	/* LRO sets gso_size but not gso_type, whereas if GSO is really
+	 * wanted then gso_type will be set. */
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+		__skb_warn_lro_forwarding(skb);
+		return true;
+	}
+	return false;
+}
+
 static inline void skb_forward_csum(struct sk_buff *skb)
 {
 	/* Unfortunately we don't support this one.  Any brave souls? */
@@ -1810,5 +1938,6 @@ static inline void skb_forward_csum(struct sk_buff *skb)
 	skb->ip_summed = CHECKSUM_NONE;
 }
 
+bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
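Finally, a hedged sketch of where skb_warn_if_lro() is meant to sit: a forwarding or bridging path should refuse LRO-merged buffers, since they can exceed the egress MTU and carry no gso_type that would let the stack resegment them (hypothetical helper):

	static int example_may_forward(struct sk_buff *skb)
	{
		if (skb_warn_if_lro(skb))
			return 0;	/* warn and signal the caller to drop */
		return 1;
	}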