X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fskbuff.h;h=cfcc45b3bef0e7a1f1c582786aed7fc1d7bfdafc;hb=ca6d1b1333bc2e61e37982de1f28d8604c232414;hp=1d649f3eb006b786131b7d7204ef4c3b9806b614;hpb=b30973f877fea1a3fb84e05599890fcc082a88e5;p=safe%2Fjmp%2Flinux-2.6 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1d649f3..cfcc45b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -27,20 +27,23 @@ #include #include #include +#include #define HAVE_ALLOC_SKB /* For the drivers to know */ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +/* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 -#define CHECKSUM_PARTIAL 1 -#define CHECKSUM_UNNECESSARY 2 -#define CHECKSUM_COMPLETE 3 +#define CHECKSUM_UNNECESSARY 1 +#define CHECKSUM_COMPLETE 2 +#define CHECKSUM_PARTIAL 3 #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ ~(SMP_CACHE_BYTES - 1)) -#define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \ - sizeof(struct skb_shared_info)) & \ - ~(SMP_CACHE_BYTES - 1)) +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define SKB_MAX_ORDER(X, ORDER) \ + SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) @@ -61,13 +64,20 @@ * is able to produce some skb->csum, it MUST use COMPLETE, * not UNNECESSARY. * + * PARTIAL: identical to the case for output below. This may occur + * on a packet received directly from another Linux OS, e.g., + * a virtualised Linux kernel on the same host. The packet can + * be treated in the same way as UNNECESSARY except that on + * output (i.e., forwarding) the checksum must be filled in + * by the OS or the hardware. + * * B. Checksumming on output. * * NONE: skb is checksummed by protocol or csum is not required. * * PARTIAL: device is required to csum packet as seen by hard_start_xmit - * from skb->h.raw to the end and to record the checksum - * at skb->h.raw+skb->csum. + * from skb->csum_start to the end and to record the checksum + * at skb->csum_start + skb->csum_offset. * * Device must show its capabilities in dev->features, set * at device setup time. @@ -78,33 +88,31 @@ * TCP/UDP over IPv4. Sigh. Vendors like this * way by an unknown reason. Though, see comment above * about CHECKSUM_UNNECESSARY. 8) + * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead. * * Any questions? No questions, good. --ANK */ struct net_device; +struct scatterlist; +struct pipe_inode_info; -#ifdef CONFIG_NETFILTER +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { atomic_t use; - void (*destroy)(struct nf_conntrack *); }; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info { atomic_t use; struct net_device *physindev; struct net_device *physoutdev; -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) - struct net_device *netoutdev; -#endif unsigned int mask; unsigned long data[32 / sizeof(unsigned long)]; }; #endif -#endif - struct sk_buff_head { /* These two members must be first. */ struct sk_buff *next; @@ -123,8 +131,8 @@ typedef struct skb_frag_struct skb_frag_t; struct skb_frag_struct { struct page *page; - __u16 page_offset; - __u16 size; + __u32 page_offset; + __u32 size; }; /* This data is invariant across clones and lives at @@ -144,8 +152,8 @@ struct skb_shared_info { /* We divide dataref into two halves. The higher 16 bits hold references * to the payload part of skb->data. 
The lower 16 bits hold references to - * the entire skb->data. It is up to the users of the skb to agree on - * where the payload starts. + * the entire skb->data. A clone of a headerless skb holds the length of + * the header in skb->hdr_len. * * All users must obey the rule that the skb->data reference count must be * greater than or equal to the payload reference count. @@ -156,11 +164,6 @@ struct skb_shared_info { #define SKB_DATAREF_SHIFT 16 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) -struct skb_timeval { - u32 off_sec; - u32 off_usec; -}; - enum { SKB_FCLONE_UNAVAILABLE, @@ -181,6 +184,16 @@ enum { SKB_GSO_TCPV6 = 1 << 4, }; +#if BITS_PER_LONG > 32 +#define NET_SKBUFF_DATA_USES_OFFSET 1 +#endif + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +typedef unsigned int sk_buff_data_t; +#else +typedef unsigned char *sk_buff_data_t; +#endif + /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -188,17 +201,19 @@ enum { * @sk: Socket we are owned by * @tstamp: Time we arrived * @dev: Device we arrived on/are leaving by - * @input_dev: Device we arrived on - * @h: Transport layer header - * @nh: Network layer header - * @mac: Link layer header + * @transport_header: Transport layer header + * @network_header: Network layer header + * @mac_header: Link layer header * @dst: destination entry * @sp: the security path, used for xfrm * @cb: Control buffer. Free for use by every layer. Put private vars here * @len: Length of actual data * @data_len: Data length * @mac_len: Length of link layer header - * @csum: Checksum + * @hdr_len: writable header length of cloned skb + * @csum: Checksum (must include start/offset pair) + * @csum_start: Offset from skb->head where checksumming should start + * @csum_offset: Offset from csum_start where checksum should be stored * @local_df: allow local fragmentation * @cloned: Head may be cloned (check refcnt to be sure) * @nohdr: Payload reference only, must not modify header @@ -217,14 +232,22 @@ enum { * @mark: Generic packet mark * @nfct: Associated connection, if any * @ipvs_property: skbuff is owned by ipvs + * @peeked: this packet has been seen already, so stats have been + * done for it, don't do them again + * @nf_trace: netfilter packet trace flag * @nfctinfo: Relationship of this skb to the connection * @nfct_reasm: netfilter conntrack re-assembly pointer * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c + * @iif: ifindex of device we arrived on + * @queue_mapping: Queue mapping for multiqueue devices * @tc_index: Traffic control index * @tc_verd: traffic control verdict + * @ndisc_nodetype: router type (from link layer) + * @do_not_encrypt: set to prevent encryption of this frame * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking + * @vlan_tci: vlan tag control information */ struct sk_buff { @@ -233,32 +256,13 @@ struct sk_buff { struct sk_buff *prev; struct sock *sk; - struct skb_timeval tstamp; + ktime_t tstamp; struct net_device *dev; - struct net_device *input_dev; - - union { - struct tcphdr *th; - struct udphdr *uh; - struct icmphdr *icmph; - struct igmphdr *igmph; - struct iphdr *ipiph; - struct ipv6hdr *ipv6h; - unsigned char *raw; - } h; - - union { - struct iphdr *iph; - struct ipv6hdr *ipv6h; - struct arphdr *arph; - unsigned char *raw; - } nh; union { - unsigned char *raw; - } mac; - - struct dst_entry *dst; + struct dst_entry *dst; + struct rtable *rtable; + }; struct sec_path *sp; /* @@ -270,11 +274,15 @@ struct sk_buff { char 
cb[48]; unsigned int len, - data_len, - mac_len; + data_len; + __u16 mac_len, + hdr_len; union { __wsum csum; - __u32 csum_offset; + struct { + __u16 csum_start; + __u16 csum_offset; + }; }; __u32 priority; __u8 local_df:1, @@ -284,25 +292,36 @@ struct sk_buff { nfctinfo:3; __u8 pkt_type:3, fclone:2, - ipvs_property:1; + ipvs_property:1, + peeked:1, + nf_trace:1; __be16 protocol; void (*destructor)(struct sk_buff *skb); -#ifdef CONFIG_NETFILTER - struct nf_conntrack *nfct; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -#endif /* CONFIG_NETFILTER */ + + int iif; + __u16 queue_mapping; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ #ifdef CONFIG_NET_CLS_ACT __u16 tc_verd; /* traffic control verdict */ #endif #endif +#ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; +#endif +#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) + __u8 do_not_encrypt:1; +#endif + /* 0/13/14 bit hole */ + #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; #endif @@ -312,13 +331,18 @@ struct sk_buff { __u32 mark; + __u16 vlan_tci; + + sk_buff_data_t transport_header; + sk_buff_data_t network_header; + sk_buff_data_t mac_header; /* These elements must be at the end, see alloc_skb() for details. */ + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head, + *data; unsigned int truesize; atomic_t users; - unsigned char *head, - *data, - *tail, - *end; }; #ifdef __KERNEL__ @@ -345,10 +369,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, 1, -1); } -extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, - unsigned int size, - gfp_t priority); -extern void kfree_skbmem(struct sk_buff *skb); +extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); extern struct sk_buff *skb_copy(const struct sk_buff *skb, @@ -363,6 +384,11 @@ extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); +extern int skb_to_sgvec(struct sk_buff *skb, + struct scatterlist *sg, int offset, + int len); +extern int skb_cow_data(struct sk_buff *skb, int tailbits, + struct sk_buff **trailer); extern int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) kfree_skb(a) extern void skb_over_panic(struct sk_buff *skb, int len, @@ -373,7 +399,9 @@ extern void skb_truesize_bug(struct sk_buff *skb); static inline void skb_truesize_check(struct sk_buff *skb) { - if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len)) + int len = sizeof(struct sk_buff) + skb->len; + + if (unlikely((int)skb->truesize < len)) skb_truesize_bug(skb); } @@ -404,8 +432,20 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config, struct ts_state *state); +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->end; +} +#else +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->end; +} +#endif + /* Internal */ -#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) +#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) /** * skb_queue_empty - check if a queue is empty @@ 
-621,12 +661,29 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) list->qlen = 0; } +static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) +{ + skb_queue_head_init(list); + lockdep_set_class(&list->lock, class); +} + /* - * Insert an sk_buff at the start of a list. + * Insert an sk_buff on a list. * * The "__skb_xxxx()" functions are the non-atomic ones that * can only be called with interrupts disabled. */ +extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); +static inline void __skb_insert(struct sk_buff *newsk, + struct sk_buff *prev, struct sk_buff *next, + struct sk_buff_head *list) +{ + newsk->next = next; + newsk->prev = prev; + next->prev = prev->next = newsk; + list->qlen++; +} /** * __skb_queue_after - queue a buffer at the list head @@ -643,13 +700,17 @@ static inline void __skb_queue_after(struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *newsk) { - struct sk_buff *next; - list->qlen++; + __skb_insert(newsk, prev, prev->next, list); +} - next = prev->next; - newsk->next = next; - newsk->prev = prev; - next->prev = prev->next = newsk; +extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, + struct sk_buff_head *list); + +static inline void __skb_queue_before(struct sk_buff_head *list, + struct sk_buff *next, + struct sk_buff *newsk) +{ + __skb_insert(newsk, next->prev, next, list); } /** @@ -683,66 +744,7 @@ extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) { - struct sk_buff *prev, *next; - - list->qlen++; - next = (struct sk_buff *)list; - prev = next->prev; - newsk->next = next; - newsk->prev = prev; - next->prev = prev->next = newsk; -} - - -/** - * __skb_dequeue - remove from the head of the queue - * @list: list to dequeue from - * - * Remove the head of the list. This function does not take any locks - * so must be used with appropriate locks held only. The head item is - * returned or %NULL if the list is empty. - */ -extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); -static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) -{ - struct sk_buff *next, *prev, *result; - - prev = (struct sk_buff *) list; - next = prev->next; - result = NULL; - if (next != prev) { - result = next; - next = next->next; - list->qlen--; - next->prev = prev; - prev->next = next; - result->next = result->prev = NULL; - } - return result; -} - - -/* - * Insert a packet on a list. - */ -extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); -static inline void __skb_insert(struct sk_buff *newsk, - struct sk_buff *prev, struct sk_buff *next, - struct sk_buff_head *list) -{ - newsk->next = next; - newsk->prev = prev; - next->prev = prev->next = newsk; - list->qlen++; -} - -/* - * Place a packet after a given packet in a list. 
- */ -extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); -static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) -{ - __skb_insert(newsk, old, old->next, list); + __skb_queue_before(list, (struct sk_buff *)list, newsk); } /* @@ -762,8 +764,22 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) prev->next = next; } - -/* XXX: more streamlined implementation */ +/** + * __skb_dequeue - remove from the head of the queue + * @list: list to dequeue from + * + * Remove the head of the list. This function does not take any locks + * so must be used with appropriate locks held only. The head item is + * returned or %NULL if the list is empty. + */ +extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); +static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} /** * __skb_dequeue_tail - remove from the tail of the queue @@ -817,38 +833,54 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i, #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) -/* - * Add data to an sk_buff - */ -static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) { - unsigned char *tmp = skb->tail; - SKB_LINEAR_ASSERT(skb); - skb->tail += len; - skb->len += len; - return tmp; + return skb->head + skb->tail; } -/** - * skb_put - add data to a buffer - * @skb: buffer to use - * @len: amount of data to add - * - * This function extends the used data area of the buffer. If this would - * exceed the total buffer size the kernel will panic. A pointer to the - * first byte of the extra data is returned. 
+static inline void skb_reset_tail_pointer(struct sk_buff *skb)
+{
+	skb->tail = skb->data - skb->head;
+}
+
+static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+{
+	skb_reset_tail_pointer(skb);
+	skb->tail += offset;
+}
+#else /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
+{
+	return skb->tail;
+}
+
+static inline void skb_reset_tail_pointer(struct sk_buff *skb)
+{
+	skb->tail = skb->data;
+}
+
+static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+{
+	skb->tail = skb->data + offset;
+}
+
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
+/*
+ * Add data to an sk_buff
  */
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
-	unsigned char *tmp = skb->tail;
+	unsigned char *tmp = skb_tail_pointer(skb);
 	SKB_LINEAR_ASSERT(skb);
 	skb->tail += len;
 	skb->len  += len;
-	if (unlikely(skb->tail>skb->end))
-		skb_over_panic(skb, len, current_text_addr());
 	return tmp;
 }
 
+extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data -= len;
@@ -856,24 +888,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 	return skb->data;
 }
 
-/**
- * skb_push - add data to the start of a buffer
- * @skb: buffer to use
- * @len: amount of data to add
- *
- * This function extends the used data area of the buffer at the buffer
- * start. If this would exceed the total buffer headroom the kernel will
- * panic. A pointer to the first byte of the extra data is returned.
- */
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
-{
-	skb->data -= len;
-	skb->len  += len;
-	if (unlikely(skb->data<skb->head))
-		skb_under_panic(skb, len, current_text_addr());
-	return skb->data;
-}
-
+extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
@@ -881,21 +896,6 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 	return skb->data += len;
 }
 
-/**
- * skb_pull - remove data from the start of a buffer
- * @skb: buffer to use
- * @len: amount of data to remove
- *
- * This function removes data from the start of a buffer, returning
- * the memory to the headroom. A pointer to the next data in the buffer
- * is returned. Once the data has been pulled future pushes will overwrite
- * the old data.
- */
-static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
-{
-	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
-}
-
 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
@@ -927,7 +927,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
  *
  * Return the number of bytes of free space at the head of an &sk_buff.
*/ -static inline int skb_headroom(const struct sk_buff *skb) +static inline unsigned int skb_headroom(const struct sk_buff *skb) { return skb->data - skb->head; } @@ -957,6 +957,130 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->head + skb->transport_header; +} + +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data - skb->head; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_transport_header(skb); + skb->transport_header += offset; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->head + skb->network_header; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = skb->data - skb->head; +} + +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb_reset_network_header(skb); + skb->network_header += offset; +} + +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->mac_header; +} + +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != ~0U; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data - skb->head; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb_reset_mac_header(skb); + skb->mac_header += offset; +} + +#else /* NET_SKBUFF_DATA_USES_OFFSET */ + +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->transport_header; +} + +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb->transport_header = skb->data + offset; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->network_header; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = skb->data; +} + +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb->network_header = skb->data + offset; +} + +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->mac_header; +} + +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != NULL; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb->mac_header = skb->data + offset; +} +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + +static inline int skb_transport_offset(const struct sk_buff *skb) +{ + return skb_transport_header(skb) - skb->data; +} + +static inline u32 skb_network_header_len(const struct sk_buff *skb) +{ + return skb->transport_header - skb->network_header; +} + +static inline int skb_network_offset(const struct sk_buff *skb) +{ + return skb_network_header(skb) - skb->data; +} + /* * CPUs often take a performance hit when accessing unaligned memory * locations. 
The actual performance hit varies, it can be small if the @@ -1008,25 +1132,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) WARN_ON(1); return; } - skb->len = len; - skb->tail = skb->data + len; -} - -/** - * skb_trim - remove end from a buffer - * @skb: buffer to alter - * @len: new length - * - * Cut the length of a buffer down by removing data from the tail. If - * the buffer is already under the length specified it is not modified. - * The skb must be linear. - */ -static inline void skb_trim(struct sk_buff *skb, unsigned int len) -{ - if (skb->len > len) - __skb_trim(skb, len); + skb->len = len; + skb_set_tail_pointer(skb, len); } +extern void skb_trim(struct sk_buff *skb, unsigned int len); static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) { @@ -1109,22 +1219,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, return skb; } -/** - * dev_alloc_skb - allocate an skbuff for receiving - * @length: length to allocate - * - * Allocate a new &sk_buff and assign it a usage count of one. The - * buffer has unspecified headroom built in. Users should allocate - * the headroom they think they need without accounting for the - * built in space. The built in space is used for optimisations. - * - * %NULL is returned if there is no free memory. Although this function - * allocates memory it can be called from an interrupt. - */ -static inline struct sk_buff *dev_alloc_skb(unsigned int length) -{ - return __dev_alloc_skb(length, GFP_ATOMIC); -} +extern struct sk_buff *dev_alloc_skb(unsigned int length); extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask); @@ -1149,6 +1244,36 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, } /** + * skb_clone_writable - is the header of a clone writable + * @skb: buffer to check + * @len: length up to which to write + * + * Returns true if modifying the header part of the cloned buffer + * does not requires the data to be copied. + */ +static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len) +{ + return !skb_header_cloned(skb) && + skb_headroom(skb) + len <= skb->hdr_len; +} + +static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, + int cloned) +{ + int delta = 0; + + if (headroom < NET_SKB_PAD) + headroom = NET_SKB_PAD; + if (headroom > skb_headroom(skb)) + delta = headroom - skb_headroom(skb); + + if (delta || cloned) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} + +/** * skb_cow - copy header of skb when it is required * @skb: buffer to cow * @headroom: needed headroom @@ -1162,16 +1287,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, */ static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) { - int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) - - skb_headroom(skb); - - if (delta < 0) - delta = 0; + return __skb_cow(skb, headroom, skb_cloned(skb)); +} - if (delta || skb_cloned(skb)) - return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) & - ~(NET_SKB_PAD-1), 0, GFP_ATOMIC); - return 0; +/** + * skb_cow_head - skb_cow but only making the head writable + * @skb: buffer to cow + * @headroom: needed headroom + * + * This function is identical to skb_cow except that we replace the + * skb_cloned check by skb_header_cloned. It should be used when + * you only need to push on some header and do not need to modify + * the data. 
+ */ +static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_header_cloned(skb)); } /** @@ -1298,12 +1429,19 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \ skb = skb->next) +#define skb_queue_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->next, tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + #define skb_queue_reverse_walk(queue, skb) \ for (skb = (queue)->prev; \ prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ skb = skb->prev) +extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *err); extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); extern unsigned int datagram_poll(struct file *file, struct socket *sock, @@ -1315,17 +1453,22 @@ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, struct iovec *iov); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, +extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); extern __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); -extern int skb_store_bits(const struct sk_buff *skb, int offset, - void *from, int len); +extern int skb_store_bits(struct sk_buff *skb, int offset, + const void *from, int len); extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum); +extern int skb_splice_bits(struct sk_buff *skb, + unsigned int offset, + struct pipe_inode_info *pipe, + unsigned int len, + unsigned int flags); extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); @@ -1346,8 +1489,36 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, return buffer; } +static inline void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) +{ + memcpy(to, skb->data, len); +} + +static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, + const int offset, void *to, + const unsigned int len) +{ + memcpy(to, skb->data + offset, len); +} + +static inline void skb_copy_to_linear_data(struct sk_buff *skb, + const void *from, + const unsigned int len) +{ + memcpy(skb->data, from, len); +} + +static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, + const int offset, + const void *from, + const unsigned int len) +{ + memcpy(skb->data + offset, from, len); +} + extern void skb_init(void); -extern void skb_add_mtu(int mtu); /** * skb_get_timestamp - get timestamp from a skb @@ -1360,29 +1531,32 @@ extern void skb_add_mtu(int mtu); */ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp) { - stamp->tv_sec = skb->tstamp.off_sec; - stamp->tv_usec = skb->tstamp.off_usec; + *stamp = ktime_to_timeval(skb->tstamp); } -/** - * skb_set_timestamp - set timestamp of a skb - * @skb: skb to set stamp of - * @stamp: pointer to struct timeval to get stamp from - * - * Timestamps are stored in the skb as offsets to a base timestamp. - * This function converts a struct timeval to an offset and stores - * it in the skb. 
- */ -static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp) +static inline void __net_timestamp(struct sk_buff *skb) +{ + skb->tstamp = ktime_get_real(); +} + +static inline ktime_t net_timedelta(ktime_t t) { - skb->tstamp.off_sec = stamp->tv_sec; - skb->tstamp.off_usec = stamp->tv_usec; + return ktime_sub(ktime_get_real(), t); } -extern void __net_timestamp(struct sk_buff *skb); +static inline ktime_t net_invalid_timestamp(void) +{ + return ktime_set(0, 0); +} +extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); +static inline int skb_csum_unnecessary(const struct sk_buff *skb) +{ + return skb->ip_summed & CHECKSUM_UNNECESSARY; +} + /** * skb_checksum_complete - Calculate checksum of an entire packet * @skb: packet to process @@ -1399,24 +1573,24 @@ extern __sum16 __skb_checksum_complete(struct sk_buff *skb); * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the * hardware has already verified the correctness of the checksum. */ -static inline unsigned int skb_checksum_complete(struct sk_buff *skb) +static inline __sum16 skb_checksum_complete(struct sk_buff *skb) { - return skb->ip_summed != CHECKSUM_UNNECESSARY && - __skb_checksum_complete(skb); + return skb_csum_unnecessary(skb) ? + 0 : __skb_checksum_complete(skb); } -#ifdef CONFIG_NETFILTER +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +extern void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) { if (nfct && atomic_dec_and_test(&nfct->use)) - nfct->destroy(nfct); + nf_conntrack_destroy(nfct); } static inline void nf_conntrack_get(struct nf_conntrack *nfct) { if (nfct) atomic_inc(&nfct->use); } -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) static inline void nf_conntrack_get_reasm(struct sk_buff *skb) { if (skb) @@ -1442,9 +1616,9 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) #endif /* CONFIG_BRIDGE_NETFILTER */ static inline void nf_reset(struct sk_buff *skb) { +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb->nfct); skb->nfct = NULL; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put_reasm(skb->nfct_reasm); skb->nfct_reasm = NULL; #endif @@ -1454,9 +1628,33 @@ static inline void nf_reset(struct sk_buff *skb) #endif } -#else /* CONFIG_NETFILTER */ -static inline void nf_reset(struct sk_buff *skb) {} -#endif /* CONFIG_NETFILTER */ +/* Note: This doesn't put any conntrack and bridge info in dst. 
*/ +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + dst->nfct = src->nfct; + nf_conntrack_get(src->nfct); + dst->nfctinfo = src->nfctinfo; + dst->nfct_reasm = src->nfct_reasm; + nf_conntrack_get_reasm(src->nfct_reasm); +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + dst->nf_bridge = src->nf_bridge; + nf_bridge_get(src->nf_bridge); +#endif +} + +static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(dst->nfct); + nf_conntrack_put_reasm(dst->nfct_reasm); +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + nf_bridge_put(dst->nf_bridge); +#endif + __nf_copy(dst, src); +} #ifdef CONFIG_NETWORK_SECMARK static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) @@ -1476,10 +1674,52 @@ static inline void skb_init_secmark(struct sk_buff *skb) { } #endif +static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) +{ + skb->queue_mapping = queue_mapping; +} + +static inline u16 skb_get_queue_mapping(struct sk_buff *skb) +{ + return skb->queue_mapping; +} + +static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) +{ + to->queue_mapping = from->queue_mapping; +} + static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; } +static inline int skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} + +extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); + +static inline bool skb_warn_if_lro(const struct sk_buff *skb) +{ + /* LRO sets gso_size but not gso_type, whereas if GSO is really + * wanted then gso_type will be set. */ + struct skb_shared_info *shinfo = skb_shinfo(skb); + if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { + __skb_warn_lro_forwarding(skb); + return true; + } + return false; +} + +static inline void skb_forward_csum(struct sk_buff *skb) +{ + /* Unfortunately we don't support this one. Any brave souls? */ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +} + +bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */
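
Notes on the interfaces this diff touches, with small standalone C sketches. These are illustrative userspace models under stated assumptions, not kernel code.

The "B. Checksumming on output" comment is the heart of the csum_start/csum_offset change: a CHECKSUM_PARTIAL packet carries a start and an offset, and whoever completes it (driver or hardware) checksums from csum_start to the end of the packet and writes the folded result at csum_start + csum_offset. A minimal sketch of that contract, assuming a plain RFC 1071 ones'-complement sum; fill_partial_csum and the packet layout are invented for illustration:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* RFC 1071 ones'-complement sum over a byte range. */
static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/*
 * The CHECKSUM_PARTIAL contract: checksum from csum_start to the end
 * of the packet and store the result at csum_start + csum_offset.
 * In the real stack the protocol code seeds the checksum field with
 * the pseudo-header sum before handing the skb to the device.
 */
static void fill_partial_csum(uint8_t *pkt, size_t len,
			      size_t csum_start, size_t csum_offset)
{
	uint16_t csum = csum_fold(csum_add(0, pkt + csum_start,
					   len - csum_start));

	pkt[csum_start + csum_offset]     = csum >> 8;
	pkt[csum_start + csum_offset + 1] = csum & 0xff;
}

int main(void)
{
	uint8_t pkt[32] = "ethhdr--l4hdr+++++payload";

	pkt[10 + 2] = pkt[10 + 3] = 0;	/* checksum field, seeded with 0 here */
	fill_partial_csum(pkt, sizeof(pkt), 10, 2);
	printf("stored checksum: %02x%02x\n", pkt[12], pkt[13]);
	return 0;
}

The new "PARTIAL" paragraph in the A-section describes the receive side of the same state: a packet arriving from, say, a virtualised guest may still carry an unfilled start/offset pair, which is why forwarding must complete it on output.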
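
The warning "Don't change this without changing skb_csum_unnecessary!" and the reordering of the CHECKSUM_* values (PARTIAL moves from 1 to 3) go together: with UNNECESSARY = 1 and PARTIAL = 3, bit 0 flags exactly the two receive states that need no software verification, which is what lets skb_csum_unnecessary() and skb_checksum_complete() above treat PARTIAL like UNNECESSARY with a single AND. A small standalone check of that bit trick, using the values the patch defines:

#include <assert.h>

#define CHECKSUM_NONE        0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE    2
#define CHECKSUM_PARTIAL     3

/* Mirrors skb_csum_unnecessary(): bit 0 is set only for UNNECESSARY (1)
 * and PARTIAL (3), the two receive states that skip verification. */
static int csum_unnecessary(int ip_summed)
{
	return ip_summed & CHECKSUM_UNNECESSARY;
}

int main(void)
{
	assert(!csum_unnecessary(CHECKSUM_NONE));
	assert( csum_unnecessary(CHECKSUM_UNNECESSARY));
	assert(!csum_unnecessary(CHECKSUM_COMPLETE));
	assert( csum_unnecessary(CHECKSUM_PARTIAL));
	return 0;
}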
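
The sk_buff_data_t conversion turns tail and end (and the transport/network/mac header fields) into offsets from skb->head when BITS_PER_LONG > 32, so each costs 4 bytes instead of 8, while skb_tail_pointer(), skb_set_tail_pointer() and the header accessors hide the representation. A toy model of that layout and of the skb_reserve/__skb_put/__skb_push arithmetic; struct buf and its helpers are invented stand-ins, not kernel API. Note that, as in the patch, data stays a pointer while tail and end are offsets:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the sk_buff data area: one allocation, with
 * head <= data <= tail <= end. */
struct buf {
	unsigned char *head;	/* start of allocation        */
	unsigned char *data;	/* start of live data         */
	unsigned int   tail;	/* offset of end of live data */
	unsigned int   end;	/* offset of end of buffer    */
};

static unsigned char *buf_tail(const struct buf *b)
{
	return b->head + b->tail;	/* cf. skb_tail_pointer() */
}

/* cf. skb_reserve(): open headroom before any data is added */
static void buf_reserve(struct buf *b, unsigned int len)
{
	b->data += len;
	b->tail += len;
}

/* cf. __skb_put(): append len bytes, return where they start */
static unsigned char *buf_put(struct buf *b, unsigned int len)
{
	unsigned char *tmp = buf_tail(b);

	assert(b->tail + len <= b->end);
	b->tail += len;
	return tmp;
}

/* cf. __skb_push(): prepend len bytes by consuming headroom */
static unsigned char *buf_push(struct buf *b, unsigned int len)
{
	assert((unsigned int)(b->data - b->head) >= len);
	b->data -= len;
	return b->data;
}

int main(void)
{
	struct buf b;

	b.head = b.data = malloc(64);
	if (!b.head)
		return 1;
	b.tail = 0;
	b.end  = 64;

	buf_reserve(&b, 16);			/* room for headers   */
	memcpy(buf_put(&b, 7), "payload", 7);	/* body first...      */
	memcpy(buf_push(&b, 4), "hdr:", 4);	/* ...then the header */

	printf("%.11s, headroom now %ld\n",
	       (char *)b.data, (long)(b.data - b.head));
	free(b.head);
	return 0;
}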
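
The queue rework expresses __skb_queue_after, __skb_queue_before and __skb_queue_tail in terms of one primitive, __skb_insert. It relies on struct sk_buff_head sharing its first two members with struct sk_buff, so the head can sit in the circular doubly-linked list as a sentinel node; queueing at the tail is then just "insert before the sentinel". A self-contained sketch of that pattern with a toy node type:

#include <stdio.h>

/* A node and a list head that share their first two members, so the
 * head can be linked into the ring as a sentinel, the same trick
 * struct sk_buff_head plays with struct sk_buff. */
struct node {
	struct node *next, *prev;
	int val;
};

struct list {
	struct node *next, *prev;	/* must mirror struct node */
	unsigned int qlen;
};

static void list_init(struct list *l)
{
	l->next = l->prev = (struct node *)l;
	l->qlen = 0;
}

/* cf. __skb_insert(): link newsk between prev and next */
static void insert(struct node *newsk, struct node *prev,
		   struct node *next, struct list *l)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	l->qlen++;
}

/* cf. __skb_queue_tail(): queue before the sentinel */
static void queue_tail(struct list *l, struct node *n)
{
	insert(n, ((struct node *)l)->prev, (struct node *)l, l);
}

int main(void)
{
	struct list l;
	struct node a = { .val = 1 }, b = { .val = 2 };

	list_init(&l);
	queue_tail(&l, &a);
	queue_tail(&l, &b);

	for (struct node *n = l.next; n != (struct node *)l; n = n->next)
		printf("%d\n", n->val);	/* prints 1 then 2 */
	return 0;
}

The same primitive also yields __skb_dequeue as "peek, then __skb_unlink", which is why the hand-rolled pointer juggling in the old __skb_dequeue could be dropped.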
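
__skb_cow centralises the old open-coded skb_cow logic: round the requested headroom up to at least NET_SKB_PAD, and if the buffer is cloned or short on headroom, expand the head by the deficit rounded up to a NET_SKB_PAD multiple. A small sketch of just that arithmetic; NET_SKB_PAD = 16 is assumed here as a typical value of this era:

#include <stdio.h>

#define NET_SKB_PAD 16			/* assumption; arch-tunable */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* cf. __skb_cow(): extra head bytes to allocate (0 means the existing
 * head can be reused, unless the skb is cloned). */
static int cow_delta(unsigned int wanted, unsigned int headroom)
{
	if (wanted < NET_SKB_PAD)
		wanted = NET_SKB_PAD;
	return wanted > headroom ? ALIGN(wanted - headroom, NET_SKB_PAD) : 0;
}

int main(void)
{
	printf("%d\n", cow_delta(0, 8));	/* 16: deficit 8, rounded up  */
	printf("%d\n", cow_delta(40, 16));	/* 32: deficit 24, rounded up */
	printf("%d\n", cow_delta(12, 32));	/* 0: headroom already ample  */
	return 0;
}

skb_cow_head() then reuses the same helper with skb_header_cloned() instead of skb_cloned(), for callers that only push headers and never touch the payload.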