unsigned short gso_segs;
unsigned short gso_type;
__be32 ip6_frag_id;
+#ifdef CONFIG_HAS_DMA
+ unsigned int num_dma_maps;
+#endif
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
+#ifdef CONFIG_HAS_DMA
+ dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
+#endif
};
/* We divide dataref into two halves. The higher 16 bits hold references
* @queue_mapping: Queue mapping for multiqueue devices
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
+ * @ndisc_nodetype: router type (from link layer)
+ * @do_not_encrypt: set to prevent encryption of this frame
+ * @requeue: set to indicate that the wireless core should attempt
+ * a software retry on this frame if we failed to
+ * receive an ACK for it
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
+ * @vlan_tci: vlan tag control information
*/
struct sk_buff {
struct dst_entry *dst;
struct rtable *rtable;
};
+#ifdef CONFIG_XFRM
struct sec_path *sp;
-
+#endif
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
#endif
int iif;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
__u16 queue_mapping;
-#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
- /* 2 byte hole */
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+ __u8 ndisc_nodetype:2;
+#endif
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+ __u8 do_not_encrypt:1;
+ __u8 requeue:1;
+#endif
+ /* 0/13/14 bit hole */
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
__u32 mark;
+ __u16 vlan_tci;
+
sk_buff_data_t transport_header;
sk_buff_data_t network_header;
sk_buff_data_t mac_header;
#include <asm/system.h>
+#ifdef CONFIG_HAS_DMA
+#include <linux/dma-mapping.h>
+extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir);
+extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir);
+#endif
+
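/*
 * Editor's sketch, not part of the patch: how a driver transmit path
 * might use the new helpers. The function name is hypothetical.
 * skb_dma_map() maps skb->data into dma_maps[0] and each page fragment
 * into dma_maps[1..nr_frags], unwinding its own mappings on failure.
 */
static int example_xmit_map(struct device *dev, struct sk_buff *skb)
{
	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return -ENOMEM;	/* nothing is left mapped on failure */

	/* ... post skb_shinfo(skb)->dma_maps[] to the hardware ring ... */

	/* once the hardware has consumed the buffers: */
	skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
	return 0;
}
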
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
return __alloc_skb(size, priority, 1, -1);
}
+extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+
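/*
 * Editor's sketch: a driver tx-completion path feeding a private
 * recycle queue, the pattern this helper is aimed at. The rx_recycle
 * queue and the size limit are hypothetical driver state;
 * skb_recycle_check() accepts only clean, large-enough buffers.
 */
static void example_tx_done(struct sk_buff *skb,
			    struct sk_buff_head *rx_recycle, int buf_size)
{
	if (skb_queue_len(rx_recycle) < 16 &&
	    skb_recycle_check(skb, buf_size))
		__skb_queue_head(rx_recycle, skb);
	else
		dev_kfree_skb(skb);
}
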
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
}
/**
+ * skb_queue_is_last - check if skb is the last entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ return (skb->next == (struct sk_buff *) list);
+}
+
+/**
+ * skb_queue_is_first - check if skb is the first entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the first buffer on the list.
+ */
+static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ return (skb->prev == (struct sk_buff *) list);
+}
+
+/**
+ * skb_queue_next - return the next packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the next packet in @list after @skb. It is only valid to
+ * call this if skb_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_queue_is_last(list, skb));
+ return skb->next;
+}
+
+/**
+ * skb_queue_prev - return the prev packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the prev packet in @list before @skb. It is only valid to
+ * call this if skb_queue_is_first() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_queue_is_first(list, skb));
+ return skb->prev;
+}
+
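/*
 * Editor's sketch: walking a queue with the navigation helpers rather
 * than dereferencing skb->next directly, stopping cleanly at the end.
 * The caller is assumed to hold whatever lock protects @list.
 */
static unsigned int example_count_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	unsigned int n = 0;

	while (skb) {
		n++;
		if (skb_queue_is_last(list, skb))
			break;
		skb = skb_queue_next(list, skb);
	}
	return n;
}
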
+/**
* skb_get - reference buffer
* @skb: buffer to reference
*
return list_->qlen;
}
+/**
+ * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ * @list: queue to initialize
+ *
+ * This initializes only the list and queue length aspects of
+ * an sk_buff_head object. It lets the list portion of an
+ * sk_buff_head be initialized without reinitializing things like
+ * the spinlock. It can also be used for on-stack sk_buff_head
+ * objects where the spinlock is known not to be used.
+ */
+static inline void __skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
/*
* This function creates a split out lock class for each invocation;
* this is needed for now since a whole lot of users of the skb-queue
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
- list->prev = list->next = (struct sk_buff *)list;
- list->qlen = 0;
+ __skb_queue_head_init(list);
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
}
/*
- * Insert an sk_buff at the start of a list.
+ * Insert an sk_buff on a list.
*
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
+extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_head *list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = prev->next = newsk;
+ list->qlen++;
+}
+
+static inline void __skb_queue_splice(const struct sk_buff_head *list,
+ struct sk_buff *prev,
+ struct sk_buff *next)
+{
+ struct sk_buff *first = list->next;
+ struct sk_buff *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+/**
+ * skb_queue_splice - join two skb lists; this is designed for stacks
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_queue_splice(const struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, (struct sk_buff *) head, head->next);
+ head->qlen += list->qlen;
+ }
+}
+
+/**
+ * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * The list at @list is reinitialised.
+ */
+static inline void skb_queue_splice_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, (struct sk_buff *) head, head->next);
+ head->qlen += list->qlen;
+ __skb_queue_head_init(list);
+ }
+}
+
+/**
+ * skb_queue_splice_tail - join two skb lists, each list being a queue
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+ head->qlen += list->qlen;
+ }
+}
+
+/**
+ * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised.
+ */
+static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+ head->qlen += list->qlen;
+ __skb_queue_head_init(list);
+ }
+}
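
/*
 * Editor's sketch: the intended splice pattern - drain a shared queue
 * into an on-stack list so the lock is held only briefly. The lock
 * usage and the process() handler are hypothetical.
 */
static void example_drain(struct sk_buff_head *shared,
			  void (*process)(struct sk_buff *))
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	__skb_queue_head_init(&tmp);	/* on-stack: no spinlock needed */

	spin_lock_bh(&shared->lock);
	skb_queue_splice_tail_init(shared, &tmp);
	spin_unlock_bh(&shared->lock);

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		process(skb);
}
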
/**
 * __skb_queue_after - queue a buffer after a given buffer in the list
struct sk_buff *prev,
struct sk_buff *newsk)
{
- struct sk_buff *next;
- list->qlen++;
+ __skb_insert(newsk, prev, prev->next, list);
+}
- next = prev->next;
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
+
+static inline void __skb_queue_before(struct sk_buff_head *list,
+ struct sk_buff *next,
+ struct sk_buff *newsk)
+{
+ __skb_insert(newsk, next->prev, next, list);
}
/**
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
- struct sk_buff *prev, *next;
-
- list->qlen++;
- next = (struct sk_buff *)list;
- prev = next->prev;
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
-}
-
-
-/**
- * __skb_dequeue - remove from the head of the queue
- * @list: list to dequeue from
- *
- * Remove the head of the list. This function does not take any locks
- * so must be used with appropriate locks held only. The head item is
- * returned or %NULL if the list is empty.
- */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
-{
- struct sk_buff *next, *prev, *result;
-
- prev = (struct sk_buff *) list;
- next = prev->next;
- result = NULL;
- if (next != prev) {
- result = next;
- next = next->next;
- list->qlen--;
- next->prev = prev;
- prev->next = next;
- result->next = result->prev = NULL;
- }
- return result;
-}
-
-
-/*
- * Insert a packet on a list.
- */
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_insert(struct sk_buff *newsk,
- struct sk_buff *prev, struct sk_buff *next,
- struct sk_buff_head *list)
-{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
- list->qlen++;
-}
-
-/*
- * Place a packet after a given packet in a list.
- */
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
- __skb_insert(newsk, old, old->next, list);
+ __skb_queue_before(list, (struct sk_buff *)list, newsk);
}
/*
prev->next = next;
}
-
-/* XXX: more streamlined implementation */
+/**
+ * __skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The head item is
+ * returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *skb = skb_peek(list);
+ if (skb)
+ __skb_unlink(skb, list);
+ return skb;
+}
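
/*
 * Editor's note: expressed via skb_peek() and __skb_unlink(), the
 * dequeue above now reads as peek-then-unlink. A caller-side drain of
 * a private (unlocked) list is just:
 */
static void example_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);	/* essentially what skb_queue_purge() does */
}
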
/**
* __skb_dequeue_tail - remove from the tail of the queue
skb_shinfo(skb)->nr_frags = i + 1;
}
+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size);
+
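/*
 * Editor's sketch: a receive path attaching DMA'd page data to an skb
 * as a fragment instead of copying it; skb_add_rx_frag() also updates
 * skb->len, data_len and truesize. The offset/length values would come
 * from the (hypothetical) rx descriptor.
 */
static void example_rx_attach(struct sk_buff *skb, struct page *page,
			      int offset, int length)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, length);
}
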
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
- !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+ !__pskb_pull_tail(skb, len - skb_headlen(skb)))
return NULL;
skb->len -= len;
return skb->data += len;
return 1;
if (unlikely(len > skb->len))
return 0;
- return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+ return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
/**
skb_set_tail_pointer(skb, len);
}
-/**
- * skb_trim - remove end from a buffer
- * @skb: buffer to alter
- * @len: new length
- *
- * Cut the length of a buffer down by removing data from the tail. If
- * the buffer is already under the length specified it is not modified.
- * The skb must be linear.
- */
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (skb->len > len)
- __skb_trim(skb, len);
-}
-
+extern void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
+extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+
+/**
+ * netdev_alloc_page - allocate a page for ps-rx on a specific device
+ * @dev: network device to receive on
+ *
+ * Allocate a new page, node-local to the specified device.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *netdev_alloc_page(struct net_device *dev)
+{
+ return __netdev_alloc_page(dev, GFP_ATOMIC);
+}
+
+static inline void netdev_free_page(struct net_device *dev, struct page *page)
+{
+ __free_page(page);
+}
+
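/*
 * Editor's sketch: refilling one rx-ring slot with a node-local page.
 * The slot pointer stands in for hypothetical driver ring state; the
 * DMA mapping step is elided.
 */
static int example_refill_slot(struct net_device *dev, struct page **slot)
{
	struct page *page = netdev_alloc_page(dev);

	if (!page)
		return -ENOMEM;
	/* ... dma-map the page and write it into the rx descriptor ... */
	*slot = page;
	return 0;
}
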
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
unsigned int size = skb->len;
if (likely(size >= len))
return 0;
- return skb_pad(skb, len-size);
+ return skb_pad(skb, len - size);
}
static inline int skb_add_data(struct sk_buff *skb,
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
+#define skb_queue_walk_from(queue, skb) \
+ for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
+ skb = skb->next)
+
+#define skb_queue_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->next)
+
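/*
 * Editor's sketch: resuming a walk from a known buffer, the use case
 * the _from variants serve (e.g. a scan that continues where it left
 * off). @start must already be on @queue.
 */
static unsigned int example_count_from(struct sk_buff_head *queue,
				       struct sk_buff *start)
{
	struct sk_buff *skb = start;
	unsigned int n = 0;

	skb_queue_walk_from(queue, skb)
		n++;
	return n;
}
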
#define skb_queue_reverse_walk(queue, skb) \
for (skb = (queue)->prev; \
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
struct iovec *iov);
+extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
+ int offset,
+ struct iovec *from,
+ int len);
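
/*
 * Editor's sketch of the new from-iovec copy, in the style of a
 * tun/tap-like receive path: copy @len bytes of user data into an skb
 * whose data area has already been sized (e.g. with skb_put()).
 */
static int example_fill_skb(struct sk_buff *skb, struct iovec *iov, int len)
{
	if (skb_copy_datagram_from_iovec(skb, 0, iov, len)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	return 0;
}
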
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
+extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+ int shiftlen);
extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
skb->queue_mapping = queue_mapping;
-#endif
}
static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
return skb->queue_mapping;
-#else
- return 0;
-#endif
}
static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
to->queue_mapping = from->queue_mapping;
-#endif
}
+#ifdef CONFIG_XFRM
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+ return skb->sp;
+}
+#else
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
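/*
 * Editor's sketch: with the accessor, callers that used to test
 * skb->sp directly no longer need their own CONFIG_XFRM ifdefs.
 */
static inline bool example_ipsec_processed(struct sk_buff *skb)
{
	return skb_sec_path(skb) != NULL;
}
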
static inline int skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+ /* LRO sets gso_size but not gso_type, whereas if GSO is really
+ * wanted then gso_type will be set. */
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+ __skb_warn_lro_forwarding(skb);
+ return true;
+ }
+ return false;
+}
+
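/*
 * Editor's sketch: a forwarding path using the helper to refuse
 * LRO-merged packets, which must not be forwarded because their
 * on-the-wire framing has been destroyed. Error handling is minimal.
 */
static int example_forward_check(struct sk_buff *skb)
{
	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	/* ... continue down the forwarding path ... */
	return 0;
}
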
static inline void skb_forward_csum(struct sk_buff *skb)
{
/* Unfortunately we don't support this one. Any brave souls? */