/**
* struct sock_common - minimal network layer representation of sockets
* @skc_node: main hash linkage for various protocol lookup tables
- * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+ * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_refcnt: reference count
+ * @skc_tx_queue_mapping: tx queue number for this connection
* @skc_hash: hash value used with various protocol lookup tables
+ * @skc_u16hashes: two u16 hash values used by UDP lookup tables
* @skc_family: network address family
* @skc_state: Connection state
* @skc_reuse: %SO_REUSEADDR setting
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
+ * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
* @skc_prot: protocol handlers inside a network family
* @skc_net: reference to the network namespace of this socket
*
struct hlist_nulls_node skc_nulls_node;
};
atomic_t skc_refcnt;
+ int skc_tx_queue_mapping;
- unsigned int skc_hash;
+ union {
+ unsigned int skc_hash;
+ __u16 skc_u16hashes[2];
+ };
unsigned short skc_family;
volatile unsigned char skc_state;
unsigned char skc_reuse;
int skc_bound_dev_if;
- struct hlist_node skc_bind_node;
+ union {
+ struct hlist_node skc_bind_node;
+ struct hlist_nulls_node skc_portaddr_node;
+ };
struct proto *skc_prot;
#ifdef CONFIG_NET_NS
struct net *skc_net;
#define sk_node __sk_common.skc_node
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
+#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
#define sk_copy_start __sk_common.skc_hash
#define sk_hash __sk_common.skc_hash
struct {
struct sk_buff *head;
struct sk_buff *tail;
+ int len; /* total truesize of skbs queued here */
+ int limit; /* limit enforced by sk_add_backlog() */
} sk_backlog;
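/*
 * Note: sk_backlog.len accounts skb->truesize of everything queued, and
 * sk_add_backlog() below bounds it by max(limit, sk_rcvbuf << 1), so a
 * limit of 0 still leaves an effective cap of twice the receive buffer.
 */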
wait_queue_head_t *sk_sleep;
struct dst_entry *sk_dst_cache;
/*
* Hashed lists helper routines
*/
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+ return hlist_entry(node, struct sock, sk_node);
+}
+
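/*
 * Illustrative use of sk_entry() (a sketch, not part of this patch;
 * "head" and do_something() are placeholders): recover the socket from
 * a raw hlist cursor:
 *
 *	struct hlist_node *node;
 *
 *	hlist_for_each(node, head)
 *		do_something(sk_entry(node));
 */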
static inline struct sock *__sk_head(const struct hlist_head *head)
{
return hlist_entry(head->first, struct sock, sk_node);
__hlist_del(&sk->sk_node);
}
+/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
if (sk_hashed(sk)) {
}
return rc;
}
+#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
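/*
 * The plain sk_del_node_init() is also safe for RCU users: per the note
 * above it matches hlist_del_init_rcu(), leaving ->next intact so that
 * readers still traversing the chain are not broken.
 */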
static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
__sk_add_node(sk, list);
}
+static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_head_rcu(&sk->sk_node, list);
+}
+
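/*
 * Writer-side sketch (illustrative; the lock and list names are
 * assumptions): RCU protects readers only, so writers still serialize
 * additions with a lock:
 *
 *	spin_lock(&my_sklist_lock);
 *	sk_add_node_rcu(sk, &my_sklist);
 *	spin_unlock(&my_sklist_lock);
 */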
static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
#define sk_for_each(__sk, node, list) \
hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, node, list) \
+ hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
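/*
 * Reader-side sketch for sk_for_each_rcu() (illustrative; "head" is a
 * placeholder): e.g. counting sockets on a chain without the writer
 * lock:
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *	unsigned int cnt = 0;
 *
 *	rcu_read_lock();
 *	sk_for_each_rcu(sk, node, head)
 *		cnt++;
 *	rcu_read_unlock();
 */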
#define sk_nulls_for_each_rcu(__sk, node, list) \
SOCK_TIMESTAMPING_SOFTWARE, /* %SOF_TIMESTAMPING_SOFTWARE */
SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
+ SOCK_FASYNC, /* fasync() active */
+ SOCK_RXQ_OVFL, /* %SO_RXQ_OVFL setting */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
-/* The per-socket spinlock must be held here. */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+/* OOB backlog add: queue skb without checking the backlog limit */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (!sk->sk_backlog.tail) {
sk->sk_backlog.head = sk->sk_backlog.tail = skb;
skb->next = NULL;
}
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+ return -ENOBUFS;
+
+ __sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+}
+
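/*
 * Typical caller pattern (a sketch; compare tcp_v4_rcv()): process the
 * skb directly when the socket is not owned by user context, otherwise
 * queue it to the backlog and drop on -ENOBUFS:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb)) {
 *		bh_unlock_sock(sk);
 *		kfree_skb(skb);
 *		return -ENOBUFS;
 *	}
 *	bh_unlock_sock(sk);
 */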
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
return sk->sk_backlog_rcv(sk, skb);
extern void sock_init_data(struct socket *sock, struct sock *sk);
/**
- * sk_filter_release: Release a socket filter
+ * sk_filter_release - release a socket filter
* @fp: filter to remove
*
* Remove a filter from a socket and release its resources.
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested);
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+ sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+ sk->sk_tx_queue_mapping = -1;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+ return sk->sk_tx_queue_mapping;
+}
+
+static inline bool sk_tx_queue_recorded(const struct sock *sk)
+{
+ return (sk && sk->sk_tx_queue_mapping >= 0);
+}
+
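/*
 * Transmit-path sketch (illustrative; mirrors the queue selection in
 * dev_pick_tx()): reuse the queue recorded for a connected socket
 * instead of hashing every skb:
 *
 *	u16 queue_index;
 *
 *	if (sk_tx_queue_recorded(sk))
 *		queue_index = sk_tx_queue_get(sk);
 *	else {
 *		queue_index = skb_tx_hash(dev, skb);
 *		sk_tx_queue_set(sk, queue_index);
 *	}
 */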
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
+ sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
{
struct dst_entry *old_dst;
+ sk_tx_queue_clear(sk); /* new route invalidates the recorded tx queue */
old_dst = sk->sk_dst_cache;
sk->sk_dst_cache = dst;
dst_release(old_dst);
{
struct dst_entry *old_dst;
+ sk_tx_queue_clear(sk);
old_dst = sk->sk_dst_cache;
sk->sk_dst_cache = NULL;
dst_release(old_dst);
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
- if (sk->sk_socket && sk->sk_socket->fasync_list)
+ if (sock_flag(sk, SOCK_FASYNC))
sock_wake_async(sk->sk_socket, how, band);
}
sk->sk_stamp = kt;
}
+extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);
+
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @msg: outgoing packet