#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
+#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
/**
* struct sock_common - minimal network layer representation of sockets
* @skc_node: main hash linkage for various protocol lookup tables
- * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+ * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_refcnt: reference count
+ * @skc_tx_queue_mapping: tx queue number for this connection
* @skc_hash: hash value used with various protocol lookup tables
+ * @skc_u16hashes: two u16 hash values used by UDP lookup tables
* @skc_family: network address family
* @skc_state: Connection state
* @skc_reuse: %SO_REUSEADDR setting
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
+ * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
* @skc_prot: protocol handlers inside a network family
* @skc_net: reference to the network namespace of this socket
*
struct hlist_nulls_node skc_nulls_node;
};
atomic_t skc_refcnt;
+ int skc_tx_queue_mapping;
- unsigned int skc_hash;
+ union {
+ unsigned int skc_hash;
+ __u16 skc_u16hashes[2];
+ };
unsigned short skc_family;
volatile unsigned char skc_state;
unsigned char skc_reuse;
int skc_bound_dev_if;
- struct hlist_node skc_bind_node;
+ union {
+ struct hlist_node skc_bind_node;
+ struct hlist_nulls_node skc_portaddr_node;
+ };
struct proto *skc_prot;
#ifdef CONFIG_NET_NS
struct net *skc_net;
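The two new unions let UDP/UDP-Lite reuse storage the other protocols already pay for: skc_u16hashes overlays the four bytes of skc_hash, and skc_portaddr_node overlays skc_bind_node. A minimal sketch of how a protocol might fill the split hashes (my_store_hashes and its arguments are hypothetical, not kernel API):

static inline void my_store_hashes(struct sock *sk,
        			   u16 port_hash, u16 portaddr_hash)
{
        /* Same four bytes that e.g. TCP keeps as a single skc_hash */
        sk->__sk_common.skc_u16hashes[0] = port_hash;
        sk->__sk_common.skc_u16hashes[1] = portaddr_hash;
}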
* @sk_rcvlowat: %SO_RCVLOWAT setting
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
+ * @sk_rxhash: flow hash received from netif layer
* @sk_filter: socket filtering instructions
* @sk_protinfo: private area, net family specific, when not using slab
* @sk_timer: sock cleanup timer
#define sk_node __sk_common.skc_node
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
+#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
#define sk_copy_start __sk_common.skc_hash
#define sk_hash __sk_common.skc_hash
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
kmemcheck_bitfield_begin(flags);
- unsigned char sk_shutdown : 2,
- sk_no_check : 2,
- sk_userlocks : 4;
+ unsigned int sk_shutdown : 2,
+ sk_no_check : 2,
+ sk_userlocks : 4,
+ sk_protocol : 8,
+ sk_type : 16;
kmemcheck_bitfield_end(flags);
- unsigned char sk_protocol;
- unsigned short sk_type;
int sk_rcvbuf;
socket_lock_t sk_lock;
/*
struct {
struct sk_buff *head;
struct sk_buff *tail;
+ int len;
} sk_backlog;
wait_queue_head_t *sk_sleep;
struct dst_entry *sk_dst_cache;
#ifdef CONFIG_XFRM
struct xfrm_policy *sk_policy[2];
#endif
- rwlock_t sk_dst_lock;
+ spinlock_t sk_dst_lock;
atomic_t sk_rmem_alloc;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_gso_type;
unsigned int sk_gso_max_size;
int sk_rcvlowat;
+#ifdef CONFIG_RPS
+ __u32 sk_rxhash;
+#endif
unsigned long sk_flags;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
/*
* Hashed lists helper routines
*/
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+ return hlist_entry(node, struct sock, sk_node);
+}
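sk_entry() is the plain-hlist counterpart of __sk_head(): it converts a bare hlist_node cursor (the kind handed around by seq_file iterators, for instance) back into the enclosing struct sock. A hedged sketch of a caller (my_first_sock is hypothetical):

static inline struct sock *my_first_sock(const struct hlist_head *head)
{
        return head->first ? sk_entry(head->first) : NULL;
}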
+
static inline struct sock *__sk_head(const struct hlist_head *head)
{
return hlist_entry(head->first, struct sock, sk_node);
__hlist_del(&sk->sk_node);
}
+/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
if (sk_hashed(sk)) {
}
return rc;
}
+#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
__sk_add_node(sk, list);
}
+static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_head_rcu(&sk->sk_node, list);
+}
+
static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
#define sk_for_each(__sk, node, list) \
hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, node, list) \
+ hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
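Together with sk_add_node_rcu() above, sk_for_each_rcu() completes the usual RCU pairing: writers add under their own lock (and take a reference via sock_hold()), readers walk the chain with no lock at all. A hedged sketch, with my_hash_sock()/my_count_socks() and their parameters as hypothetical stand-ins:

static void my_hash_sock(struct sock *sk, struct hlist_head *head,
        		 spinlock_t *lock)
{
        spin_lock(lock);
        sk_add_node_rcu(sk, head);	/* also grabs a reference */
        spin_unlock(lock);
}

static unsigned int my_count_socks(struct hlist_head *head)
{
        struct sock *sk;
        struct hlist_node *node;
        unsigned int n = 0;

        rcu_read_lock();
        sk_for_each_rcu(sk, node, head)
        	n++;
        rcu_read_unlock();
        return n;
}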
SOCK_TIMESTAMPING_SOFTWARE, /* %SOF_TIMESTAMPING_SOFTWARE */
SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
+ SOCK_FASYNC, /* fasync() active */
+ SOCK_RXQ_OVFL, /* %SO_RXQ_OVFL setting */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
-/* The per-socket spinlock must be held here. */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (!sk->sk_backlog.tail) {
sk->sk_backlog.head = sk->sk_backlog.tail = skb;
skb->next = NULL;
}
+/*
+ * Take into account size of receive queue and backlog queue
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+ unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+ return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk_rcvqueues_full(sk, skb))
+ return -ENOBUFS;
+
+ __sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+}
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
return sk->sk_backlog_rcv(sk, skb);
}
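Because sk_add_backlog() can now fail (and is __must_check), callers must drop the packet themselves; sk_rcvqueues_full() also allows an early drop before the socket lock is taken. A hedged sketch of a protocol receive path (my_proto_rcv and its drop policy are illustrative only):

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        int rc = 0;

        if (sk_rcvqueues_full(sk, skb))
        	goto drop;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
        	rc = sk_backlog_rcv(sk, skb);	/* process it directly */
        else if (sk_add_backlog(sk, skb)) {	/* queue for the lock owner */
        	bh_unlock_sock(sk);
        	goto drop;
        }
        bh_unlock_sock(sk);
        return rc;

drop:
        kfree_skb(skb);
        return -ENOBUFS;
}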
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+{
+#ifdef CONFIG_RPS
+ if (unlikely(sk->sk_rxhash != rxhash)) {
+ sock_rps_reset_flow(sk);
+ sk->sk_rxhash = rxhash;
+ }
+#endif
+}
+
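These helpers hook Receive Flow Steering into the socket: the protocol's receive path saves the flow hash computed by the NIC/netif layer, while sock_rps_record_flow() is called from the read side (e.g. at the top of recvmsg/poll handlers) so later packets of the flow can be steered to the CPU the application runs on. A hedged sketch of the receive-side caller (my_proto_queue_rcv_skb is hypothetical):

static int my_proto_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        /* Remember the flow hash; resets the old RFS entry if it changed */
        sock_rps_save_rxhash(sk, skb->rxhash);

        return sock_queue_rcv_skb(sk, skb);
}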
#define sk_wait_event(__sk, __timeo, __condition) \
({ int __rc; \
release_sock(__sk); \
extern void sock_init_data(struct socket *sock, struct sock *sk);
/**
- * sk_filter_release: Release a socket filter
+ * sk_filter_release - release a socket filter
* @fp: filter to remove
*
* Remove a filter from a socket and release its resources.
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested);
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+ sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+ sk->sk_tx_queue_mapping = -1;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+ return sk->sk_tx_queue_mapping;
+}
+
+static inline bool sk_tx_queue_recorded(const struct sock *sk)
+{
+ return (sk && sk->sk_tx_queue_mapping >= 0);
+}
+
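The recorded mapping lets the transmit path keep a connection on one tx queue instead of re-hashing every packet (dev_pick_tx() in net/core/dev.c is the in-tree consumer). A hedged sketch of the idea (my_select_queue is hypothetical):

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        u16 queue;

        /* Reuse the queue recorded for this connection, if any */
        if (sk_tx_queue_recorded(sk))
        	return sk_tx_queue_get(sk);

        /* Otherwise hash onto one of the device's real tx queues and
         * remember the choice for later packets of the connection. */
        queue = skb_tx_hash(dev, skb);
        if (sk)
        	sk_tx_queue_set(sk, queue);
        return queue;
}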
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
+ sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+ return sk->sk_sleep;
+}
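Callers are expected to go through the accessor instead of touching sk->sk_sleep directly, which keeps call sites unchanged if the wait queue is later relocated out of struct sock. A hedged sketch of a typical wake-up, mirroring the pattern in the comment further below (my_wake_readers is hypothetical):

static void my_wake_readers(struct sock *sk)
{
        wait_queue_head_t *wq = sk_sleep(sk);

        if (wq && waitqueue_active(wq))
        	wake_up_interruptible(wq);
}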
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
* Note that parent inode held reference count on this struct sock,
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return sk->sk_dst_cache;
+ return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
}
static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
struct dst_entry *dst;
- read_lock(&sk->sk_dst_lock);
- dst = sk->sk_dst_cache;
+ rcu_read_lock();
+ dst = rcu_dereference(sk->sk_dst_cache);
if (dst)
dst_hold(dst);
- read_unlock(&sk->sk_dst_lock);
+ rcu_read_unlock();
return dst;
}
+extern void sk_reset_txq(struct sock *sk);
+
+static inline void dst_negative_advice(struct sock *sk)
+{
+ struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+ if (dst && dst->ops->negative_advice) {
+ ndst = dst->ops->negative_advice(dst);
+
+ if (ndst != dst) {
+ rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ sk_reset_txq(sk);
+ }
+ }
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;
- old_dst = sk->sk_dst_cache;
- sk->sk_dst_cache = dst;
+ sk_tx_queue_clear(sk);
+ /*
+ * This can be called while sk is owned by the caller only,
+ * with no state that can be checked in a rcu_dereference_check() cond
+ */
+ old_dst = rcu_dereference_raw(sk->sk_dst_cache);
+ rcu_assign_pointer(sk->sk_dst_cache, dst);
dst_release(old_dst);
}
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
static inline void
__sk_dst_reset(struct sock *sk)
{
- struct dst_entry *old_dst;
-
- old_dst = sk->sk_dst_cache;
- sk->sk_dst_cache = NULL;
- dst_release(old_dst);
+ __sk_dst_set(sk, NULL);
}
static inline void
sk_dst_reset(struct sock *sk)
{
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
__sk_dst_reset(sk);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
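After the conversion, readers never take sk_dst_lock: sk_dst_get() returns a referenced dst under rcu_read_lock(), while writers either own the socket (and may use __sk_dst_set() directly) or serialize through the spinlock via sk_dst_set(). A hedged sketch of the lock-free read side (my_use_route is hypothetical):

static void my_use_route(struct sock *sk)
{
        struct dst_entry *dst = sk_dst_get(sk);	/* reference, no lock */

        if (dst) {
        	/* ... transmit along the cached route ... */
        	dst_release(dst);
        }
}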
extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
* tp->rcv_nxt check sock_def_readable
* ... {
* schedule ...
- * if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- * wake_up_interruptible(sk->sk_sleep)
+ * if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ * wake_up_interruptible(sk_sleep(sk))
* ...
* }
*
* This memory barrier is paired in the sock_poll_wait.
*/
smp_mb__after_lock();
- return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+ return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
}
/**
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
- if (sk->sk_socket && sk->sk_socket->fasync_list)
+ if (sock_flag(sk, SOCK_FASYNC))
sock_wake_async(sk->sk_socket, how, band);
}
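Testing a single sock flag avoids dereferencing sk->sk_socket->fasync_list on every wake-up; the flag has to stay in sync wherever the fasync list is edited (net/socket.c does this when userspace toggles FASYNC). A hedged sketch of the producer side (my_note_fasync is hypothetical):

static void my_note_fasync(struct sock *sk, int on)
{
        if (on)
        	sock_set_flag(sk, SOCK_FASYNC);
        else
        	sock_reset_flag(sk, SOCK_FASYNC);
}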
sk->sk_stamp = kt;
}
+extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);
+
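Datagram protocols call this helper once per dequeued skb so that timestamps and the %SO_RXQ_OVFL drop count reach userspace as ancillary data. A hedged sketch of a recvmsg handler using it (my_dgram_recvmsg is illustrative only):

static int my_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
        		    struct msghdr *msg, size_t len, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int err, copied;

        skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
        if (!skb)
        	return err;

        copied = min_t(unsigned int, len, skb->len);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        if (!err)
        	sock_recv_ts_and_drops(msg, sk, skb);	/* cmsg timestamps + drops */

        skb_free_datagram(sk, skb);
        return err ? err : copied;
}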
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @msg: outgoing packet