#include <linux/interrupt.h>
+#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
-#include <net/route.h>
#include <net/sock.h>
+#include <net/route.h>
#include <net/tcp_states.h>
+#include <net/netns/hash.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
- * New scheme, half the table is for TIME_WAIT, the other half is
- * for the rest. I'll experiment with dynamic table growth later.
+ * One chain is dedicated to TIME_WAIT sockets.
+ * I'll experiment with dynamic table growth later.
*/
struct inet_ehash_bucket {
- rwlock_t lock;
- struct hlist_head chain;
+ struct hlist_nulls_head chain;
+ struct hlist_nulls_head twchain;
};
/* There are a few simple rules, which allow for local port reuse by
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
struct inet_bind_bucket {
+#ifdef CONFIG_NET_NS
+ struct net *ib_net;
+#endif
unsigned short port;
signed short fastreuse;
+ int num_owners;
struct hlist_node node;
struct hlist_head owners;
};
-#define inet_bind_bucket_for_each(tb, node, head) \
- hlist_for_each_entry(tb, node, head, node)
+static inline struct net *ib_net(struct inet_bind_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+#define inet_bind_bucket_for_each(tb, pos, head) \
+ hlist_for_each_entry(tb, pos, head, node)
struct inet_bind_hashbucket {
spinlock_t lock;
struct hlist_head chain;
};
+/*
+ * Sockets can be hashed in the established or listening table.
+ * We must use a different 'nulls' end-of-chain value for the listening
+ * hash table, or we might find a socket that was closed and then
+ * reallocated/inserted into the established hash table.
+ */
+#define LISTENING_NULLS_BASE (1U << 29)
+struct inet_listen_hashbucket {
+ spinlock_t lock;
+ struct hlist_nulls_head head;
+};
+
/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
*
* TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
- * First half of the table is for sockets not in TIME_WAIT, second half
- * is for TIME_WAIT sockets only.
+ * TIME_WAIT sockets use a separate chain (twchain).
*/
struct inet_ehash_bucket *ehash;
+ spinlock_t *ehash_locks;
+ unsigned int ehash_mask;
+ unsigned int ehash_locks_mask;
/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
struct inet_bind_hashbucket *bhash;
- int bhash_size;
- unsigned int ehash_size;
+ unsigned int bhash_size;
+ /* 4-byte hole on 64-bit */
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
- */
- struct hlist_head listening_hash[INET_LHTABLE_SIZE];
+ struct kmem_cache *bind_bucket_cachep;
/* All the above members are written once at bootup and
* never written again _or_ are predominantly read-access.
*
* Now align to a new cache line as all the following members
- * are often dirty.
+ * may often be dirty.
+ */
+ /* All sockets in TCP_LISTEN state will be in here. This is the only
+ * table where wildcard'd TCP sockets can exist. Hash function here
+ * is just local port number.
*/
- rwlock_t lhash_lock ____cacheline_aligned;
- atomic_t lhash_users;
- wait_queue_head_t lhash_wait;
- kmem_cache_t *bind_bucket_cachep;
+ struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
+ ____cacheline_aligned_in_smp;
+
+ /* count of sockets currently bound to local ports */
+ atomic_t bsockets;
};
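/*
 * Illustrative sketch, not part of the patch: how a lockless RCU
 * reader walks one of the nulls chains declared above. The helper
 * name example_find_listener() is hypothetical and the bare port
 * match is simplified; the point is the restart when the chain ends
 * on a foreign nulls value. A listening bucket is assumed to be
 * initialised with the nulls value (slot + LISTENING_NULLS_BASE).
 */
static inline struct sock *example_find_listener(struct inet_hashinfo *h,
						 unsigned int slot,
						 unsigned short hnum)
{
	struct inet_listen_hashbucket *ilb = &h->listening_hash[slot];
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		if (inet_sk(sk)->inet_num == hnum)
			return sk;	/* caller must still take a reference */
	}
	/*
	 * If the nulls value at the end of the walk is not the one this
	 * bucket was initialised with, a socket we followed was freed and
	 * re-hashed into another chain mid-walk: restart.
	 */
	if (get_nulls_value(node) != slot + LISTENING_NULLS_BASE)
		goto begin;
	return NULL;
}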
static inline struct inet_ehash_bucket *inet_ehash_bucket(
struct inet_hashinfo *hashinfo,
unsigned int hash)
{
- return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
+ return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}
-extern struct inet_bind_bucket *
- inet_bind_bucket_create(kmem_cache_t *cachep,
- struct inet_bind_hashbucket *head,
- const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
- struct inet_bind_bucket *tb);
-
-static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
+static inline spinlock_t *inet_ehash_lockp(
+ struct inet_hashinfo *hashinfo,
+ unsigned int hash)
{
- return lport & (bhash_size - 1);
+ return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
-
-/* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(const unsigned short num)
+static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
- return num & (INET_LHTABLE_SIZE - 1);
+ unsigned int i, size = 256;
+#if defined(CONFIG_PROVE_LOCKING)
+ /* spinlock_t is much larger under lockdep; keep the table small */
+ unsigned int nr_pcpus = 2;
+#else
+ unsigned int nr_pcpus = num_possible_cpus();
+#endif
+ /* scale the number of lock stripes with the possible CPU count */
+ if (nr_pcpus >= 4)
+ size = 512;
+ if (nr_pcpus >= 8)
+ size = 1024;
+ if (nr_pcpus >= 16)
+ size = 2048;
+ if (nr_pcpus >= 32)
+ size = 4096;
+ /* spinlock_t can have zero size on UP builds without spinlock debugging */
+ if (sizeof(spinlock_t) != 0) {
+#ifdef CONFIG_NUMA
+ if (size * sizeof(spinlock_t) > PAGE_SIZE)
+ hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
+ else
+#endif
+ hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
+ GFP_KERNEL);
+ if (!hashinfo->ehash_locks)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ spin_lock_init(&hashinfo->ehash_locks[i]);
+ }
+ hashinfo->ehash_locks_mask = size - 1;
+ return 0;
}
-static inline int inet_sk_listen_hashfn(const struct sock *sk)
+static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
- return inet_lhashfn(inet_sk(sk)->num);
+ if (hashinfo->ehash_locks) {
+#ifdef CONFIG_NUMA
+ unsigned int size = (hashinfo->ehash_locks_mask + 1) *
+ sizeof(spinlock_t);
+ if (size > PAGE_SIZE)
+ vfree(hashinfo->ehash_locks);
+ else
+#endif
+ kfree(hashinfo->ehash_locks);
+ hashinfo->ehash_locks = NULL;
+ }
}
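/*
 * Usage sketch for the allocation pair above; the helper name is
 * hypothetical and loosely modelled on a protocol init path. Note the
 * free side recomputes the allocation size from ehash_locks_mask on
 * NUMA builds, so the mask must be left alone between alloc and free.
 */
static inline int example_hashinfo_boot(struct inet_hashinfo *h)
{
	if (inet_ehash_locks_alloc(h))
		return -ENOMEM;
	/* ... then allocate h->ehash, set h->ehash_mask, init h->bhash ... */
	return 0;
}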
-/* Caller must disable local BH processing. */
-static inline void __inet_inherit_port(struct inet_hashinfo *table,
- struct sock *sk, struct sock *child)
-{
- const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
- struct inet_bind_hashbucket *head = &table->bhash[bhash];
- struct inet_bind_bucket *tb;
-
- spin_lock(&head->lock);
- tb = inet_csk(sk)->icsk_bind_hash;
- sk_add_bind_node(child, &tb->owners);
- inet_csk(child)->icsk_bind_hash = tb;
- spin_unlock(&head->lock);
-}
+extern struct inet_bind_bucket *
+ inet_bind_bucket_create(struct kmem_cache *cachep,
+ struct net *net,
+ struct inet_bind_hashbucket *head,
+ const unsigned short snum);
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind_bucket *tb);
-static inline void inet_inherit_port(struct inet_hashinfo *table,
- struct sock *sk, struct sock *child)
+static inline int inet_bhashfn(struct net *net,
+ const __u16 lport, const int bhash_size)
{
- local_bh_disable();
- __inet_inherit_port(table, sk, child);
- local_bh_enable();
+ return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
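/*
 * Sketch of how a bind-time caller picks its bucket with the helper
 * above (hypothetical helper name): folding net_hash_mix(net) into
 * the hash spreads the same port number used in different network
 * namespaces across different buckets.
 */
static inline struct inet_bind_hashbucket *
example_bind_bucket(struct inet_hashinfo *h, struct net *net, __u16 port)
{
	return &h->bhash[inet_bhashfn(net, port, h->bhash_size)];
}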
-extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);
-
-extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
+extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ const unsigned short snum);
-/*
- * - We may sleep inside this lock.
- * - If sleeping is not required (or called from BH),
- * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
- */
-static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
+/* These can have wildcards, don't try too hard. */
+static inline int inet_lhashfn(struct net *net, const unsigned short num)
{
- /* read_lock synchronizes to candidates to writers */
- read_lock(&hashinfo->lhash_lock);
- atomic_inc(&hashinfo->lhash_users);
- read_unlock(&hashinfo->lhash_lock);
+ return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}
-static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
+static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
- if (atomic_dec_and_test(&hashinfo->lhash_users))
- wake_up(&hashinfo->lhash_wait);
+ return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}
-static inline void __inet_hash(struct inet_hashinfo *hashinfo,
- struct sock *sk, const int listen_possible)
-{
- struct hlist_head *list;
- rwlock_t *lock;
-
- BUG_TRAP(sk_unhashed(sk));
- if (listen_possible && sk->sk_state == TCP_LISTEN) {
- list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
- lock = &hashinfo->lhash_lock;
- inet_listen_wlock(hashinfo);
- } else {
- struct inet_ehash_bucket *head;
- sk->sk_hash = inet_sk_ehashfn(sk);
- head = inet_ehash_bucket(hashinfo, sk->sk_hash);
- list = &head->chain;
- lock = &head->lock;
- write_lock(lock);
- }
- __sk_add_node(sk, list);
- sock_prot_inc_use(sk->sk_prot);
- write_unlock(lock);
- if (listen_possible && sk->sk_state == TCP_LISTEN)
- wake_up(&hashinfo->lhash_wait);
-}
+/* Caller must disable local BH processing. */
+extern void __inet_inherit_port(struct sock *sk, struct sock *child);
-static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
-{
- if (sk->sk_state != TCP_CLOSE) {
- local_bh_disable();
- __inet_hash(hashinfo, sk, 1);
- local_bh_enable();
- }
-}
+extern void inet_put_port(struct sock *sk);
-static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
-{
- rwlock_t *lock;
-
- if (sk_unhashed(sk))
- goto out;
-
- if (sk->sk_state == TCP_LISTEN) {
- local_bh_disable();
- inet_listen_wlock(hashinfo);
- lock = &hashinfo->lhash_lock;
- } else {
- lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
- write_lock_bh(lock);
- }
+extern void inet_hashinfo_init(struct inet_hashinfo *h);
- if (__sk_del_node_init(sk))
- sock_prot_dec_use(sk->sk_prot);
- write_unlock_bh(lock);
-out:
- if (sk->sk_state == TCP_LISTEN)
- wake_up(&hashinfo->lhash_wait);
-}
+extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+extern void inet_hash(struct sock *sk);
+extern void inet_unhash(struct sock *sk);
-static inline int inet_iif(const struct sk_buff *skb)
-{
- return ((struct rtable *)skb->dst)->rt_iif;
-}
-
-extern struct sock *__inet_lookup_listener(const struct hlist_head *head,
- const u32 daddr,
+extern struct sock *__inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 daddr,
const unsigned short hnum,
const int dif);
-/* Optimize the common listener case. */
-static inline struct sock *
- inet_lookup_listener(struct inet_hashinfo *hashinfo,
- const u32 daddr,
- const unsigned short hnum, const int dif)
+static inline struct sock *inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ __be32 daddr, __be16 dport, int dif)
{
- struct sock *sk = NULL;
- const struct hlist_head *head;
-
- read_lock(&hashinfo->lhash_lock);
- head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
- if (!hlist_empty(head)) {
- const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
-
- if (inet->num == hnum && !sk->sk_node.next &&
- (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
- (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
- !sk->sk_bound_dev_if)
- goto sherry_cache;
- sk = __inet_lookup_listener(head, daddr, hnum, dif);
- }
- if (sk) {
-sherry_cache:
- sock_hold(sk);
- }
- read_unlock(&hashinfo->lhash_lock);
- return sk;
+ return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
}
/* Socket demux engine toys. */
+/* What happens here is ugly; there's a pair of adjacent fields in
+   struct inet_sock; __be16 inet_dport followed by __u16 inet_num. We want
+   to search by pair, so we combine the keys into a single 32bit value
+   and compare with the 32bit value read from &...->inet_dport. Let's at
+   least make sure that it's not mixed with anything else...
+   On 64bit targets we combine comparisons with a pair of adjacent __be32
+   fields in the same way.
+*/
+typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
- (((__u32)(__sport) << 16) | (__u32)(__dport))
+ ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
- (((__u32)(__dport) << 16) | (__u32)(__sport))
+ ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
#if (BITS_PER_LONG == 64)
+typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
- const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
+ const __addrpair __name = (__force __addrpair) ( \
+ (((__force __u64)(__be32)(__saddr)) << 32) | \
+ ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
- const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
+ const __addrpair __name = (__force __addrpair) ( \
+ (((__force __u64)(__be32)(__daddr)) << 32) | \
+ ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->sk_hash == (__hash)) && \
- ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
- ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
+#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
+ (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
+ ((*((__addrpair *)&(inet_sk(__sk)->inet_daddr))) == (__cookie)) && \
+ ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->sk_hash == (__hash)) && \
- ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
- ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
+#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
+ (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
+ ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
+ ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
-#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_hash == (__hash)) && \
- (inet_sk(__sk)->daddr == (__saddr)) && \
- (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
+#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
+ (inet_sk(__sk)->inet_daddr == (__saddr)) && \
+ (inet_sk(__sk)->inet_rcv_saddr == (__daddr)) && \
+ ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define INET_TW_MATCH(__sk, __hash,__cookie, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_hash == (__hash)) && \
+#define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
(inet_twsk(__sk)->tw_daddr == (__saddr)) && \
(inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
+ ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
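/*
 * Layout-assertion sketch matching the comment above; hypothetical
 * helper, assuming BUILD_BUG_ON()/offsetof() are available here. The
 * single-word compares are only sound if inet_num immediately follows
 * inet_dport and, for the 64bit address cookie, inet_rcv_saddr
 * immediately follows inet_daddr in struct inet_sock.
 */
static inline void example_check_inet_sock_layout(void)
{
	BUILD_BUG_ON(offsetof(struct inet_sock, inet_num) !=
		     offsetof(struct inet_sock, inet_dport) + sizeof(__be16));
#if (BITS_PER_LONG == 64)
	BUILD_BUG_ON(offsetof(struct inet_sock, inet_rcv_saddr) !=
		     offsetof(struct inet_sock, inet_daddr) + sizeof(__be32));
#endif
}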
*
* Local BH must be disabled here.
*/
+extern struct sock * __inet_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 hnum, const int dif);
+
static inline struct sock *
- __inet_lookup_established(struct inet_hashinfo *hashinfo,
- const u32 saddr, const u16 sport,
- const u32 daddr, const u16 hnum,
- const int dif)
+ inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ const int dif)
{
- INET_ADDR_COOKIE(acookie, saddr, daddr)
- const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
- struct sock *sk;
- const struct hlist_node *node;
- /* Optimize here for direct hit, only listening connections can
- * have wildcards anyways.
- */
- unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
- struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
-
- prefetch(head->chain.first);
- read_lock(&head->lock);
- sk_for_each(sk, node, &head->chain) {
- if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
- goto hit; /* You sunk my battleship! */
- }
-
- /* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
- if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
- goto hit;
- }
- sk = NULL;
-out:
- read_unlock(&head->lock);
- return sk;
-hit:
- sock_hold(sk);
- goto out;
+ return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
+ ntohs(dport), dif);
}
-static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
- const u32 saddr, const u16 sport,
- const u32 daddr, const u16 hnum,
+static inline struct sock *__inet_lookup(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
const int dif)
{
- struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
- hnum, dif);
- return sk ? : inet_lookup_listener(hashinfo, daddr, hnum, dif);
+ u16 hnum = ntohs(dport);
+ struct sock *sk = __inet_lookup_established(net, hashinfo,
+ saddr, sport, daddr, hnum, dif);
+
+ return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
}
-static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
- const u32 saddr, const u16 sport,
- const u32 daddr, const u16 dport,
+static inline struct sock *inet_lookup(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
const int dif)
{
struct sock *sk;
local_bh_disable();
- sk = __inet_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif);
+ sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
local_bh_enable();
return sk;
}
+static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
+ struct sk_buff *skb,
+ const __be16 sport,
+ const __be16 dport)
+{
+ struct sock *sk = skb_steal_sock(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (sk)
+ return sk;
+
+ return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
+ iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb));
+}
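/*
 * Usage sketch (hypothetical function, assumes <linux/tcp.h> for
 * tcp_hdr()): a receive path that has already validated the transport
 * header resolves the owning socket in one call. Real input handlers
 * additionally deal with refcounting and error paths.
 */
static inline struct sock *example_tcp_rcv_lookup(struct inet_hashinfo *h,
						  struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	return __inet_lookup_skb(h, skb, th->source, th->dest);
}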
+
+extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk,
+ u32 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16, struct inet_timewait_sock **),
+ int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
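/*
 * Relationship sketch: inet_hash_connect() is expected to be a thin
 * wrapper that feeds protocol-specific callbacks into
 * __inet_hash_connect() above, letting IPv6 and DCCP reuse the same
 * ephemeral-port search loop. Roughly (callback names as used by the
 * IPv4 implementation, shown for illustration only):
 *
 *	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
 *				   __inet_check_established,
 *				   __inet_hash_nolisten);
 */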
#endif /* _INET_HASHTABLES_H */