X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fnetfilter%2Fnf_conntrack_core.c;h=f27c99246a4cce80a46f65ce809ea01c982883d8;hb=4ae127d1b6c71f9240dd4245f240e6dd8fc98014;hp=e581190fb6c374ff691679e780dffeec6c213cd3;hpb=6f912042256c12b0927438122594f5379b364f5d;p=safe%2Fjmp%2Flinux-2.6 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index e581190..f27c9924 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -9,27 +9,8 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * 23 Apr 2001: Harald Welte - * - new API and handling of conntrack/nat helpers - * - now capable of multiple expectations for one master - * 16 Jul 2002: Harald Welte - * - add usage/reference counts to ip_conntrack_expect - * - export ip_conntrack[_expect]_{find_get,put} functions - * 16 Dec 2003: Yasuyuki Kozakai @USAGI - * - generalize L3 protocol denendent part. - * 23 Mar 2004: Yasuyuki Kozakai @USAGI - * - add support various size of conntrack structures. - * 26 Jan 2006: Harald Welte - * - restructure nf_conn (introduce nf_conn_help) - * - redesign 'features' how they were originally intended - * 26 Feb 2006: Pablo Neira Ayuso - * - add support for L3 protocol module load on demand. - * - * Derived from net/ipv4/netfilter/ip_conntrack_core.c */ -#include #include #include #include @@ -47,244 +28,64 @@ #include #include #include - -/* This rwlock protects the main hash table, protocol/helper/expected - registrations, conntrack timers*/ -#define ASSERT_READ_LOCK(x) -#define ASSERT_WRITE_LOCK(x) +#include #include #include -#include +#include +#include #include #include -#include +#include #define NF_CONNTRACK_VERSION "0.5.0" -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -DEFINE_RWLOCK(nf_conntrack_lock); +DEFINE_SPINLOCK(nf_conntrack_lock); +EXPORT_SYMBOL_GPL(nf_conntrack_lock); /* nf_conntrack_standalone needs this */ atomic_t nf_conntrack_count = ATOMIC_INIT(0); +EXPORT_SYMBOL_GPL(nf_conntrack_count); -void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL; -LIST_HEAD(nf_conntrack_expect_list); -struct nf_conntrack_protocol **nf_ct_protos[PF_MAX]; -struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX]; -static LIST_HEAD(helpers); -unsigned int nf_conntrack_htable_size = 0; -int nf_conntrack_max; -struct list_head *nf_conntrack_hash; -static kmem_cache_t *nf_conntrack_expect_cachep; -struct nf_conn nf_conntrack_untracked; -unsigned int nf_ct_log_invalid; -static LIST_HEAD(unconfirmed); -static int nf_conntrack_vmalloc; - -static unsigned int nf_conntrack_next_id; -static unsigned int nf_conntrack_expect_next_id; -#ifdef CONFIG_NF_CONNTRACK_EVENTS -ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain); -ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain); - -DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); - -/* deliver cached events and clear cache entry - must be called with locally - * disabled softirqs */ -static inline void -__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache) -{ - DEBUGP("ecache: delivering events for %p\n", ecache->ct); - if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) - && ecache->events) - atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events, - ecache->ct); - - ecache->events = 0; - nf_ct_put(ecache->ct); - ecache->ct = NULL; -} - -/* Deliver all cached events for a particular conntrack. 
This is called - * by code prior to async packet handling for freeing the skb */ -void nf_ct_deliver_cached_events(const struct nf_conn *ct) -{ - struct nf_conntrack_ecache *ecache; +unsigned int nf_conntrack_htable_size __read_mostly; +EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); - local_bh_disable(); - ecache = &__get_cpu_var(nf_conntrack_ecache); - if (ecache->ct == ct) - __nf_ct_deliver_cached_events(ecache); - local_bh_enable(); -} +int nf_conntrack_max __read_mostly; +EXPORT_SYMBOL_GPL(nf_conntrack_max); -/* Deliver cached events for old pending events, if current conntrack != old */ -void __nf_ct_event_cache_init(struct nf_conn *ct) -{ - struct nf_conntrack_ecache *ecache; - - /* take care of delivering potentially old events */ - ecache = &__get_cpu_var(nf_conntrack_ecache); - BUG_ON(ecache->ct == ct); - if (ecache->ct) - __nf_ct_deliver_cached_events(ecache); - /* initialize for this conntrack/packet */ - ecache->ct = ct; - nf_conntrack_get(&ct->ct_general); -} +struct hlist_head *nf_conntrack_hash __read_mostly; +EXPORT_SYMBOL_GPL(nf_conntrack_hash); -/* flush the event cache - touches other CPU's data and must not be called - * while packets are still passing through the code */ -static void nf_ct_event_cache_flush(void) -{ - struct nf_conntrack_ecache *ecache; - int cpu; +struct nf_conn nf_conntrack_untracked __read_mostly; +EXPORT_SYMBOL_GPL(nf_conntrack_untracked); - for_each_possible_cpu(cpu) { - ecache = &per_cpu(nf_conntrack_ecache, cpu); - if (ecache->ct) - nf_ct_put(ecache->ct); - } -} -#else -static inline void nf_ct_event_cache_flush(void) {} -#endif /* CONFIG_NF_CONNTRACK_EVENTS */ +unsigned int nf_ct_log_invalid __read_mostly; +HLIST_HEAD(unconfirmed); +static int nf_conntrack_vmalloc __read_mostly; +static struct kmem_cache *nf_conntrack_cachep __read_mostly; DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); -/* - * This scheme offers various size of "struct nf_conn" dependent on - * features(helper, nat, ...) - */ - -#define NF_CT_FEATURES_NAMELEN 256 -static struct { - /* name of slab cache. 
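 *
 * (Illustrative aside, not part of the patch: the globals introduced
 *  above are tagged __read_mostly, which groups them in a separate data
 *  section so that hot, frequently written data cannot land on the same
 *  cache line.  A minimal sketch of the pattern, with made-up names:
 *
 *	static unsigned int lookup_limit __read_mostly = 64;	(set once at init)
 *	static atomic_t lookup_hits;				(written per packet)
 *
 *  Keeping the two apart avoids false sharing when lookup_hits bounces
 *  between CPUs.)
 *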
printed in /proc/slabinfo */ - char *name; - - /* size of slab cache */ - size_t size; - - /* slab cache pointer */ - kmem_cache_t *cachep; - - /* allocated slab cache + modules which uses this slab cache */ - int use; - - /* Initialization */ - int (*init_conntrack)(struct nf_conn *, u_int32_t); - -} nf_ct_cache[NF_CT_F_NUM]; - -/* protect members of nf_ct_cache except of "use" */ -DEFINE_RWLOCK(nf_ct_cache_lock); - -/* This avoids calling kmem_cache_create() with same name simultaneously */ -static DEFINE_MUTEX(nf_ct_cache_mutex); - -extern struct nf_conntrack_protocol nf_conntrack_generic_protocol; -struct nf_conntrack_protocol * -__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) -{ - if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) - return &nf_conntrack_generic_protocol; - - return nf_ct_protos[l3proto][protocol]; -} - -/* this is guaranteed to always return a valid protocol helper, since - * it falls back to generic_protocol */ -struct nf_conntrack_protocol * -nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol) -{ - struct nf_conntrack_protocol *p; - - preempt_disable(); - p = __nf_ct_proto_find(l3proto, protocol); - if (p) { - if (!try_module_get(p->me)) - p = &nf_conntrack_generic_protocol; - } - preempt_enable(); - - return p; -} - -void nf_ct_proto_put(struct nf_conntrack_protocol *p) -{ - module_put(p->me); -} - -struct nf_conntrack_l3proto * -nf_ct_l3proto_find_get(u_int16_t l3proto) -{ - struct nf_conntrack_l3proto *p; - - preempt_disable(); - p = __nf_ct_l3proto_find(l3proto); - if (p) { - if (!try_module_get(p->me)) - p = &nf_conntrack_generic_l3proto; - } - preempt_enable(); - - return p; -} - -void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p) -{ - module_put(p->me); -} - -int -nf_ct_l3proto_try_module_get(unsigned short l3proto) -{ - int ret; - struct nf_conntrack_l3proto *p; - -retry: p = nf_ct_l3proto_find_get(l3proto); - if (p == &nf_conntrack_generic_l3proto) { - ret = request_module("nf_conntrack-%d", l3proto); - if (!ret) - goto retry; - - return -EPROTOTYPE; - } - - return 0; -} - -void nf_ct_l3proto_module_put(unsigned short l3proto) -{ - struct nf_conntrack_l3proto *p; - - preempt_disable(); - p = __nf_ct_l3proto_find(l3proto); - preempt_enable(); - - module_put(p->me); -} - static int nf_conntrack_hash_rnd_initted; static unsigned int nf_conntrack_hash_rnd; static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, unsigned int size, unsigned int rnd) { - unsigned int a, b; - a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all), - ((tuple->src.l3num) << 16) | tuple->dst.protonum); - b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all), - (tuple->src.u.all << 16) | tuple->dst.u.all); + unsigned int n; + u_int32_t h; + + /* The direction must be ignored, so we hash everything up to the + * destination ports (which is a multiple of 4) and treat the last + * three bytes manually. 
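 *
 * (Illustrative aside, not part of the patch: the return statement of
 *  the rewritten function below maps the 32-bit jhash2() result onto
 *  [0, size) with a multiply-and-shift instead of the old "% size":
 *
 *	return ((u64)h * size) >> 32;	(i.e. floor(h * size / 2^32))
 *
 *  Each bucket still receives an equal share of the 2^32 hash values,
 *  but the modulo is replaced by a single multiply, and the mapping
 *  works for table sizes that are not powers of two.)
 *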
+ */ + n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); + h = jhash2((u32 *)tuple, n, + rnd ^ (((__force __u16)tuple->dst.u.all << 16) | + tuple->dst.protonum)); - return jhash_2words(a, b, rnd) % size; + return ((u64)h * size) >> 32; } static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) @@ -293,121 +94,7 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) nf_conntrack_hash_rnd); } -int nf_conntrack_register_cache(u_int32_t features, const char *name, - size_t size) -{ - int ret = 0; - char *cache_name; - kmem_cache_t *cachep; - - DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", - features, name, size); - - if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) { - DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n", - features); - return -EINVAL; - } - - mutex_lock(&nf_ct_cache_mutex); - - write_lock_bh(&nf_ct_cache_lock); - /* e.g: multiple helpers are loaded */ - if (nf_ct_cache[features].use > 0) { - DEBUGP("nf_conntrack_register_cache: already resisterd.\n"); - if ((!strncmp(nf_ct_cache[features].name, name, - NF_CT_FEATURES_NAMELEN)) - && nf_ct_cache[features].size == size) { - DEBUGP("nf_conntrack_register_cache: reusing.\n"); - nf_ct_cache[features].use++; - ret = 0; - } else - ret = -EBUSY; - - write_unlock_bh(&nf_ct_cache_lock); - mutex_unlock(&nf_ct_cache_mutex); - return ret; - } - write_unlock_bh(&nf_ct_cache_lock); - - /* - * The memory space for name of slab cache must be alive until - * cache is destroyed. - */ - cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC); - if (cache_name == NULL) { - DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n"); - ret = -ENOMEM; - goto out_up_mutex; - } - - if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN) - >= NF_CT_FEATURES_NAMELEN) { - printk("nf_conntrack_register_cache: name too long\n"); - ret = -EINVAL; - goto out_free_name; - } - - cachep = kmem_cache_create(cache_name, size, 0, 0, - NULL, NULL); - if (!cachep) { - printk("nf_conntrack_register_cache: Can't create slab cache " - "for the features = 0x%x\n", features); - ret = -ENOMEM; - goto out_free_name; - } - - write_lock_bh(&nf_ct_cache_lock); - nf_ct_cache[features].use = 1; - nf_ct_cache[features].size = size; - nf_ct_cache[features].cachep = cachep; - nf_ct_cache[features].name = cache_name; - write_unlock_bh(&nf_ct_cache_lock); - - goto out_up_mutex; - -out_free_name: - kfree(cache_name); -out_up_mutex: - mutex_unlock(&nf_ct_cache_mutex); - return ret; -} - -/* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ -void nf_conntrack_unregister_cache(u_int32_t features) -{ - kmem_cache_t *cachep; - char *name; - - /* - * This assures that kmem_cache_create() isn't called before destroying - * slab cache. 
- */ - DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features); - mutex_lock(&nf_ct_cache_mutex); - - write_lock_bh(&nf_ct_cache_lock); - if (--nf_ct_cache[features].use > 0) { - write_unlock_bh(&nf_ct_cache_lock); - mutex_unlock(&nf_ct_cache_mutex); - return; - } - cachep = nf_ct_cache[features].cachep; - name = nf_ct_cache[features].name; - nf_ct_cache[features].cachep = NULL; - nf_ct_cache[features].name = NULL; - nf_ct_cache[features].size = 0; - write_unlock_bh(&nf_ct_cache_lock); - - synchronize_net(); - - kmem_cache_destroy(cachep); - kfree(name); - - mutex_unlock(&nf_ct_cache_mutex); -} - -int +bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, unsigned int dataoff, @@ -415,147 +102,74 @@ nf_ct_get_tuple(const struct sk_buff *skb, u_int8_t protonum, struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, - const struct nf_conntrack_protocol *protocol) + const struct nf_conntrack_l4proto *l4proto) { - NF_CT_TUPLE_U_BLANK(tuple); + memset(tuple, 0, sizeof(*tuple)); tuple->src.l3num = l3num; if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) - return 0; + return false; tuple->dst.protonum = protonum; tuple->dst.dir = IP_CT_DIR_ORIGINAL; - return protocol->pkt_to_tuple(skb, dataoff, tuple); -} - -int -nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_l3proto *l3proto, - const struct nf_conntrack_protocol *protocol) -{ - NF_CT_TUPLE_U_BLANK(inverse); - - inverse->src.l3num = orig->src.l3num; - if (l3proto->invert_tuple(inverse, orig) == 0) - return 0; - - inverse->dst.dir = !orig->dst.dir; - - inverse->dst.protonum = orig->dst.protonum; - return protocol->invert_tuple(inverse, orig); -} - -/* nf_conntrack_expect helper functions */ -void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) -{ - struct nf_conn_help *master_help = nfct_help(exp->master); - - NF_CT_ASSERT(master_help); - ASSERT_WRITE_LOCK(&nf_conntrack_lock); - NF_CT_ASSERT(!timer_pending(&exp->timeout)); - - list_del(&exp->list); - NF_CT_STAT_INC(expect_delete); - master_help->expecting--; - nf_conntrack_expect_put(exp); + return l4proto->pkt_to_tuple(skb, dataoff, tuple); } +EXPORT_SYMBOL_GPL(nf_ct_get_tuple); -static void expectation_timed_out(unsigned long ul_expect) +bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, + u_int16_t l3num, struct nf_conntrack_tuple *tuple) { - struct nf_conntrack_expect *exp = (void *)ul_expect; + struct nf_conntrack_l3proto *l3proto; + struct nf_conntrack_l4proto *l4proto; + unsigned int protoff; + u_int8_t protonum; + int ret; - write_lock_bh(&nf_conntrack_lock); - nf_ct_unlink_expect(exp); - write_unlock_bh(&nf_conntrack_lock); - nf_conntrack_expect_put(exp); -} + rcu_read_lock(); -struct nf_conntrack_expect * -__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) -{ - struct nf_conntrack_expect *i; - - list_for_each_entry(i, &nf_conntrack_expect_list, list) { - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { - atomic_inc(&i->use); - return i; - } + l3proto = __nf_ct_l3proto_find(l3num); + ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); + if (ret != NF_ACCEPT) { + rcu_read_unlock(); + return false; } - return NULL; -} -/* Just find a expectation corresponding to a tuple. 
*/ -struct nf_conntrack_expect * -nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) -{ - struct nf_conntrack_expect *i; - - read_lock_bh(&nf_conntrack_lock); - i = __nf_conntrack_expect_find(tuple); - read_unlock_bh(&nf_conntrack_lock); + l4proto = __nf_ct_l4proto_find(l3num, protonum); - return i; -} + ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple, + l3proto, l4proto); -/* If an expectation for this connection is found, it gets delete from - * global list then returned. */ -static struct nf_conntrack_expect * -find_expectation(const struct nf_conntrack_tuple *tuple) -{ - struct nf_conntrack_expect *i; - - list_for_each_entry(i, &nf_conntrack_expect_list, list) { - /* If master is not in hash table yet (ie. packet hasn't left - this machine yet), how can other end know about expected? - Hence these are not the droids you are looking for (if - master ct never got confirmed, we'd hold a reference to it - and weird things would happen to future packets). */ - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) - && nf_ct_is_confirmed(i->master)) { - if (i->flags & NF_CT_EXPECT_PERMANENT) { - atomic_inc(&i->use); - return i; - } else if (del_timer(&i->timeout)) { - nf_ct_unlink_expect(i); - return i; - } - } - } - return NULL; + rcu_read_unlock(); + return ret; } +EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); -/* delete all expectations for this conntrack */ -void nf_ct_remove_expectations(struct nf_conn *ct) +bool +nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, + const struct nf_conntrack_tuple *orig, + const struct nf_conntrack_l3proto *l3proto, + const struct nf_conntrack_l4proto *l4proto) { - struct nf_conntrack_expect *i, *tmp; - struct nf_conn_help *help = nfct_help(ct); + memset(inverse, 0, sizeof(*inverse)); - /* Optimization: most connection never expect any others. 
*/ - if (!help || help->expecting == 0) - return; + inverse->src.l3num = orig->src.l3num; + if (l3proto->invert_tuple(inverse, orig) == 0) + return false; - list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) { - if (i->master == ct && del_timer(&i->timeout)) { - nf_ct_unlink_expect(i); - nf_conntrack_expect_put(i); - } - } + inverse->dst.dir = !orig->dst.dir; + + inverse->dst.protonum = orig->dst.protonum; + return l4proto->invert_tuple(inverse, orig); } +EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); static void clean_from_lists(struct nf_conn *ct) { - unsigned int ho, hr; - - DEBUGP("clean_from_lists(%p)\n", ct); - ASSERT_WRITE_LOCK(&nf_conntrack_lock); - - ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - LIST_DELETE(&nf_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]); - LIST_DELETE(&nf_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]); + pr_debug("clean_from_lists(%p)\n", ct); + hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); + hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode); /* Destroy all pending expectations */ nf_ct_remove_expectations(ct); @@ -565,10 +179,9 @@ static void destroy_conntrack(struct nf_conntrack *nfct) { struct nf_conn *ct = (struct nf_conn *)nfct; - struct nf_conntrack_l3proto *l3proto; - struct nf_conntrack_protocol *proto; + struct nf_conntrack_l4proto *l4proto; - DEBUGP("destroy_conntrack(%p)\n", ct); + pr_debug("destroy_conntrack(%p)\n", ct); NF_CT_ASSERT(atomic_read(&nfct->use) == 0); NF_CT_ASSERT(!timer_pending(&ct->timeout)); @@ -578,18 +191,14 @@ destroy_conntrack(struct nf_conntrack *nfct) /* To make sure we don't get any weird locking issues here: * destroy_conntrack() MUST NOT be called with a write lock * to nf_conntrack_lock!!! -HW */ - l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num); - if (l3proto && l3proto->destroy) - l3proto->destroy(ct); + rcu_read_lock(); + l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + if (l4proto && l4proto->destroy) + l4proto->destroy(ct); - proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); - if (proto && proto->destroy) - proto->destroy(ct); + rcu_read_unlock(); - if (nf_conntrack_destroyed) - nf_conntrack_destroyed(ct); - - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, @@ -598,87 +207,96 @@ destroy_conntrack(struct nf_conntrack *nfct) /* We overload first tuple to link into unconfirmed list. 
*/ if (!nf_ct_is_confirmed(ct)) { - BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list)); - list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); + BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode)); + hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); } NF_CT_STAT_INC(delete); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (ct->master) nf_ct_put(ct->master); - DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct); + pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); nf_conntrack_free(ct); } static void death_by_timeout(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; + struct nf_conn_help *help = nfct_help(ct); + struct nf_conntrack_helper *helper; - write_lock_bh(&nf_conntrack_lock); + if (help) { + rcu_read_lock(); + helper = rcu_dereference(help->helper); + if (helper && helper->destroy) + helper->destroy(ct); + rcu_read_unlock(); + } + + spin_lock_bh(&nf_conntrack_lock); /* Inside lock so preempt is disabled on module removal path. * Otherwise we can get spurious warnings. */ NF_CT_STAT_INC(delete_list); clean_from_lists(ct); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); nf_ct_put(ct); } -static inline int -conntrack_tuple_cmp(const struct nf_conntrack_tuple_hash *i, - const struct nf_conntrack_tuple *tuple, - const struct nf_conn *ignored_conntrack) -{ - ASSERT_READ_LOCK(&nf_conntrack_lock); - return nf_ct_tuplehash_to_ctrack(i) != ignored_conntrack - && nf_ct_tuple_equal(tuple, &i->tuple); -} - struct nf_conntrack_tuple_hash * -__nf_conntrack_find(const struct nf_conntrack_tuple *tuple, - const struct nf_conn *ignored_conntrack) +__nf_conntrack_find(const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; + struct hlist_node *n; unsigned int hash = hash_conntrack(tuple); - ASSERT_READ_LOCK(&nf_conntrack_lock); - list_for_each_entry(h, &nf_conntrack_hash[hash], list) { - if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) { + /* Disable BHs the entire time since we normally need to disable them + * at least once for the stats anyway. + */ + local_bh_disable(); + hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) { + if (nf_ct_tuple_equal(tuple, &h->tuple)) { NF_CT_STAT_INC(found); + local_bh_enable(); return h; } NF_CT_STAT_INC(searched); } + local_bh_enable(); return NULL; } +EXPORT_SYMBOL_GPL(__nf_conntrack_find); /* Find a connection corresponding to a tuple. 
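 *
 * (Illustrative aside, not part of the patch: __nf_conntrack_find()
 *  above runs locklessly under RCU, so an entry it returns may already
 *  be on its way to destruction.  nf_conntrack_find_get() below
 *  therefore takes its reference with atomic_inc_not_zero(); if the
 *  refcount has already hit zero the entry is dead and the lookup is
 *  treated as a miss:
 *
 *	rcu_read_lock();
 *	h = __nf_conntrack_find(tuple);
 *	if (h && !atomic_inc_not_zero(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use))
 *		h = NULL;	(lost the race with destruction)
 *	rcu_read_unlock();
 *
 *  which is a condensed form of the function that follows.)
 *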
*/ struct nf_conntrack_tuple_hash * -nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple, - const struct nf_conn *ignored_conntrack) +nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; + struct nf_conn *ct; - read_lock_bh(&nf_conntrack_lock); - h = __nf_conntrack_find(tuple, ignored_conntrack); - if (h) - atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use); - read_unlock_bh(&nf_conntrack_lock); + rcu_read_lock(); + h = __nf_conntrack_find(tuple); + if (h) { + ct = nf_ct_tuplehash_to_ctrack(h); + if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) + h = NULL; + } + rcu_read_unlock(); return h; } +EXPORT_SYMBOL_GPL(nf_conntrack_find_get); static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, - unsigned int repl_hash) + unsigned int repl_hash) { - ct->id = ++nf_conntrack_next_id; - list_prepend(&nf_conntrack_hash[hash], - &ct->tuplehash[IP_CT_DIR_ORIGINAL].list); - list_prepend(&nf_conntrack_hash[repl_hash], - &ct->tuplehash[IP_CT_DIR_REPLY].list); + hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, + &nf_conntrack_hash[hash]); + hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode, + &nf_conntrack_hash[repl_hash]); } void nf_conntrack_hash_insert(struct nf_conn *ct) @@ -688,20 +306,24 @@ void nf_conntrack_hash_insert(struct nf_conn *ct) hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); __nf_conntrack_hash_insert(ct, hash, repl_hash); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } +EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); /* Confirm a connection given skb; places it in hash table */ int -__nf_conntrack_confirm(struct sk_buff **pskb) +__nf_conntrack_confirm(struct sk_buff *skb) { unsigned int hash, repl_hash; + struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; + struct nf_conn_help *help; + struct hlist_node *n; enum ip_conntrack_info ctinfo; - ct = nf_ct_get(*pskb, &ctinfo); + ct = nf_ct_get(skb, &ctinfo); /* ipt_REJECT uses nf_conntrack_attach to attach related ICMP/TCP RST packets in other direction. Actual packet @@ -721,52 +343,53 @@ __nf_conntrack_confirm(struct sk_buff **pskb) /* No external references means noone else could have confirmed us. */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); - DEBUGP("Confirming conntrack %p\n", ct); + pr_debug("Confirming conntrack %p\n", ct); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. */ - if (!LIST_FIND(&nf_conntrack_hash[hash], - conntrack_tuple_cmp, - struct nf_conntrack_tuple_hash *, - &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL) - && !LIST_FIND(&nf_conntrack_hash[repl_hash], - conntrack_tuple_cmp, - struct nf_conntrack_tuple_hash *, - &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) { - struct nf_conn_help *help; - /* Remove from unconfirmed list */ - list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); - - __nf_conntrack_hash_insert(ct, hash, repl_hash); - /* Timer relative to confirmation time, not original - setting time, otherwise we'd get timer wrap in - weird delay cases. 
*/ - ct->timeout.expires += jiffies; - add_timer(&ct->timeout); - atomic_inc(&ct->ct_general.use); - set_bit(IPS_CONFIRMED_BIT, &ct->status); - NF_CT_STAT_INC(insert); - write_unlock_bh(&nf_conntrack_lock); - help = nfct_help(ct); - if (help && help->helper) - nf_conntrack_event_cache(IPCT_HELPER, *pskb); + hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) + if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + &h->tuple)) + goto out; + hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode) + if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, + &h->tuple)) + goto out; + + /* Remove from unconfirmed list */ + hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); + + __nf_conntrack_hash_insert(ct, hash, repl_hash); + /* Timer relative to confirmation time, not original + setting time, otherwise we'd get timer wrap in + weird delay cases. */ + ct->timeout.expires += jiffies; + add_timer(&ct->timeout); + atomic_inc(&ct->ct_general.use); + set_bit(IPS_CONFIRMED_BIT, &ct->status); + NF_CT_STAT_INC(insert); + spin_unlock_bh(&nf_conntrack_lock); + help = nfct_help(ct); + if (help && help->helper) + nf_conntrack_event_cache(IPCT_HELPER, skb); #ifdef CONFIG_NF_NAT_NEEDED - if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) || - test_bit(IPS_DST_NAT_DONE_BIT, &ct->status)) - nf_conntrack_event_cache(IPCT_NATINFO, *pskb); + if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) || + test_bit(IPS_DST_NAT_DONE_BIT, &ct->status)) + nf_conntrack_event_cache(IPCT_NATINFO, skb); #endif - nf_conntrack_event_cache(master_ct(ct) ? - IPCT_RELATED : IPCT_NEW, *pskb); - return NF_ACCEPT; - } + nf_conntrack_event_cache(master_ct(ct) ? + IPCT_RELATED : IPCT_NEW, skb); + return NF_ACCEPT; +out: NF_CT_STAT_INC(insert_failed); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return NF_DROP; } +EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); /* Returns true if a connection correspondings to the tuple (required for NAT). */ @@ -775,36 +398,58 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack) { struct nf_conntrack_tuple_hash *h; + struct hlist_node *n; + unsigned int hash = hash_conntrack(tuple); - read_lock_bh(&nf_conntrack_lock); - h = __nf_conntrack_find(tuple, ignored_conntrack); - read_unlock_bh(&nf_conntrack_lock); + /* Disable BHs the entire time since we need to disable them at + * least once for the stats anyway. + */ + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) { + if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && + nf_ct_tuple_equal(tuple, &h->tuple)) { + NF_CT_STAT_INC(found); + rcu_read_unlock_bh(); + return 1; + } + NF_CT_STAT_INC(searched); + } + rcu_read_unlock_bh(); - return h != NULL; + return 0; } +EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); + +#define NF_CT_EVICTION_RANGE 8 /* There's a small race here where we may free a just-assured connection. Too bad: we're in trouble anyway. 
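 *
 * (Illustrative aside, not part of the patch: the rewritten early_drop()
 *  below no longer scans a single chain.  Starting at the bucket that
 *  overflowed, it walks consecutive buckets, remembers the last entry
 *  seen that is not yet ASSURED, and gives up once it has a victim or
 *  has examined NF_CT_EVICTION_RANGE entries:
 *
 *	if (ct || cnt >= NF_CT_EVICTION_RANGE)
 *		break;
 *	hash = (hash + 1) % nf_conntrack_htable_size;
 *
 *  The victim is then claimed with atomic_inc_not_zero() before its
 *  timer is deleted, mirroring the lookup path above.)
 *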
*/ -static inline int unreplied(const struct nf_conntrack_tuple_hash *i) +static noinline int early_drop(unsigned int hash) { - return !(test_bit(IPS_ASSURED_BIT, - &nf_ct_tuplehash_to_ctrack(i)->status)); -} - -static int early_drop(struct list_head *chain) -{ - /* Traverse backwards: gives us oldest, which is roughly LRU */ + /* Use oldest entry, which is roughly LRU */ struct nf_conntrack_tuple_hash *h; - struct nf_conn *ct = NULL; + struct nf_conn *ct = NULL, *tmp; + struct hlist_node *n; + unsigned int i, cnt = 0; int dropped = 0; - read_lock_bh(&nf_conntrack_lock); - h = LIST_FIND_B(chain, unreplied, struct nf_conntrack_tuple_hash *); - if (h) { - ct = nf_ct_tuplehash_to_ctrack(h); - atomic_inc(&ct->ct_general.use); + rcu_read_lock(); + for (i = 0; i < nf_conntrack_htable_size; i++) { + hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], + hnode) { + tmp = nf_ct_tuplehash_to_ctrack(h); + if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) + ct = tmp; + cnt++; + } + + if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) + ct = NULL; + if (ct || cnt >= NF_CT_EVICTION_RANGE) + break; + hash = (hash + 1) % nf_conntrack_htable_size; } - read_unlock_bh(&nf_conntrack_lock); + rcu_read_unlock(); if (!ct) return dropped; @@ -812,73 +457,30 @@ static int early_drop(struct list_head *chain) if (del_timer(&ct->timeout)) { death_by_timeout((unsigned long)ct); dropped = 1; - NF_CT_STAT_INC(early_drop); + NF_CT_STAT_INC_ATOMIC(early_drop); } nf_ct_put(ct); return dropped; } -static inline int helper_cmp(const struct nf_conntrack_helper *i, - const struct nf_conntrack_tuple *rtuple) -{ - return nf_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask); -} - -static struct nf_conntrack_helper * -__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) -{ - return LIST_FIND(&helpers, helper_cmp, - struct nf_conntrack_helper *, - tuple); -} - -struct nf_conntrack_helper * -nf_ct_helper_find_get( const struct nf_conntrack_tuple *tuple) -{ - struct nf_conntrack_helper *helper; - - /* need nf_conntrack_lock to assure that helper exists until - * try_module_get() is called */ - read_lock_bh(&nf_conntrack_lock); - - helper = __nf_ct_helper_find(tuple); - if (helper) { - /* need to increase module usage count to assure helper will - * not go away while the caller is e.g. busy putting a - * conntrack in the hash that uses the helper */ - if (!try_module_get(helper->me)) - helper = NULL; - } - - read_unlock_bh(&nf_conntrack_lock); - - return helper; -} - -void nf_ct_helper_put(struct nf_conntrack_helper *helper) -{ - module_put(helper->me); -} - -static struct nf_conn * -__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_tuple *repl, - const struct nf_conntrack_l3proto *l3proto) +struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, + const struct nf_conntrack_tuple *repl) { - struct nf_conn *conntrack = NULL; - u_int32_t features = 0; - struct nf_conntrack_helper *helper; + struct nf_conn *ct = NULL; if (unlikely(!nf_conntrack_hash_rnd_initted)) { get_random_bytes(&nf_conntrack_hash_rnd, 4); nf_conntrack_hash_rnd_initted = 1; } - if (nf_conntrack_max - && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) { + /* We don't want any race condition at early drop stage */ + atomic_inc(&nf_conntrack_count); + + if (nf_conntrack_max && + unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) { unsigned int hash = hash_conntrack(orig); - /* Try dropping from this hash chain. 
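 *
 * (Illustrative aside, not part of the patch: note the ordering in
 *  nf_conntrack_alloc() above - the counter is bumped *before* the
 *  limit is tested:
 *
 *	atomic_inc(&nf_conntrack_count);
 *	if (nf_conntrack_max &&
 *	    unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max))
 *		...
 *
 *  so two CPUs racing at the limit cannot both slip under it; every
 *  failure path then compensates with atomic_dec().)
 *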
*/ - if (!early_drop(&nf_conntrack_hash[hash])) { + if (!early_drop(hash)) { + atomic_dec(&nf_conntrack_count); if (net_ratelimit()) printk(KERN_WARNING "nf_conntrack: table full, dropping" @@ -887,133 +489,117 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, } } - /* find features needed by this conntrack. */ - features = l3proto->get_features(orig); - - /* FIXME: protect helper list per RCU */ - read_lock_bh(&nf_conntrack_lock); - helper = __nf_ct_helper_find(repl); - if (helper) - features |= NF_CT_F_HELP; - read_unlock_bh(&nf_conntrack_lock); - - DEBUGP("nf_conntrack_alloc: features=0x%x\n", features); - - read_lock_bh(&nf_ct_cache_lock); - - if (unlikely(!nf_ct_cache[features].use)) { - DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n", - features); - goto out; - } - - conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC); - if (conntrack == NULL) { - DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n"); - goto out; + ct = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC); + if (ct == NULL) { + pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); + atomic_dec(&nf_conntrack_count); + return ERR_PTR(-ENOMEM); } - memset(conntrack, 0, nf_ct_cache[features].size); - conntrack->features = features; - if (helper) { - struct nf_conn_help *help = nfct_help(conntrack); - NF_CT_ASSERT(help); - help->helper = helper; - } - - atomic_set(&conntrack->ct_general.use, 1); - conntrack->ct_general.destroy = destroy_conntrack; - conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; - conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; + atomic_set(&ct->ct_general.use, 1); + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; + ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; /* Don't set timer yet: wait for confirmation */ - init_timer(&conntrack->timeout); - conntrack->timeout.data = (unsigned long)conntrack; - conntrack->timeout.function = death_by_timeout; + setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); + INIT_RCU_HEAD(&ct->rcu); - atomic_inc(&nf_conntrack_count); -out: - read_unlock_bh(&nf_ct_cache_lock); - return conntrack; + return ct; } +EXPORT_SYMBOL_GPL(nf_conntrack_alloc); -struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_tuple *repl) +static void nf_conntrack_free_rcu(struct rcu_head *head) { - struct nf_conntrack_l3proto *l3proto; + struct nf_conn *ct = container_of(head, struct nf_conn, rcu); - l3proto = __nf_ct_l3proto_find(orig->src.l3num); - return __nf_conntrack_alloc(orig, repl, l3proto); + nf_ct_ext_free(ct); + kmem_cache_free(nf_conntrack_cachep, ct); + atomic_dec(&nf_conntrack_count); } -void nf_conntrack_free(struct nf_conn *conntrack) +void nf_conntrack_free(struct nf_conn *ct) { - u_int32_t features = conntrack->features; - NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM); - DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features, - conntrack); - kmem_cache_free(nf_ct_cache[features].cachep, conntrack); - atomic_dec(&nf_conntrack_count); + nf_ct_ext_destroy(ct); + call_rcu(&ct->rcu, nf_conntrack_free_rcu); } +EXPORT_SYMBOL_GPL(nf_conntrack_free); /* Allocate a new conntrack: we return -ENOMEM if classification failed due to stress. Otherwise it really is unclassifiable. 
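 *
 * (Illustrative aside, not part of the patch: nf_conntrack_free() above
 *  does not return the object to the slab directly.  Lookups now run
 *  under rcu_read_lock(), so a concurrent reader may still be examining
 *  the entry; the actual free is deferred past a grace period with
 *
 *	call_rcu(&ct->rcu, nf_conntrack_free_rcu);
 *
 *  and only the RCU callback hands the memory back to
 *  nf_conntrack_cachep and decrements nf_conntrack_count.)
 *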
*/ static struct nf_conntrack_tuple_hash * init_conntrack(const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, - struct nf_conntrack_protocol *protocol, + struct nf_conntrack_l4proto *l4proto, struct sk_buff *skb, unsigned int dataoff) { - struct nf_conn *conntrack; + struct nf_conn *ct; + struct nf_conn_help *help; struct nf_conntrack_tuple repl_tuple; struct nf_conntrack_expect *exp; - if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) { - DEBUGP("Can't invert tuple.\n"); + if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { + pr_debug("Can't invert tuple.\n"); return NULL; } - conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto); - if (conntrack == NULL || IS_ERR(conntrack)) { - DEBUGP("Can't allocate conntrack.\n"); - return (struct nf_conntrack_tuple_hash *)conntrack; + ct = nf_conntrack_alloc(tuple, &repl_tuple); + if (ct == NULL || IS_ERR(ct)) { + pr_debug("Can't allocate conntrack.\n"); + return (struct nf_conntrack_tuple_hash *)ct; } - if (!protocol->new(conntrack, skb, dataoff)) { - nf_conntrack_free(conntrack); - DEBUGP("init conntrack: can't track with proto module\n"); + if (!l4proto->new(ct, skb, dataoff)) { + nf_conntrack_free(ct); + pr_debug("init conntrack: can't track with proto module\n"); return NULL; } - write_lock_bh(&nf_conntrack_lock); - exp = find_expectation(tuple); - + spin_lock_bh(&nf_conntrack_lock); + exp = nf_ct_find_expectation(tuple); if (exp) { - DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n", - conntrack, exp); + pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", + ct, exp); /* Welcome, Mr. Bond. We've been expecting you... */ - __set_bit(IPS_EXPECTED_BIT, &conntrack->status); - conntrack->master = exp->master; + __set_bit(IPS_EXPECTED_BIT, &ct->status); + ct->master = exp->master; + if (exp->helper) { + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); + if (help) + rcu_assign_pointer(help->helper, exp->helper); + } + #ifdef CONFIG_NF_CONNTRACK_MARK - conntrack->mark = exp->master->mark; + ct->mark = exp->master->mark; #endif - nf_conntrack_get(&conntrack->master->ct_general); +#ifdef CONFIG_NF_CONNTRACK_SECMARK + ct->secmark = exp->master->secmark; +#endif + nf_conntrack_get(&ct->master->ct_general); NF_CT_STAT_INC(expect_new); - } else + } else { + struct nf_conntrack_helper *helper; + + helper = __nf_ct_helper_find(&repl_tuple); + if (helper) { + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); + if (help) + rcu_assign_pointer(help->helper, helper); + } NF_CT_STAT_INC(new); + } /* Overload tuple linked list to put us in unconfirmed list. 
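 *
 * (Illustrative aside, not part of the patch: an unconfirmed conntrack
 *  is not in the hash table at all.  Its IP_CT_DIR_ORIGINAL tuplehash
 *  node is reused to park it on the global unconfirmed list, as the
 *  line just below does:
 *
 *	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed);
 *
 *  __nf_conntrack_confirm() later unlinks it from there before
 *  inserting both directions into nf_conntrack_hash.)
 *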
*/ - list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); + hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (exp) { if (exp->expectfn) - exp->expectfn(conntrack, exp); - nf_conntrack_expect_put(exp); + exp->expectfn(ct, exp); + nf_ct_expect_put(exp); } - return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL]; + return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; } /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ @@ -1023,7 +609,7 @@ resolve_normal_ct(struct sk_buff *skb, u_int16_t l3num, u_int8_t protonum, struct nf_conntrack_l3proto *l3proto, - struct nf_conntrack_protocol *proto, + struct nf_conntrack_l4proto *l4proto, int *set_reply, enum ip_conntrack_info *ctinfo) { @@ -1031,17 +617,17 @@ resolve_normal_ct(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; - if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data), + if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, &tuple, l3proto, - proto)) { - DEBUGP("resolve_normal_ct: Can't get tuple\n"); + l4proto)) { + pr_debug("resolve_normal_ct: Can't get tuple\n"); return NULL; } /* look for tuple match */ - h = nf_conntrack_find_get(&tuple, NULL); + h = nf_conntrack_find_get(&tuple); if (!h) { - h = init_conntrack(&tuple, l3proto, proto, skb, dataoff); + h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff); if (!h) return NULL; if (IS_ERR(h)) @@ -1057,13 +643,14 @@ resolve_normal_ct(struct sk_buff *skb, } else { /* Once we've had two way comms, always ESTABLISHED. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { - DEBUGP("nf_conntrack_in: normal packet for %p\n", ct); + pr_debug("nf_conntrack_in: normal packet for %p\n", ct); *ctinfo = IP_CT_ESTABLISHED; } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { - DEBUGP("nf_conntrack_in: related packet for %p\n", ct); + pr_debug("nf_conntrack_in: related packet for %p\n", + ct); *ctinfo = IP_CT_RELATED; } else { - DEBUGP("nf_conntrack_in: new packet for %p\n", ct); + pr_debug("nf_conntrack_in: new packet for %p\n", ct); *ctinfo = IP_CT_NEW; } *set_reply = 0; @@ -1074,320 +661,134 @@ resolve_normal_ct(struct sk_buff *skb, } unsigned int -nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) +nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; struct nf_conntrack_l3proto *l3proto; - struct nf_conntrack_protocol *proto; + struct nf_conntrack_l4proto *l4proto; unsigned int dataoff; u_int8_t protonum; int set_reply = 0; int ret; /* Previously seen (loopback or untracked)? Ignore. */ - if ((*pskb)->nfct) { - NF_CT_STAT_INC(ignore); + if (skb->nfct) { + NF_CT_STAT_INC_ATOMIC(ignore); return NF_ACCEPT; } + /* rcu_read_lock()ed by nf_hook_slow */ l3proto = __nf_ct_l3proto_find((u_int16_t)pf); - if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) { - DEBUGP("not prepared to track yet or error occured\n"); + ret = l3proto->get_l4proto(skb, skb_network_offset(skb), + &dataoff, &protonum); + if (ret <= 0) { + pr_debug("not prepared to track yet or error occured\n"); + NF_CT_STAT_INC_ATOMIC(error); + NF_CT_STAT_INC_ATOMIC(invalid); return -ret; } - proto = __nf_ct_proto_find((u_int16_t)pf, protonum); + l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum); /* It may be an special packet, error, unclean... * inverse of the return code tells to the netfilter * core what to do with the packet. 
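 *
 * (Illustrative aside, not part of the patch: the "inverse of the
 *  return code" convention means a protocol handler reports a netfilter
 *  verdict V by returning -V, and nf_conntrack_in() undoes the negation:
 *
 *	if ((ret = l4proto->error(skb, ...)) <= 0)
 *		return -ret;	(e.g. ret == -NF_ACCEPT becomes NF_ACCEPT)
 *
 *  so a handler that wants a malformed packet dropped returns -NF_DROP.)
 *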
*/ - if (proto->error != NULL && - (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) { - NF_CT_STAT_INC(error); - NF_CT_STAT_INC(invalid); + if (l4proto->error != NULL && + (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) { + NF_CT_STAT_INC_ATOMIC(error); + NF_CT_STAT_INC_ATOMIC(invalid); return -ret; } - ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto, + ct = resolve_normal_ct(skb, dataoff, pf, protonum, l3proto, l4proto, &set_reply, &ctinfo); if (!ct) { /* Not valid part of a connection */ - NF_CT_STAT_INC(invalid); + NF_CT_STAT_INC_ATOMIC(invalid); return NF_ACCEPT; } if (IS_ERR(ct)) { /* Too stressed to deal. */ - NF_CT_STAT_INC(drop); + NF_CT_STAT_INC_ATOMIC(drop); return NF_DROP; } - NF_CT_ASSERT((*pskb)->nfct); + NF_CT_ASSERT(skb->nfct); - ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum); + ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); if (ret < 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ - DEBUGP("nf_conntrack_in: Can't track with proto module\n"); - nf_conntrack_put((*pskb)->nfct); - (*pskb)->nfct = NULL; - NF_CT_STAT_INC(invalid); + pr_debug("nf_conntrack_in: Can't track with proto module\n"); + nf_conntrack_put(skb->nfct); + skb->nfct = NULL; + NF_CT_STAT_INC_ATOMIC(invalid); return -ret; } if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) - nf_conntrack_event_cache(IPCT_STATUS, *pskb); + nf_conntrack_event_cache(IPCT_STATUS, skb); return ret; } +EXPORT_SYMBOL_GPL(nf_conntrack_in); -int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig) -{ - return nf_ct_invert_tuple(inverse, orig, - __nf_ct_l3proto_find(orig->src.l3num), - __nf_ct_proto_find(orig->src.l3num, - orig->dst.protonum)); -} - -/* Would two expected things clash? */ -static inline int expect_clash(const struct nf_conntrack_expect *a, - const struct nf_conntrack_expect *b) -{ - /* Part covered by intersection of masks must be unequal, - otherwise they clash */ - struct nf_conntrack_tuple intersect_mask; - int count; - - intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num; - intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; - intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all; - intersect_mask.dst.protonum = a->mask.dst.protonum - & b->mask.dst.protonum; - - for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ - intersect_mask.src.u3.all[count] = - a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; - } - - for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ - intersect_mask.dst.u3.all[count] = - a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count]; - } - - return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); -} - -static inline int expect_matches(const struct nf_conntrack_expect *a, - const struct nf_conntrack_expect *b) -{ - return a->master == b->master - && nf_ct_tuple_equal(&a->tuple, &b->tuple) - && nf_ct_tuple_equal(&a->mask, &b->mask); -} - -/* Generally a bad idea to call this: could have matched already. 
*/ -void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp) -{ - struct nf_conntrack_expect *i; - - write_lock_bh(&nf_conntrack_lock); - /* choose the the oldest expectation to evict */ - list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { - if (expect_matches(i, exp) && del_timer(&i->timeout)) { - nf_ct_unlink_expect(i); - write_unlock_bh(&nf_conntrack_lock); - nf_conntrack_expect_put(i); - return; - } - } - write_unlock_bh(&nf_conntrack_lock); -} - -/* We don't increase the master conntrack refcount for non-fulfilled - * conntracks. During the conntrack destruction, the expectations are - * always killed before the conntrack itself */ -struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me) -{ - struct nf_conntrack_expect *new; - - new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC); - if (!new) { - DEBUGP("expect_related: OOM allocating expect\n"); - return NULL; - } - new->master = me; - atomic_set(&new->use, 1); - return new; -} - -void nf_conntrack_expect_put(struct nf_conntrack_expect *exp) -{ - if (atomic_dec_and_test(&exp->use)) - kmem_cache_free(nf_conntrack_expect_cachep, exp); -} - -static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp) -{ - struct nf_conn_help *master_help = nfct_help(exp->master); - - atomic_inc(&exp->use); - master_help->expecting++; - list_add(&exp->list, &nf_conntrack_expect_list); - - init_timer(&exp->timeout); - exp->timeout.data = (unsigned long)exp; - exp->timeout.function = expectation_timed_out; - exp->timeout.expires = jiffies + master_help->helper->timeout * HZ; - add_timer(&exp->timeout); - - exp->id = ++nf_conntrack_expect_next_id; - atomic_inc(&exp->use); - NF_CT_STAT_INC(expect_create); -} - -/* Race with expectations being used means we could have none to find; OK. */ -static void evict_oldest_expect(struct nf_conn *master) -{ - struct nf_conntrack_expect *i; - - list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { - if (i->master == master) { - if (del_timer(&i->timeout)) { - nf_ct_unlink_expect(i); - nf_conntrack_expect_put(i); - } - break; - } - } -} - -static inline int refresh_timer(struct nf_conntrack_expect *i) -{ - struct nf_conn_help *master_help = nfct_help(i->master); - - if (!del_timer(&i->timeout)) - return 0; - - i->timeout.expires = jiffies + master_help->helper->timeout*HZ; - add_timer(&i->timeout); - return 1; -} - -int nf_conntrack_expect_related(struct nf_conntrack_expect *expect) +bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, + const struct nf_conntrack_tuple *orig) { - struct nf_conntrack_expect *i; - struct nf_conn *master = expect->master; - struct nf_conn_help *master_help = nfct_help(master); - int ret; + bool ret; - NF_CT_ASSERT(master_help); - - DEBUGP("nf_conntrack_expect_related %p\n", related_to); - DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple); - DEBUGP("mask: "); NF_CT_DUMP_TUPLE(&expect->mask); - - write_lock_bh(&nf_conntrack_lock); - list_for_each_entry(i, &nf_conntrack_expect_list, list) { - if (expect_matches(i, expect)) { - /* Refresh timer: if it's dying, ignore.. */ - if (refresh_timer(i)) { - ret = 0; - goto out; - } - } else if (expect_clash(i, expect)) { - ret = -EBUSY; - goto out; - } - } - /* Will be over limit? 
*/ - if (master_help->helper->max_expected && - master_help->expecting >= master_help->helper->max_expected) - evict_oldest_expect(master); - - nf_conntrack_expect_insert(expect); - nf_conntrack_expect_event(IPEXP_NEW, expect); - ret = 0; -out: - write_unlock_bh(&nf_conntrack_lock); + rcu_read_lock(); + ret = nf_ct_invert_tuple(inverse, orig, + __nf_ct_l3proto_find(orig->src.l3num), + __nf_ct_l4proto_find(orig->src.l3num, + orig->dst.protonum)); + rcu_read_unlock(); return ret; } +EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); -int nf_conntrack_helper_register(struct nf_conntrack_helper *me) +/* Alter reply tuple (maybe alter helper). This is for NAT, and is + implicitly racy: see __nf_conntrack_confirm */ +void nf_conntrack_alter_reply(struct nf_conn *ct, + const struct nf_conntrack_tuple *newreply) { - int ret; - BUG_ON(me->timeout == 0); - - ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help", - sizeof(struct nf_conn) - + sizeof(struct nf_conn_help) - + __alignof__(struct nf_conn_help)); - if (ret < 0) { - printk(KERN_ERR "nf_conntrack_helper_reigster: Unable to create slab cache for conntracks\n"); - return ret; - } - write_lock_bh(&nf_conntrack_lock); - list_prepend(&helpers, me); - write_unlock_bh(&nf_conntrack_lock); - - return 0; -} - -struct nf_conntrack_helper * -__nf_conntrack_helper_find_byname(const char *name) -{ - struct nf_conntrack_helper *h; + struct nf_conn_help *help = nfct_help(ct); + struct nf_conntrack_helper *helper; - list_for_each_entry(h, &helpers, list) { - if (!strcmp(h->name, name)) - return h; - } + /* Should be unconfirmed, so not in hash table yet */ + NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); - return NULL; -} + pr_debug("Altering reply tuple of %p to ", ct); + nf_ct_dump_tuple(newreply); -static inline int unhelp(struct nf_conntrack_tuple_hash *i, - const struct nf_conntrack_helper *me) -{ - struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i); - struct nf_conn_help *help = nfct_help(ct); + ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; + if (ct->master || (help && !hlist_empty(&help->expectations))) + return; - if (help && help->helper == me) { - nf_conntrack_event(IPCT_HELPER, ct); - help->helper = NULL; + rcu_read_lock(); + helper = __nf_ct_helper_find(newreply); + if (helper == NULL) { + if (help) + rcu_assign_pointer(help->helper, NULL); + goto out; } - return 0; -} -void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) -{ - unsigned int i; - struct nf_conntrack_expect *exp, *tmp; - - /* Need write lock here, to delete helper. */ - write_lock_bh(&nf_conntrack_lock); - LIST_DELETE(&helpers, me); - - /* Get rid of expectations */ - list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) { - struct nf_conn_help *help = nfct_help(exp->master); - if (help->helper == me && del_timer(&exp->timeout)) { - nf_ct_unlink_expect(exp); - nf_conntrack_expect_put(exp); - } + if (help == NULL) { + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); + if (help == NULL) + goto out; + } else { + memset(&help->help, 0, sizeof(help->help)); } - /* Get rid of expecteds, set helpers to NULL. */ - LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me); - for (i = 0; i < nf_conntrack_htable_size; i++) - LIST_FIND_W(&nf_conntrack_hash[i], unhelp, - struct nf_conntrack_tuple_hash *, me); - write_unlock_bh(&nf_conntrack_lock); - - /* Someone could be still looking at the helper in a bh. 
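 *
 * (Illustrative aside, not part of the patch: nf_conntrack_alter_reply()
 *  above never stores help->helper with a plain assignment.  The pointer
 *  is published with
 *
 *	rcu_assign_pointer(help->helper, helper);
 *
 *  so that readers doing rcu_dereference(help->helper), such as
 *  death_by_timeout() earlier in this file, see either NULL or a fully
 *  initialised helper, never a partially visible one.)
 *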
*/ - synchronize_net(); + rcu_assign_pointer(help->helper, helper); +out: + rcu_read_unlock(); } +EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ void __nf_ct_refresh_acct(struct nf_conn *ct, @@ -1401,88 +802,113 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); NF_CT_ASSERT(skb); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); + + /* Only update if this is not a fixed timeout */ + if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) + goto acct; /* If not in hash table, timer will not be active yet */ if (!nf_ct_is_confirmed(ct)) { ct->timeout.expires = extra_jiffies; event = IPCT_REFRESH; } else { - /* Need del_timer for race avoidance (may already be dying). */ - if (del_timer(&ct->timeout)) { - ct->timeout.expires = jiffies + extra_jiffies; + unsigned long newtime = jiffies + extra_jiffies; + + /* Only update the timeout if the new timeout is at least + HZ jiffies from the old timeout. Need del_timer for race + avoidance (may already be dying). */ + if (newtime - ct->timeout.expires >= HZ + && del_timer(&ct->timeout)) { + ct->timeout.expires = newtime; add_timer(&ct->timeout); event = IPCT_REFRESH; } } +acct: #ifdef CONFIG_NF_CT_ACCT if (do_acct) { ct->counters[CTINFO2DIR(ctinfo)].packets++; ct->counters[CTINFO2DIR(ctinfo)].bytes += - skb->len - (unsigned int)(skb->nh.raw - skb->data); - if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) - || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) - event |= IPCT_COUNTER_FILLING; + skb->len - skb_network_offset(skb); + + if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) + || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) + event |= IPCT_COUNTER_FILLING; } #endif - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); /* must be unlocked when calling event cache */ if (event) nf_conntrack_event_cache(event, skb); } +EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); + +void __nf_ct_kill_acct(struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, + int do_acct) +{ +#ifdef CONFIG_NF_CT_ACCT + if (do_acct) { + spin_lock_bh(&nf_conntrack_lock); + ct->counters[CTINFO2DIR(ctinfo)].packets++; + ct->counters[CTINFO2DIR(ctinfo)].bytes += + skb->len - skb_network_offset(skb); + spin_unlock_bh(&nf_conntrack_lock); + } +#endif + if (del_timer(&ct->timeout)) + ct->timeout.function((unsigned long)ct); +} +EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); -#if defined(CONFIG_NF_CT_NETLINK) || \ - defined(CONFIG_NF_CT_NETLINK_MODULE) +#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) #include #include #include - /* Generic function for tcp/udp/sctp/dccp and alike. 
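 *
 * (Illustrative aside, not part of the patch: the conversion below moves
 *  from the old NFA_PUT()/nfattr interface to the generic netlink
 *  attribute helpers, emitting and parsing the port pair as typed
 *  big-endian u16 attributes:
 *
 *	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
 *	...
 *	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
 *
 *  with nf_ct_port_nla_policy describing the expected types so the
 *  netlink core can reject malformed attributes before the tuple is
 *  touched.)
 *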
This needs to be * in ip_conntrack_core, since we don't want the protocols to autoload * or depend on ctnetlink */ -int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb, +int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t), - &tuple->src.u.tcp.port); - NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t), - &tuple->dst.u.tcp.port); + NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port); + NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port); return 0; -nfattr_failure: +nla_put_failure: return -1; } +EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); -static const size_t cta_min_proto[CTA_PROTO_MAX] = { - [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t), - [CTA_PROTO_DST_PORT-1] = sizeof(u_int16_t) +const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { + [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, + [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, }; +EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); -int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[], +int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t) { - if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1]) - return -EINVAL; - - if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) + if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) return -EINVAL; - t->src.u.tcp.port = - *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]); - t->dst.u.tcp.port = - *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]); + t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); + t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); return 0; } +EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); #endif /* Used by ipt_REJECT and ip6t_REJECT. */ -void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) +static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; @@ -1500,46 +926,43 @@ void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) nf_conntrack_get(nskb->nfct); } -static inline int -do_iter(const struct nf_conntrack_tuple_hash *i, - int (*iter)(struct nf_conn *i, void *data), - void *data) -{ - return iter(nf_ct_tuplehash_to_ctrack(i), data); -} - /* Bring out ya dead! 
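 *
 * (Illustrative aside, not part of the patch: get_next_corpse() below
 *  and nf_ct_iterate_cleanup() form a generic "visit every conntrack"
 *  primitive; the iterator returns nonzero for entries that should die.
 *  A hypothetical caller that kills all entries carrying a given mark
 *  might look like
 *
 *	static int kill_by_mark(struct nf_conn *i, void *data)
 *	{
 *		return i->mark == *(u_int32_t *)data;
 *	}
 *
 *	u_int32_t mark = 42;
 *	nf_ct_iterate_cleanup(kill_by_mark, &mark);
 *
 *  kill_by_mark and the mark value are invented for illustration, and
 *  i->mark assumes CONFIG_NF_CONNTRACK_MARK; nf_conntrack_flush() below
 *  uses the same mechanism with kill_all().)
 *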
*/ -static struct nf_conntrack_tuple_hash * +static struct nf_conn * get_next_corpse(int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) { - struct nf_conntrack_tuple_hash *h = NULL; + struct nf_conntrack_tuple_hash *h; + struct nf_conn *ct; + struct hlist_node *n; - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { - h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter, - struct nf_conntrack_tuple_hash *, iter, data); - if (h) - break; - } - if (!h) - h = LIST_FIND_W(&unconfirmed, do_iter, - struct nf_conntrack_tuple_hash *, iter, data); - if (h) - atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use); - write_unlock_bh(&nf_conntrack_lock); - - return h; + hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) { + ct = nf_ct_tuplehash_to_ctrack(h); + if (iter(ct, data)) + goto found; + } + } + hlist_for_each_entry(h, n, &unconfirmed, hnode) { + ct = nf_ct_tuplehash_to_ctrack(h); + if (iter(ct, data)) + set_bit(IPS_DYING_BIT, &ct->status); + } + spin_unlock_bh(&nf_conntrack_lock); + return NULL; +found: + atomic_inc(&ct->ct_general.use); + spin_unlock_bh(&nf_conntrack_lock); + return ct; } void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data) { - struct nf_conntrack_tuple_hash *h; + struct nf_conn *ct; unsigned int bucket = 0; - while ((h = get_next_corpse(iter, data, &bucket)) != NULL) { - struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { /* Time to push up daises... */ if (del_timer(&ct->timeout)) death_by_timeout((unsigned long)ct); @@ -1548,33 +971,34 @@ nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data) nf_ct_put(ct); } } +EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); static int kill_all(struct nf_conn *i, void *data) { return 1; } -static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size) +void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size) { if (vmalloced) vfree(hash); else - free_pages((unsigned long)hash, - get_order(sizeof(struct list_head) * size)); + free_pages((unsigned long)hash, + get_order(sizeof(struct hlist_head) * size)); } +EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); -void nf_conntrack_flush() +void nf_conntrack_flush(void) { nf_ct_iterate_cleanup(kill_all, NULL); } +EXPORT_SYMBOL_GPL(nf_conntrack_flush); /* Mishearing the voices in his head, our hero wonders how he's supposed to kill the mall. */ void nf_conntrack_cleanup(void) { - int i; - - ip_ct_attach = NULL; + rcu_assign_pointer(ip_ct_attach, NULL); /* This makes sure all current packets have passed through netfilter framework. 
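 *
 * (Illustrative aside, not part of the patch: the teardown order here
 *  matters.  The attach hook is unpublished first,
 *
 *	rcu_assign_pointer(ip_ct_attach, NULL);
 *
 *  and only after in-flight packets have drained, as the surrounding
 *  comment describes, are the hash table and the slab cache freed,
 *  since those packets may still hold references found through the old
 *  pointer.)
 *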

 /* Mishearing the voices in his head, our hero wonders how he's
    supposed to kill the mall. */
 void nf_conntrack_cleanup(void)
 {
-	int i;
-
-	ip_ct_attach = NULL;
+	rcu_assign_pointer(ip_ct_attach, NULL);

 	/* This makes sure all current packets have passed through
 	   netfilter framework.  Roll on, two-stage module
@@ -1592,65 +1016,59 @@ void nf_conntrack_cleanup(void)
 	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
 		schedule();

-	for (i = 0; i < NF_CT_F_NUM; i++) {
-		if (nf_ct_cache[i].use == 0)
-			continue;
+	rcu_assign_pointer(nf_ct_destroy, NULL);

-		NF_CT_ASSERT(nf_ct_cache[i].use == 1);
-		nf_ct_cache[i].use = 1;
-		nf_conntrack_unregister_cache(i);
-	}
-	kmem_cache_destroy(nf_conntrack_expect_cachep);
-	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
-			    nf_conntrack_htable_size);
-
-	/* free l3proto protocol tables */
-	for (i = 0; i < PF_MAX; i++)
-		if (nf_ct_protos[i]) {
-			kfree(nf_ct_protos[i]);
-			nf_ct_protos[i] = NULL;
-		}
+	kmem_cache_destroy(nf_conntrack_cachep);
+	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
+			     nf_conntrack_htable_size);
+
+	nf_conntrack_proto_fini();
+	nf_conntrack_helper_fini();
+	nf_conntrack_expect_fini();
 }

-static struct list_head *alloc_hashtable(int size, int *vmalloced)
+struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
 {
-	struct list_head *hash;
-	unsigned int i;
+	struct hlist_head *hash;
+	unsigned int size, i;
+
+	*vmalloced = 0;

-	*vmalloced = 0;
-	hash = (void*)__get_free_pages(GFP_KERNEL,
-				       get_order(sizeof(struct list_head)
+	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
+	hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
+				       get_order(sizeof(struct hlist_head)
 						 * size));
-	if (!hash) {
+	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = vmalloc(sizeof(struct list_head) * size);
+		hash = vmalloc(sizeof(struct hlist_head) * size);
 	}

 	if (hash)
-		for (i = 0; i < size; i++)
-			INIT_LIST_HEAD(&hash[i]);
+		for (i = 0; i < size; i++)
+			INIT_HLIST_HEAD(&hash[i]);

 	return hash;
 }
+EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
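Note the roundup() in nf_ct_alloc_hashtable(): the requested bucket count is rounded up so the table fills whole pages, and the caller's sizep is updated to the size actually allocated. A quick worked example (assuming 4 KiB pages and 64-bit pointers, so sizeof(struct hlist_head) == 8):

	unsigned int requested = 1000;
	unsigned int per_page  = 4096 / 8;	/* 512 hlist_heads per page */
	unsigned int size      = roundup(requested, per_page);
	/* size == 1024: two full pages of buckets rather than 1000 */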

-int set_hashsize(const char *val, struct kernel_param *kp)
+int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
-	int i, bucket, hashsize, vmalloced;
-	int old_vmalloced, old_size;
+	int i, bucket, vmalloced, old_vmalloced;
+	unsigned int hashsize, old_size;
 	int rnd;
-	struct list_head *hash, *old_hash;
+	struct hlist_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;

 	/* On boot, we can set this without any fancy locking. */
 	if (!nf_conntrack_htable_size)
 		return param_set_uint(val, kp);

-	hashsize = simple_strtol(val, NULL, 0);
+	hashsize = simple_strtoul(val, NULL, 0);
 	if (!hashsize)
 		return -EINVAL;

-	hash = alloc_hashtable(hashsize, &vmalloced);
+	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
 	if (!hash)
 		return -ENOMEM;

@@ -1658,14 +1076,19 @@ int set_hashsize(const char *val, struct kernel_param *kp)
 	 * use a new random seed */
 	get_random_bytes(&rnd, 4);

-	write_lock_bh(&nf_conntrack_lock);
+	/* Lookups in the old hash might happen in parallel, which means we
+	 * might get false negatives during connection lookup. New connections
+	 * created because of a false negative won't make it into the hash
+	 * though since that required taking the lock.
+	 */
+	spin_lock_bh(&nf_conntrack_lock);
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		while (!list_empty(&nf_conntrack_hash[i])) {
-			h = list_entry(nf_conntrack_hash[i].next,
-				       struct nf_conntrack_tuple_hash, list);
-			list_del(&h->list);
+		while (!hlist_empty(&nf_conntrack_hash[i])) {
+			h = hlist_entry(nf_conntrack_hash[i].first,
+					struct nf_conntrack_tuple_hash, hnode);
+			hlist_del_rcu(&h->hnode);
 			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
-			list_add_tail(&h->list, &hash[bucket]);
+			hlist_add_head(&h->hnode, &hash[bucket]);
 		}
 	}
 	old_size = nf_conntrack_htable_size;
@@ -1676,67 +1099,74 @@ int set_hashsize(const char *val, struct kernel_param *kp)
 	nf_conntrack_vmalloc = vmalloced;
 	nf_conntrack_hash = hash;
 	nf_conntrack_hash_rnd = rnd;
-	write_unlock_bh(&nf_conntrack_lock);
+	spin_unlock_bh(&nf_conntrack_lock);

-	free_conntrack_hash(old_hash, old_vmalloced, old_size);
+	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

-module_param_call(hashsize, set_hashsize, param_get_uint,
+module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
 		  &nf_conntrack_htable_size, 0600);
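Because module_param_call() registers nf_conntrack_set_hashsize() as the setter for the hashsize parameter (mode 0600), root can resize the table on a live system, and param_get_uint reports the size currently in use. On a typical sysfs layout that looks like:

	# echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
	# cat /sys/module/nf_conntrack/parameters/hashsize
	16384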

 int __init nf_conntrack_init(void)
 {
-	unsigned int i;
+	int max_factor = 8;
 	int ret;

 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
-	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
+	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
 	if (!nf_conntrack_htable_size) {
 		nf_conntrack_htable_size
 			= (((num_physpages << PAGE_SHIFT) / 16384)
-			   / sizeof(struct list_head));
+			   / sizeof(struct hlist_head));
 		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
-			nf_conntrack_htable_size = 8192;
-		if (nf_conntrack_htable_size < 16)
-			nf_conntrack_htable_size = 16;
+			nf_conntrack_htable_size = 16384;
+		if (nf_conntrack_htable_size < 32)
+			nf_conntrack_htable_size = 32;
+
+		/* Use a max. factor of four by default to get the same max as
+		 * with the old struct list_heads. When a table size is given
+		 * we use the old value of 8 to avoid reducing the max.
+		 * entries. */
+		max_factor = 4;
 	}
-	nf_conntrack_max = 8 * nf_conntrack_htable_size;
-
-	printk("nf_conntrack version %s (%u buckets, %d max)\n",
-	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
-	       nf_conntrack_max);
-
-	nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
-					    &nf_conntrack_vmalloc);
+	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+						  &nf_conntrack_vmalloc);
 	if (!nf_conntrack_hash) {
 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
 		goto err_out;
 	}

-	ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
-					  sizeof(struct nf_conn));
-	if (ret < 0) {
+	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
+
+	printk("nf_conntrack version %s (%u buckets, %d max)\n",
+	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
+	       nf_conntrack_max);
+
+	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+						sizeof(struct nf_conn),
+						0, 0, NULL);
+	if (!nf_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
 		goto err_free_hash;
 	}

-	nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
-					sizeof(struct nf_conntrack_expect),
-					0, 0, NULL, NULL);
-	if (!nf_conntrack_expect_cachep) {
-		printk(KERN_ERR "Unable to create nf_expect slab cache\n");
+	ret = nf_conntrack_proto_init();
+	if (ret < 0)
 		goto err_free_conntrack_slab;
-	}

-	/* Don't NEED lock here, but good form anyway. */
-	write_lock_bh(&nf_conntrack_lock);
-	for (i = 0; i < PF_MAX; i++)
-		nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
-	write_unlock_bh(&nf_conntrack_lock);
+	ret = nf_conntrack_expect_init();
+	if (ret < 0)
+		goto out_fini_proto;
+
+	ret = nf_conntrack_helper_init();
+	if (ret < 0)
+		goto out_fini_expect;

 	/* For use by REJECT target */
-	ip_ct_attach = __nf_conntrack_attach;
+	rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
+	rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

 	/* Set up fake conntrack:
 	    - to never be deleted, not in any hashes */
@@ -1746,11 +1176,15 @@ int __init nf_conntrack_init(void)

 	return ret;

+out_fini_expect:
+	nf_conntrack_expect_fini();
+out_fini_proto:
+	nf_conntrack_proto_fini();
 err_free_conntrack_slab:
-	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
+	kmem_cache_destroy(nf_conntrack_cachep);
 err_free_hash:
-	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
-			    nf_conntrack_htable_size);
+	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
+			     nf_conntrack_htable_size);
 err_out:
 	return -ENOMEM;
 }
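To make the auto-sizing above concrete, here is a worked example, assuming a 32-bit machine with 1 GiB of RAM and 4 KiB pages (PAGE_SHIFT == 12, sizeof(struct hlist_head) == 4); the result matches the ">= 1GB machines have 16384 buckets" comment. The names below are shorthand for the globals set in nf_conntrack_init():

	/* num_physpages = 1 GiB / 4 KiB = 262144 pages */
	htable_size = ((262144UL << 12) / 16384) / 4;	/* = 16384 buckets */
	nf_conntrack_max = 4 * htable_size;		/* = 65536 entries */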