netfilter: nf_conntrack: fix a race in __nf_conntrack_confirm against nf_ct_get_next_corpse()
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9de4bd4..eeeb8bc 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>
 
 #include <net/netfilter/nf_conntrack.h>
@@ -41,6 +42,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -67,7 +69,7 @@ static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                 unsigned int size, unsigned int rnd)
+                                 u16 zone, unsigned int size, unsigned int rnd)
 {
        unsigned int n;
        u_int32_t h;
@@ -78,15 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        h = jhash2((u32 *)tuple, n,
-                  rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
-                         tuple->dst.protonum));
+                  zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
+                                tuple->dst.protonum));
 
        return ((u64)h * size) >> 32;
 }
 
-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+                                      const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, nf_conntrack_htable_size,
+       return __hash_conntrack(tuple, zone, net->ct.htable_size,
                                nf_conntrack_hash_rnd);
 }
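
The zone id is folded straight into the jhash seed, so identical tuples in
different zones usually land in different buckets; the lookup paths below
additionally compare zones explicitly, so a seed collision only costs a
longer chain walk, never a false match. A minimal userspace sketch of the
seed folding, with toy_hash() standing in for the kernel's jhash2()
(illustrative, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for jhash2(): any seeded mixer shows the effect. */
    static uint32_t toy_hash(const uint32_t *key, unsigned int n, uint32_t seed)
    {
            uint32_t h = seed;
            while (n--)
                    h = (h ^ *key++) * 0x9e3779b1u;
            return h;
    }

    static unsigned int bucket(const uint32_t *tuple, unsigned int n,
                               uint16_t zone, uint32_t rnd, unsigned int size)
    {
            /* "zone ^ rnd" mirrors the patched seed computation above */
            uint32_t h = toy_hash(tuple, n, (uint32_t)zone ^ rnd);
            return (unsigned int)(((uint64_t)h * size) >> 32);
    }

    int main(void)
    {
            uint32_t t[4] = { 0x0a000001, 0x0a000002, 80, 6 };
            printf("zone 0 -> bucket %u\n", bucket(t, 4, 0, 0xdeadbeef, 16384));
            printf("zone 1 -> bucket %u\n", bucket(t, 4, 1, 0xdeadbeef, 16384));
            return 0;
    }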
 
@@ -290,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
  * - Caller must lock nf_conntrack_lock before calling this function
  */
 struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
+__nf_conntrack_find(struct net *net, u16 zone,
+                   const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(tuple);
+       unsigned int hash = hash_conntrack(net, zone, tuple);
 
        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
@@ -302,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
        local_bh_disable();
 begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
-               if (nf_ct_tuple_equal(tuple, &h->tuple)) {
+               if (nf_ct_tuple_equal(tuple, &h->tuple) &&
+                   nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
                        NF_CT_STAT_INC(net, found);
                        local_bh_enable();
                        return h;
@@ -314,8 +319,10 @@ begin:
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
-       if (get_nulls_value(n) != hash)
+       if (get_nulls_value(n) != hash) {
+               NF_CT_STAT_INC(net, search_restart);
                goto begin;
+       }
        local_bh_enable();
 
        return NULL;
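
The restart at the bottom of the walk is the standard SLAB_DESTROY_BY_RCU
"nulls" pattern: every chain ends in a sentinel pointer encoding the chain
index, so a reader dragged onto another chain by a concurrent
delete-and-reinsert notices it by landing on the wrong sentinel. The hunk
only adds a search_restart counter so these restarts show up in the per-cpu
stats. A compilable single-threaded skeleton of the pattern (RCU and memory
barriers omitted; names are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    struct node { struct node *next; uint32_t key; };

    /* Low bit set marks a sentinel; the remaining bits encode the chain. */
    #define IS_NULLS(p)     ((uintptr_t)(p) & 1)
    #define NULLS_VALUE(p)  ((uintptr_t)(p) >> 1)

    static struct node *lookup(struct node **table, uint32_t hash, uint32_t key)
    {
            struct node *n;
    begin:
            for (n = table[hash]; !IS_NULLS(n); n = n->next)
                    if (n->key == key)
                            return n;
            if (NULLS_VALUE(n) != hash)     /* wrong sentinel: we were moved */
                    goto begin;             /* the restart counted above */
            return NULL;
    }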
@@ -324,21 +331,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
+nf_conntrack_find_get(struct net *net, u16 zone,
+                     const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
 
        rcu_read_lock();
 begin:
-       h = __nf_conntrack_find(net, tuple);
+       h = __nf_conntrack_find(net, zone, tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
-                       if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
+                       if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
+                                    nf_ct_zone(ct) != zone)) {
                                nf_ct_put(ct);
                                goto begin;
                        }
@@ -364,10 +373,13 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        unsigned int hash, repl_hash;
+       u16 zone;
 
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       zone = nf_ct_zone(ct);
+       hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
 }
@@ -384,6 +396,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
+       u16 zone;
 
        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);
@@ -395,8 +408,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;
 
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       zone = nf_ct_zone(ct);
+       hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
@@ -410,16 +424,28 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
        spin_lock_bh(&nf_conntrack_lock);
 
+       /* We have to check the DYING flag inside the lock to prevent
+          a race against nf_ct_get_next_corpse() possibly called from
+          user context, else we insert an already 'dead' hash, blocking
+          further use of that particular connection -JM */
+
+       if (unlikely(nf_ct_is_dying(ct))) {
+               spin_unlock_bh(&nf_conntrack_lock);
+               return NF_ACCEPT;
+       }
+
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                     &h->tuple))
+                                     &h->tuple) &&
+                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-                                     &h->tuple))
+                                     &h->tuple) &&
+                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
        /* Remove from unconfirmed list */
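
The DYING check added at the top of this hunk is the fix named in the
subject line. nf_ct_get_next_corpse() sets IPS_DYING on unconfirmed
conntracks while holding nf_conntrack_lock, so testing the flag before
taking the lock leaves a window in which a conntrack killed by a flush is
still inserted into the hash, where the dead entry then blocks its tuple.
The interleaving being closed, as an assumed simplification of the two
paths:

    /*
     *  CPU0: __nf_conntrack_confirm()     CPU1: nf_ct_get_next_corpse()
     *  ------------------------------     -----------------------------
     *  nf_ct_is_dying(ct) -> false
     *                                     spin_lock_bh(&nf_conntrack_lock);
     *                                     set_bit(IPS_DYING_BIT, &ct->status);
     *                                     spin_unlock_bh(&nf_conntrack_lock);
     *  spin_lock_bh(&nf_conntrack_lock);
     *  hlist_nulls_add_head_rcu(...);      <- dead entry enters the hash
     *  spin_unlock_bh(&nf_conntrack_lock);
     *
     * Re-testing IPS_DYING inside the locked region closes the window: the
     * flag is either seen set (and the packet is accepted without insert),
     * or it is set only after the insert completed under the same lock.
     */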
@@ -466,15 +492,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(tuple);
+       struct nf_conn *ct;
+       u16 zone = nf_ct_zone(ignored_conntrack);
+       unsigned int hash = hash_conntrack(net, zone, tuple);
 
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
-               if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
-                   nf_ct_tuple_equal(tuple, &h->tuple)) {
+               ct = nf_ct_tuplehash_to_ctrack(h);
+               if (ct != ignored_conntrack &&
+                   nf_ct_tuple_equal(tuple, &h->tuple) &&
+                   nf_ct_zone(ct) == zone) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
@@ -501,7 +531,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
        int dropped = 0;
 
        rcu_read_lock();
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < net->ct.htable_size; i++) {
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -521,7 +551,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
                if (cnt >= NF_CT_EVICTION_RANGE)
                        break;
 
-               hash = (hash + 1) % nf_conntrack_htable_size;
+               hash = (hash + 1) % net->ct.htable_size;
        }
        rcu_read_unlock();
 
@@ -537,7 +567,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
        return dropped;
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net,
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
@@ -555,7 +585,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 
        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-               unsigned int hash = hash_conntrack(orig);
+               unsigned int hash = hash_conntrack(net, zone, orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
@@ -592,13 +622,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 #ifdef CONFIG_NET_NS
        ct->ct_net = net;
 #endif
-
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       if (zone) {
+               struct nf_conntrack_zone *nf_ct_zone;
+
+               nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
+               if (!nf_ct_zone)
+                       goto out_free;
+               nf_ct_zone->id = zone;
+       }
+#endif
        /*
         * changes to lookup keys must be done before setting refcnt to 1
         */
        smp_wmb();
        atomic_set(&ct->ct_general.use, 1);
        return ct;
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+out_free:
+       kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+       return ERR_PTR(-ENOMEM);
+#endif
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
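
nf_conntrack_alloc() attaches the zone extension only for a nonzero zone, so
the common zone-0 case pays nothing. The read side matches: the accessor
added by this series in nf_conntrack_zones.h falls back to the default when
no extension is present (quoted from memory, treat as illustrative):

    static inline u16 nf_ct_zone(const struct nf_conn *ct)
    {
    #ifdef CONFIG_NF_CONNTRACK_ZONES
            struct nf_conntrack_zone *nf_ct_zone;

            nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
            if (nf_ct_zone)
                    return nf_ct_zone->id;
    #endif
            return NF_CT_DEFAULT_ZONE;
    }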
 
@@ -616,7 +661,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress.  Otherwise it really is unclassifiable. */
 static struct nf_conntrack_tuple_hash *
-init_conntrack(struct net *net,
+init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
@@ -626,14 +671,16 @@ init_conntrack(struct net *net,
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
+       struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp;
+       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
 
        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }
 
-       ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
+       ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
        if (IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
@@ -646,10 +693,14 @@ init_conntrack(struct net *net,
        }
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
-       nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
+
+       ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
+       nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+                                ecache ? ecache->expmask : 0,
+                            GFP_ATOMIC);
 
        spin_lock_bh(&nf_conntrack_lock);
-       exp = nf_ct_find_expectation(net, tuple);
+       exp = nf_ct_find_expectation(net, zone, tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         ct, exp);
@@ -671,7 +722,7 @@ init_conntrack(struct net *net,
                nf_conntrack_get(&ct->master->ct_general);
                NF_CT_STAT_INC(net, expect_new);
        } else {
-               __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
+               __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }
 
@@ -692,7 +743,7 @@ init_conntrack(struct net *net,
 
 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
 static inline struct nf_conn *
-resolve_normal_ct(struct net *net,
+resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
@@ -705,6 +756,7 @@ resolve_normal_ct(struct net *net,
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
+       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
@@ -714,9 +766,10 @@ resolve_normal_ct(struct net *net,
        }
 
        /* look for tuple match */
-       h = nf_conntrack_find_get(net, &tuple);
+       h = nf_conntrack_find_get(net, zone, &tuple);
        if (!h) {
-               h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
+               h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
+                                  skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
@@ -753,7 +806,7 @@ unsigned int
 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
 {
-       struct nf_conn *ct;
+       struct nf_conn *ct, *tmpl = NULL;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
@@ -762,10 +815,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        int set_reply = 0;
        int ret;
 
-       /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
-               NF_CT_STAT_INC_ATOMIC(net, ignore);
-               return NF_ACCEPT;
+               /* Previously seen (loopback or untracked)?  Ignore. */
+               tmpl = (struct nf_conn *)skb->nfct;
+               if (!nf_ct_is_template(tmpl)) {
+                       NF_CT_STAT_INC_ATOMIC(net, ignore);
+                       return NF_ACCEPT;
+               }
+               skb->nfct = NULL;
        }
 
        /* rcu_read_lock()ed by nf_hook_slow */
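
A template is an ordinary struct nf_conn flagged as such and attached to
skb->nfct by a rule before conntrack runs; it carries per-rule configuration
(the zone id, event masks) down into resolve_normal_ct() and
init_conntrack(). The check used here presumably reduces to a status-bit
test, along these lines (illustrative reconstruction of the companion header
change):

    static inline int nf_ct_is_template(const struct nf_conn *ct)
    {
            return test_bit(IPS_TEMPLATE_BIT, &ct->status);
    }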
@@ -776,7 +833,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                pr_debug("not prepared to track yet or error occured\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
-               return -ret;
+               ret = -ret;
+               goto out;
        }
 
        l4proto = __nf_ct_l4proto_find(pf, protonum);
@@ -785,26 +843,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
         * inverse of the return code tells to the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
-               ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
+               ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
+                                    pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
-                       return -ret;
+                       ret = -ret;
+                       goto out;
                }
        }
 
-       ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
+       ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
-               return NF_ACCEPT;
+               ret = NF_ACCEPT;
+               goto out;
        }
 
        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
-               return NF_DROP;
+               ret = NF_DROP;
+               goto out;
        }
 
        NF_CT_ASSERT(skb->nfct);
@@ -819,11 +881,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
-               return -ret;
+               ret = -ret;
+               goto out;
        }
 
        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
-               nf_conntrack_event_cache(IPCT_STATUS, ct);
+               nf_conntrack_event_cache(IPCT_REPLY, ct);
+out:
+       if (tmpl)
+               nf_ct_put(tmpl);
 
        return ret;
 }
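
All of the return-site rewrites in this function serve one purpose: every
exit now funnels through out:, the single point that drops the reference the
template brought along on skb->nfct. Assumed life cycle, sketched:

    /*
     *  rule target:      skb->nfct = &tmpl->ct_general;    (+1 reference)
     *  nf_conntrack_in:  tmpl = (struct nf_conn *)skb->nfct;
     *                    skb->nfct = NULL;                  (detach it)
     *                    ... zone and ecache config read from tmpl ...
     *  out:              if (tmpl) nf_ct_put(tmpl);         (-1 reference)
     */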
@@ -862,7 +928,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
                return;
 
        rcu_read_lock();
-       __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
+       __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -936,6 +1002,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 }
 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
+       .len    = sizeof(struct nf_conntrack_zone),
+       .align  = __alignof__(struct nf_conntrack_zone),
+       .id     = NF_CT_EXT_ZONE,
+};
+#endif
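
The payload behind this extension type is a single u16; per the companion
header (illustrative reproduction):

    struct nf_conntrack_zone {
            u16     id;
    };

Registering the type up front is what lets nf_ct_ext_add() in
nf_conntrack_alloc() know the length and alignment to reserve.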
+
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
 
 #include <linux/netfilter/nfnetlink.h>
@@ -1012,7 +1086,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        struct hlist_nulls_node *n;
 
        spin_lock_bh(&nf_conntrack_lock);
-       for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+       for (; *bucket < net->ct.htable_size; (*bucket)++) {
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
@@ -1117,6 +1191,9 @@ static void nf_conntrack_cleanup_init_net(void)
 
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       nf_ct_extend_unregister(&nf_ct_zone_extend);
+#endif
 }
 
 static void nf_conntrack_cleanup_net(struct net *net)
@@ -1130,7 +1207,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
        }
 
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            nf_conntrack_htable_size);
+                            net->ct.htable_size);
        nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
@@ -1190,9 +1267,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
-       int rnd;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
+       struct nf_conn *ct;
+
+       if (current->nsproxy->net_ns != &init_net)
+               return -EOPNOTSUPP;
 
        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
@@ -1206,33 +1286,31 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
        if (!hash)
                return -ENOMEM;
 
-       /* We have to rehahs for the new table anyway, so we also can
-        * use a newrandom seed */
-       get_random_bytes(&rnd, sizeof(rnd));
-
        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though since that required taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < init_net.ct.htable_size; i++) {
                while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
                        h = hlist_nulls_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnnode);
+                       ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+                       bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
+                                                 hashsize,
+                                                 nf_conntrack_hash_rnd);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
-       old_size = nf_conntrack_htable_size;
+       old_size = init_net.ct.htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;
 
-       nf_conntrack_htable_size = hashsize;
+       init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
-       nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);
 
        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
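
Note what the resize path no longer does: it keeps nf_conntrack_hash_rnd
rather than drawing a fresh seed. The seed is global while only init_net's
table is resized here (other namespaces are refused with -EOPNOTSUPP above),
so re-seeding would silently misplace every entry still sitting in the other
namespaces' tables. The resize itself is normally driven by writing
/sys/module/nf_conntrack/parameters/hashsize. A userspace sketch of the
rehash loop with toy types, re-bucketing under the same seed:

    #include <stdint.h>

    struct entry { struct entry *next; uint32_t tuplehash; };

    static void rehash(struct entry **old, unsigned int old_size,
                       struct entry **tbl, unsigned int new_size, uint32_t rnd)
    {
            for (unsigned int i = 0; i < old_size; i++) {
                    while (old[i]) {
                            struct entry *e = old[i];
                            uint32_t b;

                            old[i] = e->next;       /* unlink the chain head */
                            /* same rnd, new size: mirrors the hunk above */
                            b = (uint32_t)(((uint64_t)(e->tuplehash ^ rnd)
                                            * new_size) >> 32);
                            e->next = tbl[b];       /* push onto new chain */
                            tbl[b] = e;
                    }
            }
    }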
@@ -1267,7 +1345,7 @@ static int nf_conntrack_init_init_net(void)
        }
        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
-       printk("nf_conntrack version %s (%u buckets, %d max)\n",
+       printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);
 
@@ -1279,6 +1357,11 @@ static int nf_conntrack_init_init_net(void)
        if (ret < 0)
                goto err_helper;
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       ret = nf_ct_extend_register(&nf_ct_zone_extend);
+       if (ret < 0)
+               goto err_extend;
+#endif
        /* Set up fake conntrack: to never be deleted, not in any hashes */
 #ifdef CONFIG_NET_NS
        nf_conntrack_untracked.ct_net = &init_net;
@@ -1289,6 +1372,10 @@ static int nf_conntrack_init_init_net(void)
 
        return 0;
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+err_extend:
+       nf_conntrack_helper_fini();
+#endif
 err_helper:
        nf_conntrack_proto_fini();
 err_proto:
@@ -1328,7 +1415,9 @@ static int nf_conntrack_init_net(struct net *net)
                ret = -ENOMEM;
                goto err_cache;
        }
-       net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+
+       net->ct.htable_size = nf_conntrack_htable_size;
+       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
                                             &net->ct.hash_vmalloc, 1);
        if (!net->ct.hash) {
                ret = -ENOMEM;
@@ -1353,7 +1442,7 @@ err_acct:
        nf_conntrack_expect_fini(net);
 err_expect:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            nf_conntrack_htable_size);
+                            net->ct.htable_size);
 err_hash:
        kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache: