/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

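/* Map a tuple to a hash bucket.  jhash2() covers the u32 words up to and
 * including the destination address, while the destination port and
 * protocol number are folded into the seed.  The final
 * ((u64)h * size) >> 32 scales the 32-bit hash uniformly onto [0, size)
 * without an expensive modulo. */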
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int n;
        u_int32_t h;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        h = jhash2((u32 *)tuple, n,
                   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
                          tuple->dst.protonum));

        return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
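
/* A typical call, mirroring resolve_normal_ct() below: extract the
 * original-direction tuple once the l3/l4 protocol handlers are known.
 *
 *      struct nf_conntrack_tuple tuple;
 *
 *      if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff,
 *                           l3num, protonum, &tuple, l3proto, l4proto))
 *              return NULL;
 *
 * A false return means the packet was too short or malformed to track.
 */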

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        spin_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too. */
        nf_ct_remove_expectations(ct);

        /* We overload first tuple to link into unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
                hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        }

        NF_CT_STAT_INC(delete);
        spin_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        if (help) {
                rcu_read_lock();
                helper = rcu_dereference(help->helper);
                if (helper && helper->destroy)
                        helper->destroy(ct);
                rcu_read_unlock();
        }

        spin_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(delete_list);
        clean_from_lists(ct);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_put(ct);
}
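
/* death_by_timeout() is the timer callback installed by setup_timer() in
 * nf_conntrack_alloc() below; early_drop() and nf_ct_iterate_cleanup() also
 * call it directly after a successful del_timer(). */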

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
        hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }
        local_bh_enable();

        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);
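
/* __nf_conntrack_find() returns the entry without taking a reference, so
 * the caller must hold rcu_read_lock() and may only use the result inside
 * the read-side section.  nf_conntrack_find_get() below is the safe
 * wrapper: it takes a reference via atomic_inc_not_zero(), which fails if
 * the entry is already on its way to being freed. */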

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
        h = __nf_conntrack_find(net, tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
        }
        rcu_read_unlock();

        return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                           &net->ct.hash[hash]);
        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
                           &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        spin_lock_bh(&nf_conntrack_lock);
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        unsigned int hash, repl_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct hlist_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in other direction.  Actual packet
           which created connection will be IP_CT_NEW or for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);

        spin_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple))
                        goto out;
        hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple))
                        goto out;

        /* Remove from unconfirmed list */
        hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);
        NF_CT_STAT_INC(insert);
        spin_unlock_bh(&nf_conntrack_lock);
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, skb);
#ifdef CONFIG_NF_NAT_NEEDED
        if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
            test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_NATINFO, skb);
#endif
        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, skb);
        return NF_ACCEPT;

out:
        NF_CT_STAT_INC(insert_failed);
        spin_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
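
/* __nf_conntrack_confirm() is not called from this file; the per-family
 * hook modules run it (via the nf_conntrack_confirm() inline) on the first
 * packet of a connection, late in the hook chain once NAT has settled both
 * tuples. */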

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

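/* When the table is full, early_drop() below examines roughly
 * NF_CT_EVICTION_RANGE entries, starting at the bucket the new tuple
 * hashed to, and evicts the last candidate seen that is not yet ASSURED. */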
#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_node *n;
        unsigned int i, cnt = 0;
        int dropped = 0;

        rcu_read_lock();
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
                                ct = tmp;
                        cnt++;
                }

                if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        ct = NULL;
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
                hash = (hash + 1) % nf_conntrack_htable_size;
        }
        rcu_read_unlock();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC_ATOMIC(early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        struct nf_conn *ct = NULL;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&net->ct.count);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ct->ct_general.use, 1);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
        ct->ct_net = net;
#endif
        INIT_RCU_HEAD(&ct->rcu);

        return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
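
/* Note that nf_conntrack_alloc() reports a full table as ERR_PTR(-ENOMEM)
 * rather than NULL, so callers must check the result with IS_ERR() as
 * init_conntrack() does below. */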

static void nf_conntrack_free_rcu(struct rcu_head *head)
{
        struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
        struct net *net = nf_ct_net(ct);

        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        atomic_dec(&net->ct.count);
}

void nf_conntrack_free(struct nf_conn *ct)
{
        nf_ct_ext_destroy(ct);
        call_rcu(&ct->rcu, nf_conntrack_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
        if (ct == NULL || IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
        }

        if (!l4proto->new(ct, skb, dataoff)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);

        spin_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(net, tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         ct, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &ct->status);
                ct->master = exp->master;
                if (exp->helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, exp->helper);
                }

#ifdef CONFIG_NF_CONNTRACK_MARK
                ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                ct->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&ct->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else {
                struct nf_conntrack_helper *helper;

                helper = __nf_ct_helper_find(&repl_tuple);
                if (helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, helper);
                }
                NF_CT_STAT_INC(new);
        }

        /* Overload tuple linked list to put us in unconfirmed list. */
        hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                       &net->ct.unconfirmed);

        spin_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(net, &tuple);
        if (!h) {
                h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
                NF_CT_STAT_INC_ATOMIC(ignore);
                return NF_ACCEPT;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet, error, unclean...
         * inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL &&
            (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(drop);
                return NF_DROP;
        }

        NF_CT_ASSERT(skb->nfct);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, skb);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
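
/* With the netns pointer now passed in explicitly, a per-family hook is
 * expected to derive it from the device and look roughly like the sketch
 * below (a hypothetical IPv4 PRE_ROUTING hook; the real callers live in
 * the l3proto modules):
 *
 *      static unsigned int ipv4_conntrack_in(unsigned int hooknum,
 *                                            struct sk_buff *skb,
 *                                            const struct net_device *in,
 *                                            const struct net_device *out,
 *                                            int (*okfn)(struct sk_buff *))
 *      {
 *              return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
 *      }
 */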

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        helper = __nf_ct_helper_find(newreply);
        if (helper == NULL) {
                if (help)
                        rcu_assign_pointer(help->helper, NULL);
                goto out;
        }

        if (help == NULL) {
                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                if (help == NULL)
                        goto out;
        } else {
                memset(&help->help, 0, sizeof(help->help));
        }

        rcu_assign_pointer(help->helper, helper);
out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        int event = 0;

        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        spin_lock_bh(&nf_conntrack_lock);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ
                    && del_timer(&ct->timeout)) {
                        ct->timeout.expires = newtime;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
        }

acct:
        if (do_acct) {
                struct nf_conn_counter *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                }
        }

        spin_unlock_bh(&nf_conntrack_lock);

        /* must be unlocked when calling event cache */
        if (event)
                nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
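
/* Callers normally use the nf_ct_refresh() and nf_ct_refresh_acct() inlines
 * from <net/netfilter/nf_conntrack.h>, which fix do_acct to 0 and 1
 * respectively (assuming the header wrappers of this kernel version). */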

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_counter *acct;

                spin_lock_bh(&nf_conntrack_lock);
                acct = nf_conn_acct_find(ct);
                if (acct) {
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                }
                spin_unlock_bh(&nf_conntrack_lock);
        }

        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
        NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
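
/* The NLA_PUT_BE16() macros above jump to the nla_put_failure label when
 * the skb runs out of tailroom, hence the -1 return from that path. */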

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_node *n;

        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
        }
        hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        set_bit(IPS_DYING_BIT, &ct->status);
        }
        spin_unlock_bh(&nf_conntrack_lock);
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock_bh(&nf_conntrack_lock);
        return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(struct net *net)
{
        nf_ct_iterate_cleanup(net, kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
        rcu_assign_pointer(ip_ct_attach, NULL);

        /* This makes sure all current packets have passed through
           netfilter framework.  Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_ct_event_cache_flush();
 i_see_dead_people:
        nf_conntrack_flush(net);
        if (atomic_read(&net->ct.count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        rcu_assign_pointer(nf_ct_destroy, NULL);

        kmem_cache_destroy(nf_conntrack_cachep);
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);

        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini(net);
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
}

struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
{
        struct hlist_head *hash;
        unsigned int size, i;

        *vmalloced = 0;

        size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
        hash = (void *)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
                                        get_order(sizeof(struct hlist_head)
                                                  * size));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = vmalloc(sizeof(struct hlist_head) * size);
        }

        if (hash)
                for (i = 0; i < size; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
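
/* The *vmalloced flag records which allocator succeeded; whoever frees the
 * table must pass the same flag back to nf_ct_free_hashtable() above so the
 * memory is returned to the matching allocator. */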

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
        int rnd;
        struct hlist_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtoul(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we can also
         * use a new random seed */
        get_random_bytes(&rnd, 4);

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though, since that requires taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_empty(&init_net.ct.hash[i])) {
                        h = hlist_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnode);
                        hlist_del_rcu(&h->hnode);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        hlist_add_head(&h->hnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;

        nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);

        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);
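
/* The 0600 parameter above permits resizing the hash table at runtime, e.g.
 * (assuming the module is loaded under its usual name):
 *
 *      echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 */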

int nf_conntrack_init(struct net *net)
{
        int max_factor = 8;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads. When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }
        atomic_set(&net->ct.count, 0);
        net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                             &net->ct.hash_vmalloc);
        if (!net->ct.hash) {
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_out;
        }
        INIT_HLIST_HEAD(&net->ct.unconfirmed);

        nf_conntrack_max = max_factor * nf_conntrack_htable_size;

        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                0, 0, NULL);
        if (!nf_conntrack_cachep) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                goto err_free_hash;
        }

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_free_conntrack_slab;

        ret = nf_conntrack_expect_init(net);
        if (ret < 0)
                goto out_fini_proto;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto out_fini_expect;

        ret = nf_conntrack_acct_init();
        if (ret < 0)
                goto out_fini_helper;

        /* For use by REJECT target */
        rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
        rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
        nf_conntrack_untracked.ct_net = &init_net;
#endif
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return ret;

out_fini_helper:
        nf_conntrack_helper_fini();
out_fini_expect:
        nf_conntrack_expect_fini(net);
out_fini_proto:
        nf_conntrack_proto_fini();
err_free_conntrack_slab:
        kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
err_out:
        return -ENOMEM;
}
1218 }