netfilter: netns nf_conntrack: pass conntrack to nf_conntrack_event_cache() not skb
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

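/* Hash a tuple into a bucket index: jhash2 covers the source part and
 * the destination address, the destination port and protocol number are
 * folded into the seed, and the final multiply-shift maps the 32-bit
 * hash uniformly onto [0, size) without a modulo. */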
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int n;
        u_int32_t h;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        h = jhash2((u32 *)tuple, n,
                   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
                          tuple->dst.protonum));

        return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}

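/* Build a tuple from a packet: the L3 protocol fills in the addresses,
 * the L4 protocol fills in the ports (or equivalent IDs).  The tuple is
 * always created in the ORIGINAL direction. */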
bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

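/* Unlink both directions from the hash table and drop any expectations
 * still pending on this conntrack.  Caller must hold nf_conntrack_lock. */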
static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        spin_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too. */
        nf_ct_remove_expectations(ct);

        /* We overload first tuple to link into unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
                hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        }

        NF_CT_STAT_INC(delete);
        spin_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        if (help) {
                rcu_read_lock();
                helper = rcu_dereference(help->helper);
                if (helper && helper->destroy)
                        helper->destroy(ct);
                rcu_read_unlock();
        }

        spin_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(delete_list);
        clean_from_lists(ct);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_put(ct);
}

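/* Lockless lookup: safe under rcu_read_lock() only.  The returned entry
 * carries no reference; use nf_conntrack_find_get() to also take one. */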
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
        hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }
        local_bh_enable();

        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
        h = __nf_conntrack_find(net, tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
        }
        rcu_read_unlock();

        return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                           &net->ct.hash[hash]);
        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
                           &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        spin_lock_bh(&nf_conntrack_lock);
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

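/* Confirmation moves a conntrack off the unconfirmed list and into the
 * hash table once its first packet has made it through all hooks; only
 * packets in the ORIGINAL direction may confirm. */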
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        unsigned int hash, repl_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct hlist_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual packet
           which created the connection will be IP_CT_NEW or, for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);

        spin_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple))
                        goto out;
        hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple))
                        goto out;

        /* Remove from unconfirmed list */
        hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);
        NF_CT_STAT_INC(insert);
        spin_unlock_bh(&nf_conntrack_lock);
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);
#ifdef CONFIG_NF_NAT_NEEDED
        if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
            test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_NATINFO, ct);
#endif
        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        NF_CT_STAT_INC(insert_failed);
        spin_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
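/* Scan at most NF_CT_EVICTION_RANGE entries starting at the given
 * bucket, wrapping to the following buckets if needed, and drop the
 * last non-ASSURED conntrack found. */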
static noinline int early_drop(struct net *net, unsigned int hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_node *n;
        unsigned int i, cnt = 0;
        int dropped = 0;

        rcu_read_lock();
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
                                ct = tmp;
                        cnt++;
                }

                if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        ct = NULL;
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
                hash = (hash + 1) % nf_conntrack_htable_size;
        }
        rcu_read_unlock();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC_ATOMIC(early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}

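/* Allocate a new, unconfirmed conntrack.  If the table is full, try to
 * evict an entry via early_drop(); on failure return ERR_PTR(-ENOMEM).
 * The timer is initialized but not started until confirmation. */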
struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        struct nf_conn *ct = NULL;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&net->ct.count);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ct->ct_general.use, 1);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
        ct->ct_net = net;
#endif
        INIT_RCU_HEAD(&ct->rcu);

        return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

static void nf_conntrack_free_rcu(struct rcu_head *head)
{
        struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
        struct net *net = nf_ct_net(ct);

        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        atomic_dec(&net->ct.count);
}

void nf_conntrack_free(struct nf_conn *ct)
{
        nf_ct_ext_destroy(ct);
        call_rcu(&ct->rcu, nf_conntrack_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
        if (ct == NULL || IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
        }

        if (!l4proto->new(ct, skb, dataoff)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);

        spin_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(net, tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         ct, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &ct->status);
                ct->master = exp->master;
                if (exp->helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, exp->helper);
                }

#ifdef CONFIG_NF_CONNTRACK_MARK
                ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                ct->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&ct->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else {
                struct nf_conntrack_helper *helper;

                helper = __nf_ct_helper_find(&repl_tuple);
                if (helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, helper);
                }
                NF_CT_STAT_INC(new);
        }

        /* Overload tuple linked list to put us in unconfirmed list. */
        hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                       &net->ct.unconfirmed);

        spin_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(net, &tuple);
        if (!h) {
                h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Tell the caller to set the reply bit if this packet is OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

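/* Main conntrack hook: find the L3/L4 protocol handlers, let the L4
 * handler sanity-check the packet, look up or create the conntrack,
 * then run the per-protocol state machine on the packet. */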
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
                NF_CT_STAT_INC_ATOMIC(ignore);
                return NF_ACCEPT;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet, error, unclean...
         * the inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(error);
                        NF_CT_STAT_INC_ATOMIC(invalid);
                        return -ret;
                }
        }

        ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(drop);
                return NF_DROP;
        }

        NF_CT_ASSERT(skb->nfct);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, ct);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        helper = __nf_ct_helper_find(newreply);
        if (helper == NULL) {
                if (help)
                        rcu_assign_pointer(help->helper, NULL);
                goto out;
        }

        if (help == NULL) {
                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                if (help == NULL)
                        goto out;
        } else {
                memset(&help->help, 0, sizeof(help->help));
        }

        rcu_assign_pointer(help->helper, helper);
out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        int event = 0;

        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        spin_lock_bh(&nf_conntrack_lock);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ
                    && del_timer(&ct->timeout)) {
                        ct->timeout.expires = newtime;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
        }

acct:
        if (do_acct) {
                struct nf_conn_counter *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                }
        }

        spin_unlock_bh(&nf_conntrack_lock);

        /* must be unlocked when calling event cache */
        if (event)
                nf_conntrack_event_cache(event, ct);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_counter *acct;

                spin_lock_bh(&nf_conntrack_lock);
                acct = nf_conn_acct_find(ct);
                if (acct) {
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                }
                spin_unlock_bh(&nf_conntrack_lock);
        }

        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
        NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_node *n;

        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
        }
        hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        set_bit(IPS_DYING_BIT, &ct->status);
        }
        spin_unlock_bh(&nf_conntrack_lock);
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock_bh(&nf_conntrack_lock);
        return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(struct net *net)
{
        nf_ct_iterate_cleanup(net, kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
        rcu_assign_pointer(ip_ct_attach, NULL);

        /* This makes sure all current packets have passed through
           netfilter framework.  Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_ct_event_cache_flush();
 i_see_dead_people:
        nf_conntrack_flush(net);
        if (atomic_read(&net->ct.count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        rcu_assign_pointer(nf_ct_destroy, NULL);

        kmem_cache_destroy(nf_conntrack_cachep);
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);

        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini(net);
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
}

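/* Allocate a hash table, preferring plain pages and falling back to
 * vmalloc; *vmalloced records which method was used so the table can
 * be freed correctly later. */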
struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
{
        struct hlist_head *hash;
        unsigned int size, i;

        *vmalloced = 0;

        size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
        hash = (void *)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
                                        get_order(sizeof(struct hlist_head)
                                                  * size));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = vmalloc(sizeof(struct hlist_head) * size);
        }

        if (hash)
                for (i = 0; i < size; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

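/* Handler for writes to the "hashsize" module parameter: allocate a new
 * table, rehash every entry into it under nf_conntrack_lock, then free
 * the old table. */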
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
        int rnd;
        struct hlist_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtoul(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we can also use
         * a new random seed */
        get_random_bytes(&rnd, 4);

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though, since that requires taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_empty(&init_net.ct.hash[i])) {
                        h = hlist_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnode);
                        hlist_del_rcu(&h->hnode);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        hlist_add_head(&h->hnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;

        nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);

        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);

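/* Bring up the conntrack core: hash table, conntrack slab, protocol
 * trackers, expectations, helpers and accounting, plus the fake
 * nf_conntrack_untracked entry.  Errors unwind in reverse order. */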
int nf_conntrack_init(struct net *net)
{
        int max_factor = 8;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads. When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }
        atomic_set(&net->ct.count, 0);
        net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                             &net->ct.hash_vmalloc);
        if (!net->ct.hash) {
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_out;
        }
        INIT_HLIST_HEAD(&net->ct.unconfirmed);

        nf_conntrack_max = max_factor * nf_conntrack_htable_size;

        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                0, 0, NULL);
        if (!nf_conntrack_cachep) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                goto err_free_hash;
        }

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_free_conntrack_slab;

        ret = nf_conntrack_expect_init(net);
        if (ret < 0)
                goto out_fini_proto;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto out_fini_expect;

        ret = nf_conntrack_acct_init();
        if (ret < 0)
                goto out_fini_helper;

        /* For use by REJECT target */
        rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
        rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
        nf_conntrack_untracked.ct_net = &init_net;
#endif
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return ret;

out_fini_helper:
        nf_conntrack_helper_fini();
out_fini_expect:
        nf_conntrack_expect_fini(net);
out_fini_proto:
        nf_conntrack_proto_fini();
err_free_conntrack_slab:
        kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
err_out:
        return -ENOMEM;
}