netfilter: nf_conntrack: fix warning and prototype mismatch
net/netfilter/nf_conntrack_core.c
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

static struct kmem_cache *nf_conntrack_cachep __read_mostly;

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int n;
        u_int32_t h;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        h = jhash2((u32 *)tuple, n,
                   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
                          tuple->dst.protonum));

        return ((u64)h * size) >> 32;
}
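
/* A note on the bucket mapping above: rather than the more obvious
 * "h % size", ((u64)h * size) >> 32 maps the full 32-bit hash range
 * evenly onto [0, size) without a division. A small worked case with
 * size = 4:
 *
 *      h = 0x80000000  =>  ((u64)0x80000000 * 4) >> 32 == 2
 *
 * i.e. a hash halfway through the 32-bit range lands in the middle of
 * the table.
 */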

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
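
/* A minimal usage sketch (illustrative only, not a caller from this
 * file): extracting a tuple directly from a packet, assuming an IPv4
 * skb with a valid network header:
 *
 *      struct nf_conntrack_tuple tuple;
 *
 *      if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
 *                             PF_INET, &tuple))
 *              return NF_DROP;         // untrackable or malformed header
 *
 * The l3/l4 protocol handlers are resolved internally under
 * rcu_read_lock(), so the caller needs no extra locking.
 */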

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        if (!test_bit(IPS_DYING_BIT, &ct->status))
                nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        spin_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too. */
        nf_ct_remove_expectations(ct);

        /* We overload first tuple to link into unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
                hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        }

        NF_CT_STAT_INC(net, delete);
        spin_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        if (help) {
                rcu_read_lock();
                helper = rcu_dereference(help->helper);
                if (helper && helper->destroy)
                        helper->destroy(ct);
                rcu_read_unlock();
        }

        spin_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(net, delete_list);
        clean_from_lists(ct);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
        hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(net, found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(net, searched);
        }
        local_bh_enable();

        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
        h = __nf_conntrack_find(net, tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
        }
        rcu_read_unlock();

        return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
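
/* Typical lookup pattern, sketched for illustration: the entry returned
 * by nf_conntrack_find_get() carries its own reference, taken above via
 * atomic_inc_not_zero() so that entries already on their way to being
 * freed under RCU are skipped. The caller must drop it when done:
 *
 *      h = nf_conntrack_find_get(net, &tuple);
 *      if (h) {
 *              struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *              // ... use ct ...
 *              nf_ct_put(ct);
 *      }
 */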

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                           &net->ct.hash[hash]);
        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
                           &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        spin_lock_bh(&nf_conntrack_lock);
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        unsigned int hash, repl_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct hlist_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in other direction.  Actual packet
           which created connection will be IP_CT_NEW or for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);

        spin_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
        hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple))
                        goto out;
        hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple))
                        goto out;

        /* Remove from unconfirmed list */
        hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);
        NF_CT_STAT_INC(net, insert);
        spin_unlock_bh(&nf_conntrack_lock);
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);
#ifdef CONFIG_NF_NAT_NEEDED
        if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
            test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_NATINFO, ct);
#endif
        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        NF_CT_STAT_INC(net, insert_failed);
        spin_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
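
/* Placement note: __nf_conntrack_confirm() is normally reached through
 * the nf_conntrack_confirm() wrapper from the last conntrack hook on a
 * packet's path (LOCAL_IN or POST_ROUTING), so a connection is only
 * hashed once its first packet has survived every other hook.
 */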

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(net, searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_node *n;
        unsigned int i, cnt = 0;
        int dropped = 0;

        rcu_read_lock();
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
                                ct = tmp;
                        cnt++;
                }

                if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        ct = NULL;
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
                hash = (hash + 1) % nf_conntrack_htable_size;
        }
        rcu_read_unlock();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC_ATOMIC(net, early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}
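
/* The eviction walk above scans at most NF_CT_EVICTION_RANGE entries,
 * starting in the bucket the new connection would hash into and
 * wrapping into the following buckets, and remembers the last
 * non-ASSURED entry seen. Dropping from the colliding neighbourhood
 * keeps eviction cheap and roughly LRU, at the cost of occasionally
 * freeing a connection that was about to become assured.
 */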

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        struct nf_conn *ct = NULL;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&net->ct.count);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ct->ct_general.use, 1);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
        ct->ct_net = net;
#endif
        INIT_RCU_HEAD(&ct->rcu);

        return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
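
/* As the code stands, nf_conntrack_alloc() returns either a valid
 * conntrack or an ERR_PTR() (currently only ERR_PTR(-ENOMEM), when the
 * table is full and early_drop() finds nothing to evict, or when the
 * slab allocation fails), so callers check for IS_ERR() as
 * init_conntrack() does below. A hedged sketch:
 *
 *      ct = nf_conntrack_alloc(net, orig, &repl, GFP_ATOMIC);
 *      if (IS_ERR(ct))
 *              return PTR_ERR(ct);
 */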

static void nf_conntrack_free_rcu(struct rcu_head *head)
{
        struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
        struct net *net = nf_ct_net(ct);

        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        atomic_dec(&net->ct.count);
}

void nf_conntrack_free(struct nf_conn *ct)
{
        nf_ct_ext_destroy(ct);
        call_rcu(&ct->rcu, nf_conntrack_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
        if (ct == NULL || IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
        }

        if (!l4proto->new(ct, skb, dataoff)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);

        spin_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(net, tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         ct, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &ct->status);
                ct->master = exp->master;
                if (exp->helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, exp->helper);
                }

#ifdef CONFIG_NF_CONNTRACK_MARK
                ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                ct->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&ct->master->ct_general);
                NF_CT_STAT_INC(net, expect_new);
        } else {
                __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }

        /* Overload tuple linked list to put us in unconfirmed list. */
        hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                       &net->ct.unconfirmed);

        spin_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(net, &tuple);
        if (!h) {
                h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
                NF_CT_STAT_INC_ATOMIC(net, ignore);
                return NF_ACCEPT;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet, error, unclean...
         * inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
                        return -ret;
                }
        }

        ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                return NF_DROP;
        }

        NF_CT_ASSERT(skb->nfct);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, ct);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
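
/* Inversion simply swaps the two endpoints and flips the direction
 * bit, with the l3/l4 helpers copying their respective address and
 * port fields. Illustratively, for a TCP tuple (addresses made up):
 *
 *      orig:    src 10.0.0.1:1025 -> dst 10.0.0.2:80,   dir ORIGINAL
 *      inverse: src 10.0.0.2:80   -> dst 10.0.0.1:1025, dir REPLY
 *
 * which is exactly the reply tuple stored at allocation time.
 */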

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        int event = 0;

        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        spin_lock_bh(&nf_conntrack_lock);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ
                    && del_timer(&ct->timeout)) {
                        ct->timeout.expires = newtime;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
        }

acct:
        if (do_acct) {
                struct nf_conn_counter *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                }
        }

        spin_unlock_bh(&nf_conntrack_lock);

        /* must be unlocked when calling event cache */
        if (event)
                nf_conntrack_event_cache(event, ct);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_counter *acct;

                spin_lock_bh(&nf_conntrack_lock);
                acct = nf_conn_acct_find(ct);
                if (acct) {
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                }
                spin_unlock_bh(&nf_conntrack_lock);
        }

        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
        NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_node *n;

        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
        }
        hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        set_bit(IPS_DYING_BIT, &ct->status);
        }
        spin_unlock_bh(&nf_conntrack_lock);
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock_bh(&nf_conntrack_lock);
        return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
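
/* The iterator callback returns nonzero to have a connection killed,
 * as kill_all() below does unconditionally. A hedged sketch of a more
 * selective caller (kill_matching() and its predicate are hypothetical,
 * for illustration only):
 *
 *      static int kill_matching(struct nf_conn *i, void *data)
 *      {
 *              return my_predicate(i, data);   // hypothetical
 *      }
 *
 *      nf_ct_iterate_cleanup(net, kill_matching, my_data);
 */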

struct __nf_ct_flush_report {
        u32 pid;
        int report;
};

static int kill_all(struct nf_conn *i, void *data)
{
        struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;

        /* get_next_corpse sets the dying bit for us */
        nf_conntrack_event_report(IPCT_DESTROY,
                                  i,
                                  fr->pid,
                                  fr->report);
        return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(struct net *net, u32 pid, int report)
{
        struct __nf_ct_flush_report fr = {
                .pid    = pid,
                .report = report,
        };
        nf_ct_iterate_cleanup(net, kill_all, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

static void nf_conntrack_cleanup_init_net(void)
{
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
        kmem_cache_destroy(nf_conntrack_cachep);
}

static void nf_conntrack_cleanup_net(struct net *net)
{
        nf_ct_event_cache_flush(net);
        nf_conntrack_ecache_fini(net);
 i_see_dead_people:
        nf_conntrack_flush(net, 0, 0);
        if (atomic_read(&net->ct.count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
        free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
        if (net_eq(net, &init_net))
                rcu_assign_pointer(ip_ct_attach, NULL);

        /* This makes sure all current packets have passed through
           netfilter framework.  Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_conntrack_cleanup_net(net);

        if (net_eq(net, &init_net)) {
                rcu_assign_pointer(nf_ct_destroy, NULL);
                nf_conntrack_cleanup_init_net();
        }
}

struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
{
        struct hlist_head *hash;
        unsigned int size, i;

        *vmalloced = 0;

        size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
        hash = (void *)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
                                        get_order(sizeof(struct hlist_head)
                                                  * size));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = vmalloc(sizeof(struct hlist_head) * size);
        }

        if (hash)
                for (i = 0; i < size; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
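
/* The table is sized in whole pages and tried with __get_free_pages()
 * first, presumably because a direct-mapped, physically contiguous
 * allocation avoids the extra TLB pressure of a vmalloc() area; the
 * __GFP_NOWARN keeps the expected high-order failure quiet, and
 * *vmalloced records which path succeeded so nf_ct_free_hashtable()
 * can release the memory the same way it was obtained.
 */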

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
        int rnd;
        struct hlist_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtoul(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we also can
         * use a new random seed */
        get_random_bytes(&rnd, 4);

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though since that required taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_empty(&init_net.ct.hash[i])) {
                        h = hlist_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnode);
                        hlist_del_rcu(&h->hnode);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        hlist_add_head(&h->hnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;

        nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);

        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);
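
/* Because the parameter is registered with mode 0600 above, the hash
 * size can also be retuned at runtime through sysfs; for example, as
 * root:
 *
 *      echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which lands in nf_conntrack_set_hashsize() and triggers the rehash
 * above.
 */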

static int nf_conntrack_init_init_net(void)
{
        int max_factor = 8;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads. When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }
        nf_conntrack_max = max_factor * nf_conntrack_htable_size;

        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                0, 0, NULL);
        if (!nf_conntrack_cachep) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                ret = -ENOMEM;
                goto err_cache;
        }

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_proto;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto err_helper;

        return 0;

err_helper:
        nf_conntrack_proto_fini();
err_proto:
        kmem_cache_destroy(nf_conntrack_cachep);
err_cache:
        return ret;
}

static int nf_conntrack_init_net(struct net *net)
{
        int ret;

        atomic_set(&net->ct.count, 0);
        INIT_HLIST_HEAD(&net->ct.unconfirmed);
        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
        if (!net->ct.stat) {
                ret = -ENOMEM;
                goto err_stat;
        }
        ret = nf_conntrack_ecache_init(net);
        if (ret < 0)
                goto err_ecache;
        net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                             &net->ct.hash_vmalloc);
        if (!net->ct.hash) {
                ret = -ENOMEM;
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_hash;
        }
        ret = nf_conntrack_expect_init(net);
        if (ret < 0)
                goto err_expect;
        ret = nf_conntrack_acct_init(net);
        if (ret < 0)
                goto err_acct;

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
        nf_conntrack_untracked.ct_net = &init_net;
#endif
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return 0;

err_acct:
        nf_conntrack_expect_fini(net);
err_expect:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
err_hash:
        nf_conntrack_ecache_fini(net);
err_ecache:
        free_percpu(net->ct.stat);
err_stat:
        return ret;
}

int nf_conntrack_init(struct net *net)
{
        int ret;

        if (net_eq(net, &init_net)) {
                ret = nf_conntrack_init_init_net();
                if (ret < 0)
                        goto out_init_net;
        }
        ret = nf_conntrack_init_net(net);
        if (ret < 0)
                goto out_net;

        if (net_eq(net, &init_net)) {
                /* For use by REJECT target */
                rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
                rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
        }
        return 0;

out_net:
        if (net_eq(net, &init_net))
                nf_conntrack_cleanup_init_net();
out_init_net:
        return ret;
}