/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
29 LIST_HEAD(nf_conntrack_expect_list);
30 kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
31 DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
32 static unsigned int nf_conntrack_expect_next_id;
34 /* nf_conntrack_expect helper functions */
35 void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
37 struct nf_conn_help *master_help = nfct_help(exp->master);
39 NF_CT_ASSERT(master_help);
40 NF_CT_ASSERT(!timer_pending(&exp->timeout));
43 NF_CT_STAT_INC(expect_delete);
44 master_help->expecting--;
45 nf_conntrack_expect_put(exp);
48 static void expectation_timed_out(unsigned long ul_expect)
50 struct nf_conntrack_expect *exp = (void *)ul_expect;
52 write_lock_bh(&nf_conntrack_lock);
53 nf_ct_unlink_expect(exp);
54 write_unlock_bh(&nf_conntrack_lock);
55 nf_conntrack_expect_put(exp);
58 struct nf_conntrack_expect *
59 __nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
61 struct nf_conntrack_expect *i;
63 list_for_each_entry(i, &nf_conntrack_expect_list, list) {
64 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
70 /* Just find a expectation corresponding to a tuple. */
71 struct nf_conntrack_expect *
72 nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
74 struct nf_conntrack_expect *i;
76 read_lock_bh(&nf_conntrack_lock);
77 i = __nf_conntrack_expect_find(tuple);
80 read_unlock_bh(&nf_conntrack_lock);
85 /* If an expectation for this connection is found, it gets delete from
86 * global list then returned. */
87 struct nf_conntrack_expect *
88 find_expectation(const struct nf_conntrack_tuple *tuple)
90 struct nf_conntrack_expect *i;
92 list_for_each_entry(i, &nf_conntrack_expect_list, list) {
93 /* If master is not in hash table yet (ie. packet hasn't left
94 this machine yet), how can other end know about expected?
95 Hence these are not the droids you are looking for (if
96 master ct never got confirmed, we'd hold a reference to it
97 and weird things would happen to future packets). */
98 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
99 && nf_ct_is_confirmed(i->master)) {
100 if (i->flags & NF_CT_EXPECT_PERMANENT) {
103 } else if (del_timer(&i->timeout)) {
104 nf_ct_unlink_expect(i);
112 /* delete all expectations for this conntrack */
113 void nf_ct_remove_expectations(struct nf_conn *ct)
115 struct nf_conntrack_expect *i, *tmp;
116 struct nf_conn_help *help = nfct_help(ct);
118 /* Optimization: most connection never expect any others. */
119 if (!help || help->expecting == 0)
122 list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
123 if (i->master == ct && del_timer(&i->timeout)) {
124 nf_ct_unlink_expect(i);
125 nf_conntrack_expect_put(i);
130 /* Would two expected things clash? */
131 static inline int expect_clash(const struct nf_conntrack_expect *a,
132 const struct nf_conntrack_expect *b)
134 /* Part covered by intersection of masks must be unequal,
135 otherwise they clash */
136 struct nf_conntrack_tuple intersect_mask;
139 intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
140 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
141 intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
142 intersect_mask.dst.protonum = a->mask.dst.protonum
143 & b->mask.dst.protonum;
145 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
146 intersect_mask.src.u3.all[count] =
147 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
150 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
151 intersect_mask.dst.u3.all[count] =
152 a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
155 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
158 static inline int expect_matches(const struct nf_conntrack_expect *a,
159 const struct nf_conntrack_expect *b)
161 return a->master == b->master
162 && nf_ct_tuple_equal(&a->tuple, &b->tuple)
163 && nf_ct_tuple_equal(&a->mask, &b->mask);
166 /* Generally a bad idea to call this: could have matched already. */
167 void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
169 struct nf_conntrack_expect *i;
171 write_lock_bh(&nf_conntrack_lock);
172 /* choose the the oldest expectation to evict */
173 list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
174 if (expect_matches(i, exp) && del_timer(&i->timeout)) {
175 nf_ct_unlink_expect(i);
176 write_unlock_bh(&nf_conntrack_lock);
177 nf_conntrack_expect_put(i);
181 write_unlock_bh(&nf_conntrack_lock);
184 /* We don't increase the master conntrack refcount for non-fulfilled
185 * conntracks. During the conntrack destruction, the expectations are
186 * always killed before the conntrack itself */
187 struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
189 struct nf_conntrack_expect *new;
191 new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
196 atomic_set(&new->use, 1);
200 void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
202 if (atomic_dec_and_test(&exp->use))
203 kmem_cache_free(nf_conntrack_expect_cachep, exp);
206 static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
208 struct nf_conn_help *master_help = nfct_help(exp->master);
210 atomic_inc(&exp->use);
211 master_help->expecting++;
212 list_add(&exp->list, &nf_conntrack_expect_list);
214 init_timer(&exp->timeout);
215 exp->timeout.data = (unsigned long)exp;
216 exp->timeout.function = expectation_timed_out;
217 exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
218 add_timer(&exp->timeout);
220 exp->id = ++nf_conntrack_expect_next_id;
221 atomic_inc(&exp->use);
222 NF_CT_STAT_INC(expect_create);
225 /* Race with expectations being used means we could have none to find; OK. */
226 static void evict_oldest_expect(struct nf_conn *master)
228 struct nf_conntrack_expect *i;
230 list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
231 if (i->master == master) {
232 if (del_timer(&i->timeout)) {
233 nf_ct_unlink_expect(i);
234 nf_conntrack_expect_put(i);
241 static inline int refresh_timer(struct nf_conntrack_expect *i)
243 struct nf_conn_help *master_help = nfct_help(i->master);
245 if (!del_timer(&i->timeout))
248 i->timeout.expires = jiffies + master_help->helper->timeout*HZ;
249 add_timer(&i->timeout);
253 int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
255 struct nf_conntrack_expect *i;
256 struct nf_conn *master = expect->master;
257 struct nf_conn_help *master_help = nfct_help(master);
260 NF_CT_ASSERT(master_help);
262 write_lock_bh(&nf_conntrack_lock);
263 list_for_each_entry(i, &nf_conntrack_expect_list, list) {
264 if (expect_matches(i, expect)) {
265 /* Refresh timer: if it's dying, ignore.. */
266 if (refresh_timer(i)) {
270 } else if (expect_clash(i, expect)) {
275 /* Will be over limit? */
276 if (master_help->helper->max_expected &&
277 master_help->expecting >= master_help->helper->max_expected)
278 evict_oldest_expect(master);
280 nf_conntrack_expect_insert(expect);
281 nf_conntrack_expect_event(IPEXP_NEW, expect);
284 write_unlock_bh(&nf_conntrack_lock);
288 #ifdef CONFIG_PROC_FS
289 static void *exp_seq_start(struct seq_file *s, loff_t *pos)
291 struct list_head *e = &nf_conntrack_expect_list;
294 /* strange seq_file api calls stop even if we fail,
295 * thus we need to grab lock since stop unlocks */
296 read_lock_bh(&nf_conntrack_lock);
301 for (i = 0; i <= *pos; i++) {
303 if (e == &nf_conntrack_expect_list)
309 static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
311 struct list_head *e = v;
316 if (e == &nf_conntrack_expect_list)
322 static void exp_seq_stop(struct seq_file *s, void *v)
324 read_unlock_bh(&nf_conntrack_lock);
327 static int exp_seq_show(struct seq_file *s, void *v)
329 struct nf_conntrack_expect *expect = v;
331 if (expect->timeout.function)
332 seq_printf(s, "%ld ", timer_pending(&expect->timeout)
333 ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
336 seq_printf(s, "l3proto = %u proto=%u ",
337 expect->tuple.src.l3num,
338 expect->tuple.dst.protonum);
339 print_tuple(s, &expect->tuple,
340 __nf_ct_l3proto_find(expect->tuple.src.l3num),
341 __nf_ct_proto_find(expect->tuple.src.l3num,
342 expect->tuple.dst.protonum));
343 return seq_putc(s, '\n');
346 static struct seq_operations exp_seq_ops = {
347 .start = exp_seq_start,
348 .next = exp_seq_next,
349 .stop = exp_seq_stop,
353 static int exp_open(struct inode *inode, struct file *file)
355 return seq_open(file, &exp_seq_ops);
358 struct file_operations exp_file_ops = {
359 .owner = THIS_MODULE,
363 .release = seq_release
365 #endif /* CONFIG_PROC_FS */