[NET]: Make the device list and device lookups per namespace.
[safe/jmp/linux-2.6] net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>

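/*
 * Per-connection packet/byte counters are only compiled in when
 * CONFIG_NF_CT_ACCT is enabled; otherwise seq_print_counters()
 * collapses to a no-op.
 */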
#ifdef CONFIG_NF_CT_ACCT
static unsigned int
seq_print_counters(struct seq_file *s,
                   const struct ip_conntrack_counter *counter)
{
        return seq_printf(s, "packets=%llu bytes=%llu ",
                          (unsigned long long)counter->packets,
                          (unsigned long long)counter->bytes);
}
#else
#define seq_print_counters(x, y)        0
#endif

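/*
 * Iterator state for /proc/net/ip_conntrack: the dump walks the global
 * conntrack hash table, and 'bucket' records which chain is currently
 * being traversed.
 */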
struct ct_iter_state {
        unsigned int bucket;
};

static struct hlist_node *ct_get_first(struct seq_file *seq)
{
        struct ct_iter_state *st = seq->private;

        for (st->bucket = 0;
             st->bucket < nf_conntrack_htable_size;
             st->bucket++) {
                if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
                        return nf_conntrack_hash[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_node *head)
{
        struct ct_iter_state *st = seq->private;

        head = head->next;
        while (head == NULL) {
                if (++st->bucket >= nf_conntrack_htable_size)
                        return NULL;
                head = nf_conntrack_hash[st->bucket].first;
        }
        return head;
}

static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

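/*
 * seq_file callbacks for /proc/net/ip_conntrack. The whole dump runs
 * with nf_conntrack_lock held for reading and bottom halves disabled,
 * so the hash table cannot change underneath us.
 */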
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock_bh(&nf_conntrack_lock);
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
{
        read_unlock_bh(&nf_conntrack_lock);
}

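/*
 * Print a single conntrack entry in the legacy ip_conntrack format.
 * Only IPv4 entries are shown, and each connection is printed once,
 * keyed on its ORIGINAL direction.
 */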
static int ct_seq_show(struct seq_file *s, void *v)
{
        const struct nf_conntrack_tuple_hash *hash = v;
        const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;

        NF_CT_ASSERT(ct);

        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                return 0;
        if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET)
                return 0;

        l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
                                       .tuple.src.l3num);
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
                                       .tuple.src.l3num,
                                       ct->tuplehash[IP_CT_DIR_ORIGINAL]
                                       .tuple.dst.protonum);
        NF_CT_ASSERT(l4proto);

        if (seq_printf(s, "%-8s %u %ld ",
                      l4proto->name,
                      ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
                      timer_pending(&ct->timeout)
                      ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
                return -ENOSPC;

        if (l3proto->print_conntrack(s, ct))
                return -ENOSPC;

        if (l4proto->print_conntrack(s, ct))
                return -ENOSPC;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        l3proto, l4proto))
                return -ENOSPC;

        if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
                return -ENOSPC;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                if (seq_printf(s, "[UNREPLIED] "))
                        return -ENOSPC;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                        l3proto, l4proto))
                return -ENOSPC;

        if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
                return -ENOSPC;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                if (seq_printf(s, "[ASSURED] "))
                        return -ENOSPC;

#ifdef CONFIG_NF_CONNTRACK_MARK
        if (seq_printf(s, "mark=%u ", ct->mark))
                return -ENOSPC;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
        if (seq_printf(s, "secmark=%u ", ct->secmark))
                return -ENOSPC;
#endif

        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                return -ENOSPC;

        return 0;
}

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next  = ct_seq_next,
        .stop  = ct_seq_stop,
        .show  = ct_seq_show
};

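/*
 * Allocate per-reader iterator state and attach it to the seq_file;
 * seq_release_private() frees it again when the file is closed.
 */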
static int ct_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct ct_iter_state *st;
        int ret;

        st = kzalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
        if (st == NULL)
                return -ENOMEM;
        ret = seq_open(file, &ct_seq_ops);
        if (ret)
                goto out_free;
        seq          = file->private_data;
        seq->private = st;
        return ret;
out_free:
        kfree(st);
        return ret;
}

static const struct file_operations ct_file_ops = {
        .owner   = THIS_MODULE,
        .open    = ct_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

/* expects */
struct ct_expect_iter_state {
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                if (!hlist_empty(&nf_ct_expect_hash[st->bucket]))
                        return nf_ct_expect_hash[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = head->next;
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = nf_ct_expect_hash[st->bucket].first;
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock_bh(&nf_conntrack_lock);
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock_bh(&nf_conntrack_lock);
}

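/*
 * Print one expectation in the legacy /proc/net/ip_conntrack_expect
 * format: remaining timeout (or "-" if no timer is set), protocol
 * number and the expected tuple. Only IPv4 expectations are shown.
 */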
static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n = v;

        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (exp->tuple.src.l3num != AF_INET)
                return 0;

        if (exp->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");

        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

        print_tuple(s, &exp->tuple,
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct ct_expect_iter_state *st;
        int ret;

        st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
        if (!st)
                return -ENOMEM;
        ret = seq_open(file, &exp_seq_ops);
        if (ret)
                goto out_free;
        seq          = file->private_data;
        seq->private = st;
        return ret;
out_free:
        kfree(st);
        return ret;
}

static const struct file_operations ip_exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

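/*
 * /proc/net/stat/ip_conntrack: one line of per-CPU statistics for each
 * possible CPU, preceded by a header line emitted for SEQ_START_TOKEN.
 */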
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(nf_conntrack_stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(nf_conntrack_stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
        struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
                return 0;
        }

        seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x  %08x %08x %08x \n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete
                );
        return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
        .start  = ct_cpu_seq_start,
        .next   = ct_cpu_seq_next,
        .stop   = ct_cpu_seq_stop,
        .show   = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &ct_cpu_seq_ops);
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = ct_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

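/*
 * Register the compat files in the initial network namespace:
 * /proc/net/ip_conntrack, /proc/net/ip_conntrack_expect and
 * /proc/net/stat/ip_conntrack.
 */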
int __init nf_conntrack_ipv4_compat_init(void)
{
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;

        proc = proc_net_fops_create(&init_net, "ip_conntrack", 0440, &ct_file_ops);
        if (!proc)
                goto err1;

        proc_exp = proc_net_fops_create(&init_net, "ip_conntrack_expect", 0440,
                                        &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;

        proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, init_net.proc_net_stat);
        if (!proc_stat)
                goto err3;

        proc_stat->proc_fops = &ct_cpu_seq_fops;
        proc_stat->owner = THIS_MODULE;

        return 0;

err3:
        proc_net_remove(&init_net, "ip_conntrack_expect");
err2:
        proc_net_remove(&init_net, "ip_conntrack");
err1:
        return -ENOMEM;
}

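/* Tear the compat proc entries down again, in reverse order of creation. */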
void __exit nf_conntrack_ipv4_compat_fini(void)
{
        remove_proc_entry("ip_conntrack", init_net.proc_net_stat);
        proc_net_remove(&init_net, "ip_conntrack_expect");
        proc_net_remove(&init_net, "ip_conntrack");
}