[NETFILTER]: Replace direct proc_fops assignment with proc_create call.
[safe/jmp/linux-2.6] net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
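The pattern this change applies: rather than creating the /proc stats entry with create_proc_entry() and then assigning its ->proc_fops field by hand, the entry is registered in a single proc_create() call, so it never becomes visible without file operations attached. A minimal before/after sketch of the registration in nf_conntrack_ipv4_compat_init() (the "before" lines are an assumed prior form, not the exact removed hunk):

        /* before (assumed prior form): two-step registration */
        proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, init_net.proc_net_stat);
        if (!proc_stat)
                goto err3;
        proc_stat->proc_fops = &ct_cpu_seq_fops;

        /* after: single proc_create() call, as in the file below */
        proc_stat = proc_create("ip_conntrack", S_IRUGO,
                                init_net.proc_net_stat, &ct_cpu_seq_fops);
        if (!proc_stat)
                goto err3;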
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>

#ifdef CONFIG_NF_CT_ACCT
static unsigned int
seq_print_counters(struct seq_file *s,
                   const struct ip_conntrack_counter *counter)
{
        return seq_printf(s, "packets=%llu bytes=%llu ",
                          (unsigned long long)counter->packets,
                          (unsigned long long)counter->bytes);
}
#else
#define seq_print_counters(x, y)        0
#endif

struct ct_iter_state {
        unsigned int bucket;
};

static struct hlist_node *ct_get_first(struct seq_file *seq)
{
        struct ct_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0;
             st->bucket < nf_conntrack_htable_size;
             st->bucket++) {
                n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_node *head)
{
        struct ct_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_conntrack_htable_size)
                        return NULL;
                head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int ct_seq_show(struct seq_file *s, void *v)
{
        const struct nf_conntrack_tuple_hash *hash = v;
        const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;

        NF_CT_ASSERT(ct);

        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                return 0;
        if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET)
                return 0;

        l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
                                       .tuple.src.l3num);
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
                                       .tuple.src.l3num,
                                       ct->tuplehash[IP_CT_DIR_ORIGINAL]
                                       .tuple.dst.protonum);
        NF_CT_ASSERT(l4proto);

        if (seq_printf(s, "%-8s %u %ld ",
                      l4proto->name,
                      ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
                      timer_pending(&ct->timeout)
                      ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
                return -ENOSPC;

        if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
                return -ENOSPC;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        l3proto, l4proto))
                return -ENOSPC;

        if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
                return -ENOSPC;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                if (seq_printf(s, "[UNREPLIED] "))
                        return -ENOSPC;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                        l3proto, l4proto))
                return -ENOSPC;

        if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
                return -ENOSPC;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                if (seq_printf(s, "[ASSURED] "))
                        return -ENOSPC;

#ifdef CONFIG_NF_CONNTRACK_MARK
        if (seq_printf(s, "mark=%u ", ct->mark))
                return -ENOSPC;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
        if (seq_printf(s, "secmark=%u ", ct->secmark))
                return -ENOSPC;
#endif

        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                return -ENOSPC;

        return 0;
}

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next  = ct_seq_next,
        .stop  = ct_seq_stop,
        .show  = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &ct_seq_ops,
                        sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
        .owner   = THIS_MODULE,
        .open    = ct_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

/* expects */
struct ct_expect_iter_state {
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(nf_ct_expect_hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(nf_ct_expect_hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *n = v;

        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (exp->tuple.src.l3num != AF_INET)
                return 0;

        if (exp->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");

        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

        print_tuple(s, &exp->tuple,
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &exp_seq_ops,
                        sizeof(struct ct_expect_iter_state));
}

static const struct file_operations ip_exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(nf_conntrack_stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(nf_conntrack_stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
        const struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
                return 0;
        }

        seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x  %08x %08x %08x \n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete
                );
        return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
        .start  = ct_cpu_seq_start,
        .next   = ct_cpu_seq_next,
        .stop   = ct_cpu_seq_stop,
        .show   = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &ct_cpu_seq_ops);
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = ct_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

int __init nf_conntrack_ipv4_compat_init(void)
{
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;

        proc = proc_net_fops_create(&init_net, "ip_conntrack", 0440, &ct_file_ops);
        if (!proc)
                goto err1;

        proc_exp = proc_net_fops_create(&init_net, "ip_conntrack_expect", 0440,
                                        &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;

        proc_stat = proc_create("ip_conntrack", S_IRUGO,
                                init_net.proc_net_stat, &ct_cpu_seq_fops);
        if (!proc_stat)
                goto err3;
        return 0;

err3:
        proc_net_remove(&init_net, "ip_conntrack_expect");
err2:
        proc_net_remove(&init_net, "ip_conntrack");
err1:
        return -ENOMEM;
}

void __exit nf_conntrack_ipv4_compat_fini(void)
{
        remove_proc_entry("ip_conntrack", init_net.proc_net_stat);
        proc_net_remove(&init_net, "ip_conntrack_expect");
        proc_net_remove(&init_net, "ip_conntrack");
}