[NETFILTER]: add /proc/net/netfilter interface to nf_queue
net/netfilter/nf_queue.c
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/protocol.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static struct nf_queue_handler *queue_handler[NPROTO];
static struct nf_queue_rerouter *queue_rerouter;

static DEFINE_RWLOCK(queue_handler_lock);

int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	if (queue_handler[pf])
		ret = -EBUSY;
	else {
		queue_handler[pf] = qh;
		ret = 0;
	}
	write_unlock_bh(&queue_handler_lock);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);

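For illustration, a queue handler module fills in a struct nf_queue_handler and claims one protocol family through nf_register_queue_handler(). The sketch below is hypothetical and not part of this file: only the .name and .outfn members and the return codes are taken from how this file uses them, the outfn() prototype is inferred from the call site in nf_queue() further down, and the outfn() body itself is sketched after nf_queue().

/* Hypothetical registration sketch; all "example_*" names are made up. */
static int example_outfn(struct sk_buff *skb, struct nf_info *info,
			 unsigned int queuenum, void *data);

static struct nf_queue_handler example_handler = {
	.name  = "example",
	.outfn = example_outfn,
};

static int __init example_init(void)
{
	/* Returns -EBUSY if another handler already owns PF_INET. */
	return nf_register_queue_handler(PF_INET, &example_handler);
}

static void __exit example_exit(void)
{
	/* The caller is expected to flush its own queue first. */
	nf_unregister_queue_handler(PF_INET);
}
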
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	queue_handler[pf] = NULL;
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);

int nf_unregister_queue_rerouter(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
	write_unlock_bh(&queue_handler_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);

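The rerouter is how per-family code gets a chance to redo the route lookup if the packet's addressing was rewritten while it sat in a queue. The following is a hypothetical sketch: the rer_size, save and reroute members and their signatures are inferred from their uses in nf_queue() below, while the saved fields, the skb->nh.iph accesses and the use of ip_route_me_harder() are assumptions modeled loosely on the IPv4 case.

/* Hypothetical rerouter sketch.  nf_queue() allocates rer_size extra
 * bytes directly behind the struct nf_info, so save() can stash state
 * there for reroute() to inspect once the packet comes back. */
struct example_rt_info {
	__u32 daddr;
	__u32 saddr;
};

static void example_save(struct sk_buff *skb, struct nf_info *info)
{
	struct example_rt_info *rt_info = (void *)(info + 1);

	rt_info->daddr = skb->nh.iph->daddr;
	rt_info->saddr = skb->nh.iph->saddr;
}

static int example_reroute(struct sk_buff **pskb, struct nf_info *info)
{
	struct example_rt_info *rt_info = (void *)(info + 1);

	/* Addresses changed while queued: redo the route lookup. */
	if ((*pskb)->nh.iph->daddr != rt_info->daddr ||
	    (*pskb)->nh.iph->saddr != rt_info->saddr)
		return ip_route_me_harder(pskb);
	return 0;
}

static struct nf_queue_rerouter example_rerouter = {
	.rer_size = sizeof(struct example_rt_info),
	.save	  = example_save,
	.reroute  = example_reroute,
};

/* Registered once per family, e.g.:
 *	nf_register_queue_rerouter(PF_INET, &example_rerouter);
 */
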
void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
{
	int pf;

	write_lock_bh(&queue_handler_lock);
	for (pf = 0; pf < NPROTO; pf++) {
		if (queue_handler[pf] == qh)
			queue_handler[pf] = NULL;
	}
	write_unlock_bh(&queue_handler_lock);
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff **skb,
	     struct list_head *elem,
	     int pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	int status;
	struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev = NULL;
	struct net_device *physoutdev = NULL;
#endif

	/* QUEUE == DROP if no one is waiting, to be safe. */
	read_lock(&queue_handler_lock);
	if (!queue_handler[pf] || !queue_handler[pf]->outfn) {
		read_unlock(&queue_handler_lock);
		kfree_skb(*skb);
		return 1;
	}
121
122         info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
123         if (!info) {
124                 if (net_ratelimit())
125                         printk(KERN_ERR "OOM queueing packet %p\n",
126                                *skb);
127                 read_unlock(&queue_handler_lock);
128                 kfree_skb(*skb);
129                 return 1;
130         }
131
132         *info = (struct nf_info) { 
133                 (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
134
135         /* If it's going away, ignore hook. */
136         if (!try_module_get(info->elem->owner)) {
137                 read_unlock(&queue_handler_lock);
138                 kfree(info);
139                 return 0;
140         }
141
	/* Bump dev refs so they don't vanish while packet is out */
	if (indev) dev_hold(indev);
	if (outdev) dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
	if ((*skb)->nf_bridge) {
		physindev = (*skb)->nf_bridge->physindev;
		if (physindev) dev_hold(physindev);
		physoutdev = (*skb)->nf_bridge->physoutdev;
		if (physoutdev) dev_hold(physoutdev);
	}
#endif
	if (queue_rerouter[pf].save)
		queue_rerouter[pf].save(*skb, info);

	status = queue_handler[pf]->outfn(*skb, info, queuenum,
					  queue_handler[pf]->data);

	if (status >= 0 && queue_rerouter[pf].reroute)
		status = queue_rerouter[pf].reroute(skb, info);

	read_unlock(&queue_handler_lock);

	if (status < 0) {
		/* James M doesn't say fuck enough. */
		if (indev) dev_put(indev);
		if (outdev) dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
		if (physindev) dev_put(physindev);
		if (physoutdev) dev_put(physoutdev);
#endif
		module_put(info->elem->owner);
		kfree(info);
		kfree_skb(*skb);

		return 1;
	}

	return 1;
}

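To make the outfn() contract concrete, here is a hypothetical, heavily simplified handler body matching the prototype used in the registration sketch above. A real handler (ip_queue, nfnetlink_queue) hands the packet to userspace and calls nf_reinject() once a verdict arrives. The key points, taken from the code above, are that a non-negative return means the handler has taken ownership of both the skb and the info cookie, while a negative return tells nf_queue() to clean up and drop the packet itself.

/* Hypothetical outfn() sketch.  It only parks the packet on a list;
 * some later context (e.g. a userspace verdict) would then call
 *	nf_reinject(e->skb, e->info, NF_ACCEPT);
 * and free the entry.  Reinjecting synchronously from here would be
 * wrong, since nf_queue() may still use info (rerouting) afterwards. */
struct example_entry {
	struct list_head list;
	struct sk_buff	*skb;
	struct nf_info	*info;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);

static int example_outfn(struct sk_buff *skb, struct nf_info *info,
			 unsigned int queuenum, void *data)
{
	struct example_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return -ENOMEM;	/* nf_queue() drops the packet for us */

	e->skb = skb;
	e->info = info;
	spin_lock_bh(&example_list_lock);
	list_add_tail(&e->list, &example_list);
	spin_unlock_bh(&example_list_lock);
	return 0;		/* packet and info are now ours to reinject */
}
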
void nf_reinject(struct sk_buff *skb, struct nf_info *info,
		 unsigned int verdict)
{
	struct list_head *elem = &info->elem->list;
	struct list_head *i;

	rcu_read_lock();

	/* Release those devices we held, or Alexey will kill me. */
	if (info->indev) dev_put(info->indev);
	if (info->outdev) dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		if (skb->nf_bridge->physindev)
			dev_put(skb->nf_bridge->physindev);
		if (skb->nf_bridge->physoutdev)
			dev_put(skb->nf_bridge->physoutdev);
	}
#endif

	/* Drop reference to owner of hook which queued us. */
	module_put(info->elem->owner);

	list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
		if (i == elem)
			break;
	}

	if (i == &nf_hooks[info->pf][info->hook]) {
		/* The module which sent it to userspace is gone. */
		NFDEBUG("%s: module disappeared, dropping packet.\n",
			__FUNCTION__);
		verdict = NF_DROP;
	}

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
				     &skb, info->hook,
				     info->indev, info->outdev, &elem,
				     info->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
		info->okfn(skb);
		break;

	case NF_QUEUE:
		if (!nf_queue(&skb, elem, info->pf, info->hook,
			      info->indev, info->outdev, info->okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	}
	rcu_read_unlock();

	if (verdict == NF_DROP)
		kfree_skb(skb);

	kfree(info);
	return;
}
EXPORT_SYMBOL(nf_reinject);

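The queuenum argument that nf_queue() receives originates from the verdict of the hook that returned NF_QUEUE: the queue number is carried in the bits above NF_VERDICT_BITS and extracted with "verdict >> NF_VERDICT_BITS", here in nf_reinject() and likewise in the normal nf_hook_slow() path. A hypothetical hook that wants its packets delivered to queue 5 would therefore return a packed verdict like this (sketch only; example_hook and the hard-coded queue number are made up):

static unsigned int example_hook(unsigned int hooknum,
				 struct sk_buff **pskb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* Queue number 5 rides in the upper verdict bits. */
	return NF_QUEUE | (5 << NF_VERDICT_BITS);
}
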
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	struct nf_queue_handler *qh;

	read_lock_bh(&queue_handler_lock);
	qh = queue_handler[*pos];
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	read_unlock_bh(&queue_handler_lock);

	return ret;
}

static struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */
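Given the seq_printf() format strings above, reading the new file yields one line per protocol family: the family number, padded to two characters, followed by the registered handler's name or NONE. With nothing registered except, say, the hypothetical example_handler from the sketch above bound to PF_INET (family 2), the first lines of /proc/net/netfilter/nf_queue would look roughly like:

 0 NONE
 1 NONE
 2 example
 3 NONE

and so on up to NPROTO - 1.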

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *pde;
#endif
	queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
				 GFP_KERNEL);
	if (!queue_rerouter)
		return -ENOMEM;

#ifdef CONFIG_PROC_FS
	pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
	if (!pde) {
		kfree(queue_rerouter);
		return -1;
	}
	pde->proc_fops = &nfqueue_file_ops;
#endif
	memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));

	return 0;
}