#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/protocol.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is
 * protected by the queue_handler_lock rwlock below.  The handler must
 * provide an outfn() to accept packets for queueing and must reinject
 * all packets it receives, no matter what.
 */
static struct nf_queue_handler_t {
        nf_queue_outfn_t outfn;
        void *data;
} queue_handler[NPROTO];

static struct nf_queue_rerouter *queue_rerouter;

static DEFINE_RWLOCK(queue_handler_lock);

int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
{
        int ret;

        if (pf >= NPROTO)
                return -EINVAL;

        write_lock_bh(&queue_handler_lock);
        if (queue_handler[pf].outfn)
                ret = -EBUSY;
        else {
                queue_handler[pf].outfn = outfn;
                queue_handler[pf].data = data;
                ret = 0;
        }
        write_unlock_bh(&queue_handler_lock);

        return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
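
/*
 * Illustrative sketch, not part of this file: a queue handler module
 * registers one outfn per protocol family and must eventually hand
 * every packet it accepted back through nf_reinject().  The example_*
 * names and deliver_to_userspace() are hypothetical; the outfn
 * signature is taken from the call site in nf_queue() below.
 */
#if 0
static int example_outfn(struct sk_buff *skb, struct nf_info *info,
                         unsigned int queuenum, void *data)
{
        /* Hand off to some delivery mechanism; a negative return tells
         * nf_queue() to drop the packet and release its references. */
        return deliver_to_userspace(skb, info, queuenum, data);
}

static int __init example_init(void)
{
        /* Fails with -EBUSY if another handler already owns PF_INET. */
        return nf_register_queue_handler(PF_INET, example_outfn, NULL);
}

static void __exit example_exit(void)
{
        /* Flush pending packets first, then drop the registration. */
        nf_unregister_queue_handler(PF_INET);
}
#endif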

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
        if (pf >= NPROTO)
                return -EINVAL;

        write_lock_bh(&queue_handler_lock);
        queue_handler[pf].outfn = NULL;
        queue_handler[pf].data = NULL;
        write_unlock_bh(&queue_handler_lock);

        return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
{
        if (pf >= NPROTO)
                return -EINVAL;

        write_lock_bh(&queue_handler_lock);
        memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
        write_unlock_bh(&queue_handler_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);

int nf_unregister_queue_rerouter(int pf)
{
        if (pf >= NPROTO)
                return -EINVAL;

        write_lock_bh(&queue_handler_lock);
        memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
        write_unlock_bh(&queue_handler_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
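
/*
 * Illustrative sketch with hypothetical names: a rerouter lets a
 * protocol save routing state before a packet leaves for userspace
 * (save) and recompute the route if userspace rewrote the packet
 * (reroute).  rer_size reserves scratch space directly behind the
 * struct nf_info that nf_queue() allocates.  The callback signatures
 * here are inferred from the call sites in nf_queue() below.
 */
#if 0
struct example_rt_info {
        u_int32_t daddr;                /* whatever reroute() will need */
};

static void example_save(const struct sk_buff *skb, struct nf_info *info)
{
        struct example_rt_info *rt_info = (void *)(info + 1);

        rt_info->daddr = 0;             /* record state from *skb here */
}

static int example_reroute(struct sk_buff **pskb, const struct nf_info *info)
{
        /* Re-run routing for *pskb if the saved state no longer matches. */
        return 0;
}

static struct nf_queue_rerouter example_rerouter = {
        .rer_size       = sizeof(struct example_rt_info),
        .save           = example_save,
        .reroute        = example_reroute,
};

/* nf_register_queue_rerouter(PF_INET, &example_rerouter); */
#endif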

void nf_unregister_queue_handlers(nf_queue_outfn_t outfn)
{
        int pf;

        write_lock_bh(&queue_handler_lock);
        for (pf = 0; pf < NPROTO; pf++) {
                if (queue_handler[pf].outfn == outfn) {
                        queue_handler[pf].outfn = NULL;
                        queue_handler[pf].data = NULL;
                }
        }
        write_unlock_bh(&queue_handler_lock);
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
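
/*
 * A handler that registered the same outfn for several protocol
 * families can drop all of those registrations in one call at module
 * exit, e.g. (using the hypothetical outfn from the sketch above):
 *
 *      nf_unregister_queue_handlers(example_outfn);
 */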

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff **skb,
             struct list_head *elem,
             int pf, unsigned int hook,
             struct net_device *indev,
             struct net_device *outdev,
             int (*okfn)(struct sk_buff *),
             unsigned int queuenum)
{
        int status;
        struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev = NULL;
        struct net_device *physoutdev = NULL;
#endif

        /* QUEUE == DROP if no one is waiting, to be safe. */
        read_lock(&queue_handler_lock);
        if (!queue_handler[pf].outfn) {
                read_unlock(&queue_handler_lock);
                kfree_skb(*skb);
                return 1;
        }

        info = kmalloc(sizeof(*info) + queue_rerouter[pf].rer_size, GFP_ATOMIC);
        if (!info) {
                if (net_ratelimit())
                        printk(KERN_ERR "OOM queueing packet %p\n", *skb);
                read_unlock(&queue_handler_lock);
                kfree_skb(*skb);
                return 1;
        }

        *info = (struct nf_info) {
                (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

        /* If the hook is going away, ignore it; returning 0 tells the
         * caller to continue traversal from the next hook. */
        if (!try_module_get(info->elem->owner)) {
                read_unlock(&queue_handler_lock);
                kfree(info);
                return 0;
        }

        /* Bump dev refs so they don't vanish while the packet is out. */
        if (indev)
                dev_hold(indev);
        if (outdev)
                dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
        if ((*skb)->nf_bridge) {
                physindev = (*skb)->nf_bridge->physindev;
                if (physindev)
                        dev_hold(physindev);
                physoutdev = (*skb)->nf_bridge->physoutdev;
                if (physoutdev)
                        dev_hold(physoutdev);
        }
#endif
        if (queue_rerouter[pf].save)
                queue_rerouter[pf].save(*skb, info);

        status = queue_handler[pf].outfn(*skb, info, queuenum,
                                         queue_handler[pf].data);

        if (status >= 0 && queue_rerouter[pf].reroute)
                status = queue_rerouter[pf].reroute(skb, info);

        read_unlock(&queue_handler_lock);

        if (status < 0) {
                /* Queueing failed: drop all our references and the packet. */
                if (indev)
                        dev_put(indev);
                if (outdev)
                        dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
                if (physindev)
                        dev_put(physindev);
                if (physoutdev)
                        dev_put(physoutdev);
#endif
                module_put(info->elem->owner);
                kfree(info);
                kfree_skb(*skb);
        }

        return 1;
}
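
/*
 * How packets get here: a hook function returns an NF_QUEUE verdict
 * with the target queue number packed into the high bits, which
 * nf_reinject() below unpacks with "verdict >> NF_VERDICT_BITS".
 * A minimal sketch; packet_is_interesting() is a hypothetical helper:
 */
#if 0
static unsigned int example_hook(unsigned int hooknum, struct sk_buff **pskb,
                                 const struct net_device *in,
                                 const struct net_device *out,
                                 int (*okfn)(struct sk_buff *))
{
        /* Send matching packets to queue 3, let the rest pass. */
        if (packet_is_interesting(*pskb))
                return NF_QUEUE | (3 << NF_VERDICT_BITS);
        return NF_ACCEPT;
}
#endif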

void nf_reinject(struct sk_buff *skb, struct nf_info *info,
                 unsigned int verdict)
{
        struct list_head *elem = &info->elem->list;
        struct list_head *i;

        rcu_read_lock();

        /* Release the device references we took in nf_queue(). */
        if (info->indev)
                dev_put(info->indev);
        if (info->outdev)
                dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                if (skb->nf_bridge->physindev)
                        dev_put(skb->nf_bridge->physindev);
                if (skb->nf_bridge->physoutdev)
                        dev_put(skb->nf_bridge->physoutdev);
        }
#endif

        /* Drop reference to owner of hook which queued us. */
        module_put(info->elem->owner);

        /* Make sure the hook which queued us is still registered. */
        list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
                if (i == elem)
                        break;
        }

        if (i == &nf_hooks[info->pf][info->hook]) {
                /* The module which sent it to userspace is gone. */
                NFDEBUG("%s: module disappeared, dropping packet.\n",
                        __FUNCTION__);
                verdict = NF_DROP;
        }

        /* Continue traversal iff userspace said ok. */
        if (verdict == NF_REPEAT) {
                elem = elem->prev;
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
                                     &skb, info->hook,
                                     info->indev, info->outdev, &elem,
                                     info->okfn, INT_MIN);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
                info->okfn(skb);
                break;

        case NF_QUEUE:
                if (!nf_queue(&skb, elem, info->pf, info->hook,
                              info->indev, info->outdev, info->okfn,
                              verdict >> NF_VERDICT_BITS))
                        goto next_hook;
                break;
        }
        rcu_read_unlock();

        if (verdict == NF_DROP)
                kfree_skb(skb);

        kfree(info);
}
EXPORT_SYMBOL(nf_reinject);
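
/*
 * Sketch of the handler side, hypothetical names: once a verdict
 * arrives for a queued packet, the handler hands the skb and its
 * nf_info cookie back exactly once; nf_reinject() then frees the
 * info and either re-runs the remaining hooks or drops the packet.
 */
#if 0
static void example_verdict(struct sk_buff *skb, struct nf_info *info,
                            int accept)
{
        nf_reinject(skb, info, accept ? NF_ACCEPT : NF_DROP);
}
#endif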

int __init netfilter_queue_init(void)
{
        queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
                                 GFP_KERNEL);
        if (!queue_rerouter)
                return -ENOMEM;

        memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));

        return 0;
}