1 #include <linux/config.h>
2 #include <linux/kernel.h>
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/proc_fs.h>
6 #include <linux/skbuff.h>
7 #include <linux/netfilter.h>
8 #include <net/protocol.h>
10 #include "nf_internals.h"
13 * A queue handler may be registered for each protocol. Each is protected by
14 * long term mutex. The handler must provide an an outfn() to accept packets
15 * for queueing and must reinject all packets it receives, no matter what.
17 static struct nf_queue_handler_t {
18 nf_queue_outfn_t outfn;
20 } queue_handler[NPROTO];
/* Per-protocol-family reroute ops (save/reroute callbacks plus rer_size of
 * private state appended to struct nf_info); backing array is kmalloc'ed in
 * netfilter_queue_init(), one slot per protocol family. */
static struct nf_queue_rerouter *queue_rerouter;

/* Protects queue_handler[] and queue_rerouter[]: read-held across the whole
 * queueing operation in nf_queue(), write-held for (un)registration. */
static DEFINE_RWLOCK(queue_handler_lock);
27 int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
34 write_lock_bh(&queue_handler_lock);
35 if (queue_handler[pf].outfn)
38 queue_handler[pf].outfn = outfn;
39 queue_handler[pf].data = data;
42 write_unlock_bh(&queue_handler_lock);
46 EXPORT_SYMBOL(nf_register_queue_handler);
48 /* The caller must flush their queue before this */
49 int nf_unregister_queue_handler(int pf)
54 write_lock_bh(&queue_handler_lock);
55 queue_handler[pf].outfn = NULL;
56 queue_handler[pf].data = NULL;
57 write_unlock_bh(&queue_handler_lock);
61 EXPORT_SYMBOL(nf_unregister_queue_handler);
63 int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
68 write_lock_bh(&queue_handler_lock);
69 memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
70 write_unlock_bh(&queue_handler_lock);
74 EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
76 int nf_unregister_queue_rerouter(int pf)
81 write_lock_bh(&queue_handler_lock);
82 memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
83 write_unlock_bh(&queue_handler_lock);
86 EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
88 void nf_unregister_queue_handlers(nf_queue_outfn_t outfn)
92 write_lock_bh(&queue_handler_lock);
93 for (pf = 0; pf < NPROTO; pf++) {
94 if (queue_handler[pf].outfn == outfn) {
95 queue_handler[pf].outfn = NULL;
96 queue_handler[pf].data = NULL;
99 write_unlock_bh(&queue_handler_lock);
101 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
104 * Any packet that leaves via this function must come back
105 * through nf_reinject().
107 int nf_queue(struct sk_buff **skb,
108 struct list_head *elem,
109 int pf, unsigned int hook,
110 struct net_device *indev,
111 struct net_device *outdev,
112 int (*okfn)(struct sk_buff *),
113 unsigned int queuenum)
116 struct nf_info *info;
117 #ifdef CONFIG_BRIDGE_NETFILTER
118 struct net_device *physindev = NULL;
119 struct net_device *physoutdev = NULL;
122 /* QUEUE == DROP if noone is waiting, to be safe. */
123 read_lock(&queue_handler_lock);
124 if (!queue_handler[pf].outfn) {
125 read_unlock(&queue_handler_lock);
130 info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
133 printk(KERN_ERR "OOM queueing packet %p\n",
135 read_unlock(&queue_handler_lock);
140 *info = (struct nf_info) {
141 (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
143 /* If it's going away, ignore hook. */
144 if (!try_module_get(info->elem->owner)) {
145 read_unlock(&queue_handler_lock);
150 /* Bump dev refs so they don't vanish while packet is out */
151 if (indev) dev_hold(indev);
152 if (outdev) dev_hold(outdev);
154 #ifdef CONFIG_BRIDGE_NETFILTER
155 if ((*skb)->nf_bridge) {
156 physindev = (*skb)->nf_bridge->physindev;
157 if (physindev) dev_hold(physindev);
158 physoutdev = (*skb)->nf_bridge->physoutdev;
159 if (physoutdev) dev_hold(physoutdev);
162 if (queue_rerouter[pf].save)
163 queue_rerouter[pf].save(*skb, info);
165 status = queue_handler[pf].outfn(*skb, info, queuenum,
166 queue_handler[pf].data);
168 if (status >= 0 && queue_rerouter[pf].reroute)
169 status = queue_rerouter[pf].reroute(skb, info);
171 read_unlock(&queue_handler_lock);
174 /* James M doesn't say fuck enough. */
175 if (indev) dev_put(indev);
176 if (outdev) dev_put(outdev);
177 #ifdef CONFIG_BRIDGE_NETFILTER
178 if (physindev) dev_put(physindev);
179 if (physoutdev) dev_put(physoutdev);
181 module_put(info->elem->owner);
191 void nf_reinject(struct sk_buff *skb, struct nf_info *info,
192 unsigned int verdict)
194 struct list_head *elem = &info->elem->list;
199 /* Release those devices we held, or Alexey will kill me. */
200 if (info->indev) dev_put(info->indev);
201 if (info->outdev) dev_put(info->outdev);
202 #ifdef CONFIG_BRIDGE_NETFILTER
203 if (skb->nf_bridge) {
204 if (skb->nf_bridge->physindev)
205 dev_put(skb->nf_bridge->physindev);
206 if (skb->nf_bridge->physoutdev)
207 dev_put(skb->nf_bridge->physoutdev);
211 /* Drop reference to owner of hook which queued us. */
212 module_put(info->elem->owner);
214 list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
219 if (elem == &nf_hooks[info->pf][info->hook]) {
220 /* The module which sent it to userspace is gone. */
221 NFDEBUG("%s: module disappeared, dropping packet.\n",
226 /* Continue traversal iff userspace said ok... */
227 if (verdict == NF_REPEAT) {
232 if (verdict == NF_ACCEPT) {
234 verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
236 info->indev, info->outdev, &elem,
237 info->okfn, INT_MIN);
240 switch (verdict & NF_VERDICT_MASK) {
246 if (!nf_queue(&skb, elem, info->pf, info->hook,
247 info->indev, info->outdev, info->okfn,
248 verdict >> NF_VERDICT_BITS))
254 if (verdict == NF_DROP)
260 EXPORT_SYMBOL(nf_reinject);
262 int __init netfilter_queue_init(void)
264 queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
269 memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));