X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fnetfilter%2Fnf_queue.c;h=4f2310c93e015fb4350d4d641faf0b204010951a;hb=7e9c6eeb136a46dfd941852803b3a9dd78939b69;hp=c61f7237237f98cae58e66101aca9fedad61acbc;hpb=7a11b9848ae27e571f219fab5541bd84700f0d68;p=safe%2Fjmp%2Flinux-2.6 diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index c61f723..4f2310c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -1,4 +1,3 @@ -#include #include #include #include @@ -8,218 +7,237 @@ #include #include #include +#include #include "nf_internals.h" -/* +/* * A queue handler may be registered for each protocol. Each is protected by * long term mutex. The handler must provide an an outfn() to accept packets * for queueing and must reinject all packets it receives, no matter what. */ -static struct nf_queue_handler *queue_handler[NPROTO]; -static struct nf_queue_rerouter *queue_rerouter[NPROTO]; +static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly; -static DEFINE_RWLOCK(queue_handler_lock); +static DEFINE_MUTEX(queue_handler_mutex); /* return EBUSY when somebody else is registered, return EEXIST if the * same handler is registered, return 0 in case of success. */ -int nf_register_queue_handler(int pf, struct nf_queue_handler *qh) -{ +int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) +{ int ret; - if (pf >= NPROTO) + if (pf >= ARRAY_SIZE(queue_handler)) return -EINVAL; - write_lock_bh(&queue_handler_lock); + mutex_lock(&queue_handler_mutex); if (queue_handler[pf] == qh) ret = -EEXIST; else if (queue_handler[pf]) ret = -EBUSY; else { - queue_handler[pf] = qh; + rcu_assign_pointer(queue_handler[pf], qh); ret = 0; } - write_unlock_bh(&queue_handler_lock); + mutex_unlock(&queue_handler_mutex); return ret; } EXPORT_SYMBOL(nf_register_queue_handler); /* The caller must flush their queue before this */ -int nf_unregister_queue_handler(int pf) +int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) { - if (pf >= NPROTO) + if (pf >= ARRAY_SIZE(queue_handler)) return -EINVAL; - write_lock_bh(&queue_handler_lock); - queue_handler[pf] = NULL; - write_unlock_bh(&queue_handler_lock); - - return 0; -} -EXPORT_SYMBOL(nf_unregister_queue_handler); - -int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer) -{ - if (pf >= NPROTO) + mutex_lock(&queue_handler_mutex); + if (queue_handler[pf] && queue_handler[pf] != qh) { + mutex_unlock(&queue_handler_mutex); return -EINVAL; + } - write_lock_bh(&queue_handler_lock); - rcu_assign_pointer(queue_rerouter[pf], rer); - write_unlock_bh(&queue_handler_lock); + rcu_assign_pointer(queue_handler[pf], NULL); + mutex_unlock(&queue_handler_mutex); + + synchronize_rcu(); return 0; } -EXPORT_SYMBOL_GPL(nf_register_queue_rerouter); +EXPORT_SYMBOL(nf_unregister_queue_handler); -int nf_unregister_queue_rerouter(int pf) +void nf_unregister_queue_handlers(const struct nf_queue_handler *qh) { - if (pf >= NPROTO) - return -EINVAL; + u_int8_t pf; + + mutex_lock(&queue_handler_mutex); + for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) { + if (queue_handler[pf] == qh) + rcu_assign_pointer(queue_handler[pf], NULL); + } + mutex_unlock(&queue_handler_mutex); - write_lock_bh(&queue_handler_lock); - rcu_assign_pointer(queue_rerouter[pf], NULL); - write_unlock_bh(&queue_handler_lock); synchronize_rcu(); - return 0; } -EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter); +EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); -void nf_unregister_queue_handlers(struct nf_queue_handler *qh) 
+static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) { - int pf; + /* Release those devices we held, or Alexey will kill me. */ + if (entry->indev) + dev_put(entry->indev); + if (entry->outdev) + dev_put(entry->outdev); +#ifdef CONFIG_BRIDGE_NETFILTER + if (entry->skb->nf_bridge) { + struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; - write_lock_bh(&queue_handler_lock); - for (pf = 0; pf < NPROTO; pf++) { - if (queue_handler[pf] == qh) - queue_handler[pf] = NULL; + if (nf_bridge->physindev) + dev_put(nf_bridge->physindev); + if (nf_bridge->physoutdev) + dev_put(nf_bridge->physoutdev); } - write_unlock_bh(&queue_handler_lock); +#endif + /* Drop reference to owner of hook which queued us. */ + module_put(entry->elem->owner); } -EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); -/* - * Any packet that leaves via this function must come back +/* + * Any packet that leaves via this function must come back * through nf_reinject(). */ -int nf_queue(struct sk_buff **skb, - struct list_head *elem, - int pf, unsigned int hook, - struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sk_buff *), - unsigned int queuenum) +static int __nf_queue(struct sk_buff *skb, + struct list_head *elem, + u_int8_t pf, unsigned int hook, + struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sk_buff *), + unsigned int queuenum) { int status; - struct nf_info *info; + struct nf_queue_entry *entry = NULL; #ifdef CONFIG_BRIDGE_NETFILTER - struct net_device *physindev = NULL; - struct net_device *physoutdev = NULL; + struct net_device *physindev; + struct net_device *physoutdev; #endif - struct nf_queue_rerouter *rerouter; + const struct nf_afinfo *afinfo; + const struct nf_queue_handler *qh; /* QUEUE == DROP if noone is waiting, to be safe. */ - read_lock(&queue_handler_lock); - if (!queue_handler[pf] || !queue_handler[pf]->outfn) { - read_unlock(&queue_handler_lock); - kfree_skb(*skb); - return 1; - } - - info = kmalloc(sizeof(*info)+queue_rerouter[pf]->rer_size, GFP_ATOMIC); - if (!info) { - if (net_ratelimit()) - printk(KERN_ERR "OOM queueing packet %p\n", - *skb); - read_unlock(&queue_handler_lock); - kfree_skb(*skb); - return 1; - } + rcu_read_lock(); - *info = (struct nf_info) { - (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn }; + qh = rcu_dereference(queue_handler[pf]); + if (!qh) + goto err_unlock; + + afinfo = nf_get_afinfo(pf); + if (!afinfo) + goto err_unlock; + + entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC); + if (!entry) + goto err_unlock; + + *entry = (struct nf_queue_entry) { + .skb = skb, + .elem = list_entry(elem, struct nf_hook_ops, list), + .pf = pf, + .hook = hook, + .indev = indev, + .outdev = outdev, + .okfn = okfn, + }; /* If it's going away, ignore hook. 
*/ - if (!try_module_get(info->elem->owner)) { - read_unlock(&queue_handler_lock); - kfree(info); + if (!try_module_get(entry->elem->owner)) { + rcu_read_unlock(); + kfree(entry); return 0; } /* Bump dev refs so they don't vanish while packet is out */ - if (indev) dev_hold(indev); - if (outdev) dev_hold(outdev); - + if (indev) + dev_hold(indev); + if (outdev) + dev_hold(outdev); #ifdef CONFIG_BRIDGE_NETFILTER - if ((*skb)->nf_bridge) { - physindev = (*skb)->nf_bridge->physindev; - if (physindev) dev_hold(physindev); - physoutdev = (*skb)->nf_bridge->physoutdev; - if (physoutdev) dev_hold(physoutdev); + if (skb->nf_bridge) { + physindev = skb->nf_bridge->physindev; + if (physindev) + dev_hold(physindev); + physoutdev = skb->nf_bridge->physoutdev; + if (physoutdev) + dev_hold(physoutdev); } #endif - rerouter = rcu_dereference(queue_rerouter[pf]); - if (rerouter) - rerouter->save(*skb, info); - - status = queue_handler[pf]->outfn(*skb, info, queuenum, - queue_handler[pf]->data); + afinfo->saveroute(skb, entry); + status = qh->outfn(entry, queuenum); - read_unlock(&queue_handler_lock); + rcu_read_unlock(); if (status < 0) { - /* James M doesn't say fuck enough. */ - if (indev) dev_put(indev); - if (outdev) dev_put(outdev); -#ifdef CONFIG_BRIDGE_NETFILTER - if (physindev) dev_put(physindev); - if (physoutdev) dev_put(physoutdev); -#endif - module_put(info->elem->owner); - kfree(info); - kfree_skb(*skb); - - return 1; + nf_queue_entry_release_refs(entry); + goto err; } return 1; + +err_unlock: + rcu_read_unlock(); +err: + kfree_skb(skb); + kfree(entry); + return 1; } -void nf_reinject(struct sk_buff *skb, struct nf_info *info, - unsigned int verdict) +int nf_queue(struct sk_buff *skb, + struct list_head *elem, + u_int8_t pf, unsigned int hook, + struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sk_buff *), + unsigned int queuenum) { - struct list_head *elem = &info->elem->list; - struct list_head *i; - struct nf_queue_rerouter *rerouter; + struct sk_buff *segs; - rcu_read_lock(); + if (!skb_is_gso(skb)) + return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn, + queuenum); - /* Release those devices we held, or Alexey will kill me. */ - if (info->indev) dev_put(info->indev); - if (info->outdev) dev_put(info->outdev); -#ifdef CONFIG_BRIDGE_NETFILTER - if (skb->nf_bridge) { - if (skb->nf_bridge->physindev) - dev_put(skb->nf_bridge->physindev); - if (skb->nf_bridge->physoutdev) - dev_put(skb->nf_bridge->physoutdev); + switch (pf) { + case AF_INET: + skb->protocol = htons(ETH_P_IP); + break; + case AF_INET6: + skb->protocol = htons(ETH_P_IPV6); + break; } -#endif - /* Drop reference to owner of hook which queued us. */ - module_put(info->elem->owner); - - list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) { - if (i == elem) - break; - } - - if (elem == &nf_hooks[info->pf][info->hook]) { - /* The module which sent it to userspace is gone. 
*/ - NFDEBUG("%s: module disappeared, dropping packet.\n", - __FUNCTION__); - verdict = NF_DROP; - } + segs = skb_gso_segment(skb, 0); + kfree_skb(skb); + if (IS_ERR(segs)) + return 1; + + do { + struct sk_buff *nskb = segs->next; + + segs->next = NULL; + if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn, + queuenum)) + kfree_skb(segs); + segs = nskb; + } while (segs); + return 1; +} + +void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) +{ + struct sk_buff *skb = entry->skb; + struct list_head *elem = &entry->elem->list; + const struct nf_afinfo *afinfo; + + rcu_read_lock(); + + nf_queue_entry_release_refs(entry); /* Continue traversal iff userspace said ok... */ if (verdict == NF_REPEAT) { @@ -228,37 +246,38 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, } if (verdict == NF_ACCEPT) { - rerouter = rcu_dereference(queue_rerouter[info->pf]); - if (rerouter && rerouter->reroute(&skb, info) < 0) + afinfo = nf_get_afinfo(entry->pf); + if (!afinfo || afinfo->reroute(skb, entry) < 0) verdict = NF_DROP; } if (verdict == NF_ACCEPT) { next_hook: - verdict = nf_iterate(&nf_hooks[info->pf][info->hook], - &skb, info->hook, - info->indev, info->outdev, &elem, - info->okfn, INT_MIN); + verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook], + skb, entry->hook, + entry->indev, entry->outdev, &elem, + entry->okfn, INT_MIN); } switch (verdict & NF_VERDICT_MASK) { case NF_ACCEPT: - info->okfn(skb); + case NF_STOP: + local_bh_disable(); + entry->okfn(skb); + local_bh_enable(); + case NF_STOLEN: break; - case NF_QUEUE: - if (!nf_queue(&skb, elem, info->pf, info->hook, - info->indev, info->outdev, info->okfn, - verdict >> NF_VERDICT_BITS)) + if (!__nf_queue(skb, elem, entry->pf, entry->hook, + entry->indev, entry->outdev, entry->okfn, + verdict >> NF_VERDICT_BITS)) goto next_hook; break; + default: + kfree_skb(skb); } rcu_read_unlock(); - - if (verdict == NF_DROP) - kfree_skb(skb); - - kfree(info); + kfree(entry); return; } EXPORT_SYMBOL(nf_reinject); @@ -266,7 +285,7 @@ EXPORT_SYMBOL(nf_reinject); #ifdef CONFIG_PROC_FS static void *seq_start(struct seq_file *seq, loff_t *pos) { - if (*pos >= NPROTO) + if (*pos >= ARRAY_SIZE(queue_handler)) return NULL; return pos; @@ -276,7 +295,7 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { (*pos)++; - if (*pos >= NPROTO) + if (*pos >= ARRAY_SIZE(queue_handler)) return NULL; return pos; @@ -291,20 +310,20 @@ static int seq_show(struct seq_file *s, void *v) { int ret; loff_t *pos = v; - struct nf_queue_handler *qh; + const struct nf_queue_handler *qh; - read_lock_bh(&queue_handler_lock); - qh = queue_handler[*pos]; + rcu_read_lock(); + qh = rcu_dereference(queue_handler[*pos]); if (!qh) ret = seq_printf(s, "%2lld NONE\n", *pos); else ret = seq_printf(s, "%2lld %s\n", *pos, qh->name); - read_unlock_bh(&queue_handler_lock); + rcu_read_unlock(); return ret; } -static struct seq_operations nfqueue_seq_ops = { +static const struct seq_operations nfqueue_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, @@ -316,7 +335,7 @@ static int nfqueue_open(struct inode *inode, struct file *file) return seq_open(file, &nfqueue_seq_ops); } -static struct file_operations nfqueue_file_ops = { +static const struct file_operations nfqueue_file_ops = { .owner = THIS_MODULE, .open = nfqueue_open, .read = seq_read, @@ -329,12 +348,9 @@ static struct file_operations nfqueue_file_ops = { int __init netfilter_queue_init(void) { #ifdef CONFIG_PROC_FS - struct proc_dir_entry *pde; - - pde = create_proc_entry("nf_queue", 
S_IRUGO, proc_net_netfilter); - if (!pde) + if (!proc_create("nf_queue", S_IRUGO, + proc_net_netfilter, &nfqueue_file_ops)) return -1; - pde->proc_fops = &nfqueue_file_ops; #endif return 0; }
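
Illustrative usage of the reworked handler API (a minimal sketch, not taken from this patch): a queue handler module fills in a const struct nf_queue_handler, registers it per protocol family, and must hand every nf_queue_entry its outfn() accepts back through nf_reinject() with a verdict. The sketch follows only the signatures visible in the diff above; the symbol names (sample_outfn, sample_qh) are hypothetical, and a real handler would park entries and issue verdicts asynchronously rather than accepting them on the spot.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

/* Hypothetical outfn(): called under rcu_read_lock() from __nf_queue().
 * Every entry accepted here must eventually come back via nf_reinject(). */
static int sample_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* A real handler would queue entry->skb for userspace and reinject
	 * later with whatever verdict it receives; here we accept at once. */
	nf_reinject(entry, NF_ACCEPT);
	return 0;	/* >= 0: the entry is now owned by the handler */
}

static const struct nf_queue_handler sample_qh = {
	.name	= "sample",
	.outfn	= sample_outfn,
};

static int __init sample_init(void)
{
	/* 0 on success, -EEXIST if this handler is already registered,
	 * -EBUSY if another handler owns the protocol family. */
	return nf_register_queue_handler(NFPROTO_IPV4, &sample_qh);
}

static void __exit sample_exit(void)
{
	/* Flush any pending entries before this point. */
	nf_unregister_queue_handler(NFPROTO_IPV4, &sample_qh);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");

Note that unregistration in the patched code checks that the caller actually owns the slot and calls synchronize_rcu() before returning, so once nf_unregister_queue_handler() completes no CPU can still be executing the handler's outfn().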