/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>

#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)	printk(KERN_DEBUG "%s(%d):%s(): " x,	\
				       __FILE__, __LINE__, __FUNCTION__,\
				       ## args)
#else
#define QDEBUG(x, ...)
#endif

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	atomic_t use;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	atomic_t id_sequence;			/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];

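/*
 * Instances are kept in a small hash table keyed by queue number.
 * instances_lock protects the table itself; each instance additionally
 * carries its own spinlock for the packet list and a use counter, so
 * instance_lookup_get() can hand out a reference without making the
 * caller hold the table lock across its whole critical section.
 */
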
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
	struct nfqnl_instance *inst;

	read_lock_bh(&instances_lock);
	inst = __instance_lookup(queue_num);
	if (inst)
		atomic_inc(&inst->use);
	read_unlock_bh(&instances_lock);

	return inst;
}

static void
instance_put(struct nfqnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use)) {
		QDEBUG("kfree(inst=%p)\n", inst);
		kfree(inst);
	}
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;

	QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(queue_num)) {
		inst = NULL;
		QDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	atomic_set(&inst->id_sequence, 0);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(queue_num)]);

	write_unlock_bh(&instances_lock);

	QDEBUG("successfully created new instance\n");

	return inst;

out_free:
	kfree(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	QDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->queue_num);
	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending skbs from the queue */
	nfqnl_flush(inst, NULL, 0);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 1);
}

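/*
 * Note on the two destroy variants: __instance_destroy() expects the
 * caller to hold instances_lock already (the netlink notifier walks the
 * hash under the write lock), while instance_destroy() takes and
 * releases the lock itself.
 */
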
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static int
__nfqnl_set_mode(struct nfqnl_instance *queue,
		 unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}

	return status;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}

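/*
 * nfqnl_flush() below drops every queued entry accepted by cmpfn (or all
 * entries when cmpfn is NULL), reinjecting each one with NF_DROP.  It is
 * used both when an instance goes away and when a device goes down.
 */
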
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	__be32 tmp_uint;

	QDEBUG("entered\n");

	/* all macros expand to constant values at compile time */
	size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch (queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entskb))) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;

	default:
		*errp = -EINVAL;
		spin_unlock_bh(&queue->lock);
		return NULL;
	}

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physindev) {
				tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
				NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physoutdev) {
				tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
				NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (entskb->mark) {
		tmp_uint = htonl(entskb->mark);
		NLA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
	}

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

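/*
 * Layout of the message built above, one queued packet per message:
 *
 *	struct nlmsghdr  (type = NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET)
 *	struct nfgenmsg  (res_id = queue number)
 *	NFQA_PACKET_HDR  (packet id, hw protocol, hook)
 *	NFQA_IFINDEX_*   (optional input/output/physical ifindexes)
 *	NFQA_MARK, NFQA_HWADDR, NFQA_TIMESTAMP  (optional metadata)
 *	NFQA_PAYLOAD     (up to copy_range bytes, NFQNL_COPY_PACKET only)
 */
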
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;

	QDEBUG("entered\n");

	queue = instance_lookup_get(queuenum);
	if (!queue) {
		QDEBUG("no queue instance matching\n");
		return -EINVAL;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		QDEBUG("mode COPY_NONE, aborting\n");
		status = -EAGAIN;
		goto err_out_put;
	}

	entry->id = atomic_inc_return(&queue->id_sequence);

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_put;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	instance_put(queue);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
err_out_put:
	instance_put(queue);
	return status;
}

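/*
 * Two distinct drop counters are kept above: queue_dropped counts packets
 * dropped because the queue hit queue_maxlen, while queue_user_dropped
 * counts packets dropped because nfnetlink_unicast() failed to deliver
 * to the peer (e.g. its socket buffer was full).
 */
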
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	int diff;
	int err;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

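/*
 * Example: if userspace returns a 40 byte NFQA_PAYLOAD for a 60 byte
 * packet, nfqnl_mangle() trims the skb to 40 bytes; a larger payload
 * grows the skb (expanding tailroom via pskb_expand_head() if needed),
 * bounded by the 0xFFFF limit of the 16-bit nla_len field.
 */
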
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* this only looks like we have to hold the readlock for a way too long
	 * time, issue_verdict(), nf_reinject(), ... - but we always only
	 * issue NF_DROP, which is processed directly in nf_reinject() */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	read_unlock_bh(&instances_lock);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	queue = instance_lookup_get(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_put;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_put;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_put;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_put;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(*(__be32 *)
					 nla_data(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	instance_put(queue);
	return 0;

err_out_put:
	instance_put(queue);
	return err;
}

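/*
 * Verdict flow, as a sketch: userspace answers each NFQNL_MSG_PACKET with
 * an NFQNL_MSG_VERDICT message whose NFQA_VERDICT_HDR carries the verdict
 * (e.g. NF_ACCEPT or NF_DROP) and the packet id from NFQA_PACKET_HDR.
 * An optional NFQA_PAYLOAD replaces the packet contents via nfqnl_mangle()
 * and an optional NFQA_MARK rewrites skb->mark before nf_reinject().
 */
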
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	int ret = 0;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	queue = instance_lookup_get(queue_num);
	if (nfqa[NFQA_CFG_CMD]) {
		struct nfqnl_msg_config_cmd *cmd;

		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue)
				return -EBUSY;

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid) {
				ret = -EPERM;
				goto out_put;
			}

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			ret = -ENODEV;
			goto out_put;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			ret = -EPERM;
			goto out_put;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

out_put:
	instance_put(queue);
	return ret;
}

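/*
 * Typical configuration sequence from userspace, as a sketch in terms of
 * the message types handled above (not a description of any particular
 * library API):
 *
 *	1. NFQNL_MSG_CONFIG, NFQA_CFG_CMD = NFQNL_CFG_CMD_PF_BIND (pf=AF_INET)
 *	   -> nf_register_queue_handler() attaches nfqh for IPv4.
 *	2. NFQNL_MSG_CONFIG, NFQA_CFG_CMD = NFQNL_CFG_CMD_BIND, res_id = N
 *	   -> instance_create() records the sender as peer_pid of queue N.
 *	3. NFQNL_MSG_CONFIG, NFQA_CFG_PARAMS = { copy_range, NFQNL_COPY_PACKET }
 *	   -> nfqnl_set_mode() enables payload copying.
 */
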
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	read_unlock_bh(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  atomic_read(&inst->id_sequence),
			  atomic_read(&inst->use));
}

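/*
 * Columns of /proc/net/netfilter/nfnetlink_queue, in order:
 * queue number, peer pid, queue length, copy mode, copy range,
 * dropped (queue full), dropped (unicast failure), id sequence,
 * instance use count.
 */
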
static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue)
		goto cleanup_subsys;
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);