[NETFILTER]: nfnetlink_{queue,log}: return ENOTSUPP for unknown cfg commands
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_total;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        unsigned int id_sequence;               /* 'sequence' of pkt ids */

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;

        spinlock_t lock;

        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
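
/* Example: queue_num 0x0102 hashes to ((0x0102 >> 8) | 0x0102) % 16
 * = 0x0103 % 16 = bucket 3; OR-ing the high byte in folds it into the
 * bucket index.
 */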

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst = NULL;
        unsigned int h;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num))
                goto out_unlock;

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst)
                goto out_unlock;

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);
        INIT_RCU_HEAD(&inst->rcu);

        if (!try_module_get(THIS_MODULE))
                goto out_free;

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry) {
                list_del(&entry->list);
                queue->queue_total--;
        }

        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;

        size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        spin_lock_bh(&queue->lock);

        switch ((enum nfqnl_config_mode)queue->copy_mode) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                data_len = 0;
                break;

        case NFQNL_COPY_PACKET:
                if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
                     entskb->ip_summed == CHECKSUM_COMPLETE) &&
                    skb_checksum_help(entskb)) {
                        spin_unlock_bh(&queue->lock);
                        return NULL;
                }
                if (queue->copy_range == 0
                    || queue->copy_range > entskb->len)
                        data_len = entskb->len;
                else
                        data_len = queue->copy_range;

                size += nla_total_size(data_len);
                break;
        }

        entry->id = queue->id_sequence++;

        spin_unlock_bh(&queue->lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        pmsg.packet_id          = htonl(entry->id);
        pmsg.hw_protocol        = entskb->protocol;
        pmsg.hook               = entry->hook;

        NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->br_port->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                             htonl(entskb->nf_bridge->physindev->ifindex));
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->br_port->br->dev->ifindex));
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                             htonl(entskb->nf_bridge->physoutdev->ifindex));
                }
#endif
        }

        if (entskb->mark)
                NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

        if (indev && entskb->dev) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                struct nlattr *nla;
                int size = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = size;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        if (net_ratelimit())
                printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}
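
/* On the wire, the message built above looks like:
 *
 *      struct nlmsghdr         type = NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET
 *      struct nfgenmsg         res_id = queue number
 *      NFQA_PACKET_HDR         packet id, hw_protocol, hook
 *      NFQA_IFINDEX_*          input/output (physical) device ifindexes
 *      NFQA_MARK               skb->mark, if nonzero
 *      NFQA_HWADDR             link-layer source address, if parseable
 *      NFQA_TIMESTAMP          receive timestamp, if set
 *      NFQA_PAYLOAD            up to copy_range bytes of packet data
 *                              (NFQNL_COPY_PACKET mode only)
 *
 * Userspace must echo the packet id from NFQA_PACKET_HDR back in its
 * verdict message.
 */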

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue)
                goto err_out;

        if (queue->copy_mode == NFQNL_COPY_NONE)
                goto err_out;

        nskb = nfqnl_build_packet_message(queue, entry);
        if (nskb == NULL)
                goto err_out;

        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid)
                goto err_out_free_nskb;

        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n",
                               queue->queue_total, queue->queue_dropped);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
err_out:
        return -1;
}

static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
        int diff;
        int err;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        err = pskb_expand_head(e->skb, 0,
                                               diff - skb_tailroom(e->skb),
                                               GFP_ATOMIC);
                        if (err) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return err;
                        }
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;

        }
        spin_unlock_bh(&queue->lock);

        return status;
}
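
/* A copy_range of 0 means "no limit": nfqnl_build_packet_message() then
 * copies the whole packet, so NFQNL_COPY_PACKET with range 0 delivers
 * full packets, and any range above 0xffff is silently clamped to fit
 * the 16-bit nla_len.
 */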

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, tmp, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (dev->nd_net != &init_net)
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_NETFILTER && n->pid) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        int err;

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (!queue) {
                err = -ENODEV;
                goto err_out_unlock;
        }

        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                err = -EPERM;
                goto err_out_unlock;
        }

        if (!nfqa[NFQA_VERDICT_HDR]) {
                err = -EINVAL;
                goto err_out_unlock;
        }

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
                err = -EINVAL;
                goto err_out_unlock;
        }

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL) {
                err = -ENOENT;
                goto err_out_unlock;
        }
        rcu_read_unlock();

        if (nfqa[NFQA_PAYLOAD]) {
                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;

err_out_unlock:
        rcu_read_unlock();
        return err;
}
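
/* Per nfqa_verdict_policy, a verdict message from userspace carries:
 *
 *      NFQA_VERDICT_HDR (required)  the verdict (NF_ACCEPT, NF_DROP, ...)
 *                                   plus the packet id echoed back
 *      NFQA_MARK        (optional)  replacement value for skb->mark
 *      NFQA_PAYLOAD     (optional)  replacement packet data, applied by
 *                                   nfqnl_mangle() before reinjection
 */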

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        ret = nf_register_queue_handler(ntohs(cmd->pf),
                                                        &nfqh);
                        break;
                case NFQNL_CFG_CMD_PF_UNBIND:
                        ret = nf_unregister_queue_handler(ntohs(cmd->pf),
                                                          &nfqh);
                        break;
                default:
                        break;
                }

                if (ret < 0)
                        return ret;
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (!queue) {
                                ret = -EINVAL;
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}
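
/*
 * Minimal userspace counterpart, sketched for illustration only (it is
 * not part of this file).  It assumes libnetfilter_queue, whose nfq_*()
 * calls emit exactly the NFQNL_MSG_CONFIG and NFQNL_MSG_VERDICT messages
 * handled above: PF unbind/bind, queue bind, copy-mode params, verdicts.
 * Error checking is omitted for brevity.
 *
 *      #include <stdint.h>
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <linux/netfilter.h>
 *      #include <libnetfilter_queue/libnetfilter_queue.h>
 *
 *      static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *                    struct nfq_data *nfa, void *data)
 *      {
 *              struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
 *              uint32_t id = ph ? ntohl(ph->packet_id) : 0;
 *
 *              // echo the packet id back and accept the packet
 *              return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *      }
 *
 *      int main(void)
 *      {
 *              struct nfq_handle *h = nfq_open();
 *              struct nfq_q_handle *qh;
 *              char buf[4096];
 *              int fd, rv;
 *
 *              nfq_unbind_pf(h, AF_INET);      // NFQNL_CFG_CMD_PF_UNBIND
 *              nfq_bind_pf(h, AF_INET);        // NFQNL_CFG_CMD_PF_BIND
 *              qh = nfq_create_queue(h, 0, &cb, NULL); // NFQNL_CFG_CMD_BIND
 *              nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff); // NFQA_CFG_PARAMS
 *
 *              fd = nfq_fd(h);
 *              while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
 *                      nfq_handle_packet(h, buf, rv); // dispatches to cb()
 *
 *              nfq_destroy_queue(qh);          // NFQNL_CFG_CMD_UNBIND
 *              nfq_close(h);
 *              return 0;
 *      }
 *
 * Packets reach queue 0 through a rule such as
 * "iptables -A INPUT -j NFQUEUE --queue-num 0".
 */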

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
        spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
}
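
/* Each line of /proc/net/netfilter/nfnetlink_queue printed above shows,
 * in order: queue number, peer pid, current queue length, copy mode,
 * copy range, queue_dropped, queue_user_dropped, next packet id, and a
 * trailing constant 1.
 */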

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc_nfqueue;
#endif

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
                                         proc_net_netfilter);
        if (!proc_nfqueue)
                goto cleanup_subsys;
        proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);