[NETFILTER]: {ip,ip6,nfnetlink}_queue: fix SKB_LINEAR_ASSERT when mangling packet...
net/netfilter/nfnetlink_queue.c
1 /*
2  * This is a module which is used for queueing packets and communicating with
3  * userspace via nfnetlink.
4  *
5  * (C) 2005 by Harald Welte <laforge@netfilter.org>
6  * (C) 2007 by Patrick McHardy <kaber@trash.net>
7  *
8  * Based on the old ipv4-only ip_queue.c:
9  * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
10  * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  *
16  */
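/*
 * Overview: packets directed to a queue by the nf_queue core (e.g. via an
 * iptables NFQUEUE target) enter through nfqnl_enqueue_packet() below, are
 * wrapped into an NFQNL_MSG_PACKET netlink message and unicast to the
 * process that bound the queue.  The packet itself stays on the instance's
 * queue_list until that process answers with an NFQNL_MSG_VERDICT, at which
 * point nfqnl_recv_verdict() hands it back to the stack via nf_reinject().
 *
 * Minimal userspace sketch, for orientation only; the nfq_* calls below come
 * from the separate libnetfilter_queue library, not from anything defined in
 * this file:
 *
 *	struct nfq_handle *h = nfq_open();
 *	struct nfq_q_handle *qh;
 *	char buf[4096];
 *	int fd, rv;
 *
 *	nfq_unbind_pf(h, AF_INET);
 *	nfq_bind_pf(h, AF_INET);                      (NFQNL_CFG_CMD_PF_BIND)
 *	qh = nfq_create_queue(h, 0, &cb, NULL);       (NFQNL_CFG_CMD_BIND, queue 0)
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);  (NFQA_CFG_PARAMS)
 *	fd = nfq_fd(h);
 *	while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
 *		nfq_handle_packet(h, buf, rv);        (invokes cb())
 *
 * where cb() reads the packet id with nfq_get_msg_packet_hdr() and answers
 * with nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL) (or NF_DROP, etc.).
 */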
17 #include <linux/module.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/spinlock.h>
21 #include <linux/notifier.h>
22 #include <linux/netdevice.h>
23 #include <linux/netfilter.h>
24 #include <linux/proc_fs.h>
25 #include <linux/netfilter_ipv4.h>
26 #include <linux/netfilter_ipv6.h>
27 #include <linux/netfilter/nfnetlink.h>
28 #include <linux/netfilter/nfnetlink_queue.h>
29 #include <linux/list.h>
30 #include <net/sock.h>
31 #include <net/netfilter/nf_queue.h>
32
33 #include <asm/atomic.h>
34
35 #ifdef CONFIG_BRIDGE_NETFILTER
36 #include "../bridge/br_private.h"
37 #endif
38
39 #define NFQNL_QMAX_DEFAULT 1024
40
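/*
 * Per-queue state.  One instance exists for each queue number that a
 * userspace process has bound with NFQNL_CFG_CMD_BIND; it is owned by that
 * process's netlink pid and linked into the instance_table hash below.
 * Packets waiting for a verdict sit on queue_list, protected by 'lock'.
 */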
41 struct nfqnl_instance {
42         struct hlist_node hlist;                /* global list of queues */
43         struct rcu_head rcu;
44
45         int peer_pid;
46         unsigned int queue_maxlen;
47         unsigned int copy_range;
48         unsigned int queue_total;
49         unsigned int queue_dropped;
50         unsigned int queue_user_dropped;
51
52         unsigned int id_sequence;               /* 'sequence' of pkt ids */
53
54         u_int16_t queue_num;                    /* number of this queue */
55         u_int8_t copy_mode;
56
57         spinlock_t lock;
58
59         struct list_head queue_list;            /* packets in queue */
60 };
61
62 typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
63
64 static DEFINE_SPINLOCK(instances_lock);
65
66 #define INSTANCE_BUCKETS        16
67 static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;
68
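/*
 * Hash a queue number into one of the INSTANCE_BUCKETS buckets by folding
 * its high byte onto its low byte.
 */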
69 static inline u_int8_t instance_hashfn(u_int16_t queue_num)
70 {
71         return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
72 }
73
74 static struct nfqnl_instance *
75 instance_lookup(u_int16_t queue_num)
76 {
77         struct hlist_head *head;
78         struct hlist_node *pos;
79         struct nfqnl_instance *inst;
80
81         head = &instance_table[instance_hashfn(queue_num)];
82         hlist_for_each_entry_rcu(inst, pos, head, hlist) {
83                 if (inst->queue_num == queue_num)
84                         return inst;
85         }
86         return NULL;
87 }
88
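/*
 * Create a new queue instance bound to the given netlink pid.  Called from
 * nfqnl_recv_config() under rcu_read_lock(), hence the GFP_ATOMIC
 * allocation; returns an ERR_PTR() on failure.
 */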
89 static struct nfqnl_instance *
90 instance_create(u_int16_t queue_num, int pid)
91 {
92         struct nfqnl_instance *inst;
93         unsigned int h;
94         int err;
95
96         spin_lock(&instances_lock);
97         if (instance_lookup(queue_num)) {
98                 err = -EEXIST;
99                 goto out_unlock;
100         }
101
102         inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
103         if (!inst) {
104                 err = -ENOMEM;
105                 goto out_unlock;
106         }
107
108         inst->queue_num = queue_num;
109         inst->peer_pid = pid;
110         inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
111         inst->copy_range = 0xfffff;
112         inst->copy_mode = NFQNL_COPY_NONE;
113         spin_lock_init(&inst->lock);
114         INIT_LIST_HEAD(&inst->queue_list);
115         INIT_RCU_HEAD(&inst->rcu);
116
117         if (!try_module_get(THIS_MODULE)) {
118                 err = -EAGAIN;
119                 goto out_free;
120         }
121
122         h = instance_hashfn(queue_num);
123         hlist_add_head_rcu(&inst->hlist, &instance_table[h]);
124
125         spin_unlock(&instances_lock);
126
127         return inst;
128
129 out_free:
130         kfree(inst);
131 out_unlock:
132         spin_unlock(&instances_lock);
133         return ERR_PTR(err);
134 }
135
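/*
 * Instance teardown: the instance is unlinked from the hash under
 * instances_lock and only freed from an RCU callback after a grace period,
 * so lockless readers in instance_lookup() never see freed memory.  Any
 * packets still queued are reinjected with NF_DROP by nfqnl_flush().
 */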
136 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
137                         unsigned long data);
138
139 static void
140 instance_destroy_rcu(struct rcu_head *head)
141 {
142         struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
143                                                    rcu);
144
145         nfqnl_flush(inst, NULL, 0);
146         kfree(inst);
147         module_put(THIS_MODULE);
148 }
149
150 static void
151 __instance_destroy(struct nfqnl_instance *inst)
152 {
153         hlist_del_rcu(&inst->hlist);
154         call_rcu(&inst->rcu, instance_destroy_rcu);
155 }
156
157 static void
158 instance_destroy(struct nfqnl_instance *inst)
159 {
160         spin_lock(&instances_lock);
161         __instance_destroy(inst);
162         spin_unlock(&instances_lock);
163 }
164
165 static inline void
166 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
167 {
168         list_add_tail(&entry->list, &queue->queue_list);
169         queue->queue_total++;
170 }
171
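/*
 * Remove and return the queued entry whose id matches the packet id echoed
 * back by userspace, or NULL if it is no longer on the queue.
 */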
172 static struct nf_queue_entry *
173 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
174 {
175         struct nf_queue_entry *entry = NULL, *i;
176
177         spin_lock_bh(&queue->lock);
178
179         list_for_each_entry(i, &queue->queue_list, list) {
180                 if (i->id == id) {
181                         entry = i;
182                         break;
183                 }
184         }
185
186         if (entry) {
187                 list_del(&entry->list);
188                 queue->queue_total--;
189         }
190
191         spin_unlock_bh(&queue->lock);
192
193         return entry;
194 }
195
196 static void
197 nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
198 {
199         struct nf_queue_entry *entry, *next;
200
201         spin_lock_bh(&queue->lock);
202         list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
203                 if (!cmpfn || cmpfn(entry, data)) {
204                         list_del(&entry->list);
205                         queue->queue_total--;
206                         nf_reinject(entry, NF_DROP);
207                 }
208         }
209         spin_unlock_bh(&queue->lock);
210 }
211
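/*
 * Build the NFQNL_MSG_PACKET netlink message for one queued packet: an
 * nfgenmsg header followed by attributes for the packet header, input/output
 * ifindexes (plus the physical devices in the bridge case), mark, hardware
 * address, timestamp and, in NFQNL_COPY_PACKET mode, up to copy_range bytes
 * of payload.  The skb is sized up front from the worst-case attribute
 * sizes, so the NLA_PUTs below are not expected to run out of tailroom.
 */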
212 static struct sk_buff *
213 nfqnl_build_packet_message(struct nfqnl_instance *queue,
214                            struct nf_queue_entry *entry)
215 {
216         sk_buff_data_t old_tail;
217         size_t size;
218         size_t data_len = 0;
219         struct sk_buff *skb;
220         struct nfqnl_msg_packet_hdr pmsg;
221         struct nlmsghdr *nlh;
222         struct nfgenmsg *nfmsg;
223         struct sk_buff *entskb = entry->skb;
224         struct net_device *indev;
225         struct net_device *outdev;
226
227         size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
228                 + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
229                 + nla_total_size(sizeof(u_int32_t))     /* ifindex */
230                 + nla_total_size(sizeof(u_int32_t))     /* ifindex */
231 #ifdef CONFIG_BRIDGE_NETFILTER
232                 + nla_total_size(sizeof(u_int32_t))     /* ifindex */
233                 + nla_total_size(sizeof(u_int32_t))     /* ifindex */
234 #endif
235                 + nla_total_size(sizeof(u_int32_t))     /* mark */
236                 + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
237                 + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
238
239         outdev = entry->outdev;
240
241         spin_lock_bh(&queue->lock);
242
243         switch ((enum nfqnl_config_mode)queue->copy_mode) {
244         case NFQNL_COPY_META:
245         case NFQNL_COPY_NONE:
246                 data_len = 0;
247                 break;
248
249         case NFQNL_COPY_PACKET:
250                 if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
251                      entskb->ip_summed == CHECKSUM_COMPLETE) &&
252                     skb_checksum_help(entskb)) {
253                         spin_unlock_bh(&queue->lock);
254                         return NULL;
255                 }
256                 if (queue->copy_range == 0
257                     || queue->copy_range > entskb->len)
258                         data_len = entskb->len;
259                 else
260                         data_len = queue->copy_range;
261
262                 size += nla_total_size(data_len);
263                 break;
264         }
265
266         entry->id = queue->id_sequence++;
267
268         spin_unlock_bh(&queue->lock);
269
270         skb = alloc_skb(size, GFP_ATOMIC);
271         if (!skb)
272                 goto nlmsg_failure;
273
274         old_tail = skb->tail;
275         nlh = NLMSG_PUT(skb, 0, 0,
276                         NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
277                         sizeof(struct nfgenmsg));
278         nfmsg = NLMSG_DATA(nlh);
279         nfmsg->nfgen_family = entry->pf;
280         nfmsg->version = NFNETLINK_V0;
281         nfmsg->res_id = htons(queue->queue_num);
282
283         pmsg.packet_id          = htonl(entry->id);
284         pmsg.hw_protocol        = entskb->protocol;
285         pmsg.hook               = entry->hook;
286
287         NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
288
289         indev = entry->indev;
290         if (indev) {
291 #ifndef CONFIG_BRIDGE_NETFILTER
292                 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
293 #else
294                 if (entry->pf == PF_BRIDGE) {
295                         /* Case 1: indev is physical input device, we need to
296                          * look for bridge group (when called from
297                          * netfilter_bridge) */
298                         NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
299                                      htonl(indev->ifindex));
300                         /* this is the bridge group "brX" */
301                         NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
302                                      htonl(indev->br_port->br->dev->ifindex));
303                 } else {
304                         /* Case 2: indev is bridge group, we need to look for
305                          * physical device (when called from ipv4) */
306                         NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
307                                      htonl(indev->ifindex));
308                         if (entskb->nf_bridge && entskb->nf_bridge->physindev)
309                                 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
310                                              htonl(entskb->nf_bridge->physindev->ifindex));
311                 }
312 #endif
313         }
314
315         if (outdev) {
316 #ifndef CONFIG_BRIDGE_NETFILTER
317                 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
318 #else
319                 if (entry->pf == PF_BRIDGE) {
320                         /* Case 1: outdev is physical output device, we need to
321                          * look for bridge group (when called from
322                          * netfilter_bridge) */
323                         NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
324                                      htonl(outdev->ifindex));
325                         /* this is the bridge group "brX" */
326                         NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
327                                      htonl(outdev->br_port->br->dev->ifindex));
328                 } else {
329                         /* Case 2: outdev is bridge group, we need to look for
330                          * physical output device (when called from ipv4) */
331                         NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
332                                      htonl(outdev->ifindex));
333                         if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
334                                 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
335                                              htonl(entskb->nf_bridge->physoutdev->ifindex));
336                 }
337 #endif
338         }
339
340         if (entskb->mark)
341                 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
342
343         if (indev && entskb->dev) {
344                 struct nfqnl_msg_packet_hw phw;
345                 int len = dev_parse_header(entskb, phw.hw_addr);
346                 if (len) {
347                         phw.hw_addrlen = htons(len);
348                         NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
349                 }
350         }
351
352         if (entskb->tstamp.tv64) {
353                 struct nfqnl_msg_packet_timestamp ts;
354                 struct timeval tv = ktime_to_timeval(entskb->tstamp);
355                 ts.sec = cpu_to_be64(tv.tv_sec);
356                 ts.usec = cpu_to_be64(tv.tv_usec);
357
358                 NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
359         }
360
361         if (data_len) {
362                 struct nlattr *nla;
363                 int sz = nla_attr_size(data_len);
364
365                 if (skb_tailroom(skb) < nla_total_size(data_len)) {
366                         printk(KERN_WARNING "nf_queue: no tailroom!\n");
367                         goto nlmsg_failure;
368                 }
369
370                 nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
371                 nla->nla_type = NFQA_PAYLOAD;
372                 nla->nla_len = sz;
373
374                 if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
375                         BUG();
376         }
377
378         nlh->nlmsg_len = skb->tail - old_tail;
379         return skb;
380
381 nlmsg_failure:
382 nla_put_failure:
383         if (skb)
384                 kfree_skb(skb);
385         if (net_ratelimit())
386                 printk(KERN_ERR "nf_queue: error creating packet message\n");
387         return NULL;
388 }
389
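/*
 * Hook-side entry point, registered through the nf_queue_handler 'nfqh'
 * further down.  It builds the netlink message, unicasts it to the pid bound
 * to the queue and only then links the entry onto queue_list; it returns -1
 * on any failure so the nf_queue core can dispose of the packet.
 */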
390 static int
391 nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
392 {
393         struct sk_buff *nskb;
394         struct nfqnl_instance *queue;
395         int err;
396
397         /* rcu_read_lock()ed by nf_hook_slow() */
398         queue = instance_lookup(queuenum);
399         if (!queue)
400                 goto err_out;
401
402         if (queue->copy_mode == NFQNL_COPY_NONE)
403                 goto err_out;
404
405         nskb = nfqnl_build_packet_message(queue, entry);
406         if (nskb == NULL)
407                 goto err_out;
408
409         spin_lock_bh(&queue->lock);
410
411         if (!queue->peer_pid)
412                 goto err_out_free_nskb;
413
414         if (queue->queue_total >= queue->queue_maxlen) {
415                 queue->queue_dropped++;
416                 if (net_ratelimit())
417                           printk(KERN_WARNING "nf_queue: full at %d entries, "
418                                  "dropping packet(s). Dropped: %d\n",
419                                  queue->queue_total, queue->queue_dropped);
420                 goto err_out_free_nskb;
421         }
422
423         /* nfnetlink_unicast will either free the nskb or add it to a socket */
424         err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
425         if (err < 0) {
426                 queue->queue_user_dropped++;
427                 goto err_out_unlock;
428         }
429
430         __enqueue_entry(queue, entry);
431
432         spin_unlock_bh(&queue->lock);
433         return 0;
434
435 err_out_free_nskb:
436         kfree_skb(nskb);
437 err_out_unlock:
438         spin_unlock_bh(&queue->lock);
439 err_out:
440         return -1;
441 }
442
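/*
 * Replace the queued skb's payload with data supplied by userspace in
 * NFQA_PAYLOAD.  The skb is shrunk with pskb_trim() or grown (reallocating
 * through skb_copy_expand() when tailroom is short), and skb_make_writable()
 * makes the affected range writable and linear before
 * skb_copy_to_linear_data() overwrites it; that call appears to be the point
 * of the SKB_LINEAR_ASSERT fix named in the commit subject above.
 */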
443 static int
444 nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
445 {
446         struct sk_buff *nskb;
447         int diff;
448
449         diff = data_len - e->skb->len;
450         if (diff < 0) {
451                 if (pskb_trim(e->skb, data_len))
452                         return -ENOMEM;
453         } else if (diff > 0) {
454                 if (data_len > 0xFFFF)
455                         return -EINVAL;
456                 if (diff > skb_tailroom(e->skb)) {
457                         nskb = skb_copy_expand(e->skb, 0,
458                                                diff - skb_tailroom(e->skb),
459                                                GFP_ATOMIC);
460                         if (!nskb) {
461                                 printk(KERN_WARNING "nf_queue: OOM "
462                                       "in mangle, dropping packet\n");
463                                 return -ENOMEM;
464                         }
465                         kfree_skb(e->skb);
466                         e->skb = nskb;
467                 }
468                 skb_put(e->skb, diff);
469         }
470         if (!skb_make_writable(e->skb, data_len))
471                 return -ENOMEM;
472         skb_copy_to_linear_data(e->skb, data, data_len);
473         e->skb->ip_summed = CHECKSUM_NONE;
474         return 0;
475 }
476
477 static int
478 nfqnl_set_mode(struct nfqnl_instance *queue,
479                unsigned char mode, unsigned int range)
480 {
481         int status = 0;
482
483         spin_lock_bh(&queue->lock);
484         switch (mode) {
485         case NFQNL_COPY_NONE:
486         case NFQNL_COPY_META:
487                 queue->copy_mode = mode;
488                 queue->copy_range = 0;
489                 break;
490
491         case NFQNL_COPY_PACKET:
492                 queue->copy_mode = mode;
493                 /* we're using struct nlattr which has 16bit nla_len */
494                 if (range > 0xffff)
495                         queue->copy_range = 0xffff;
496                 else
497                         queue->copy_range = range;
498                 break;
499
500         default:
501                 status = -EINVAL;
502
503         }
504         spin_unlock_bh(&queue->lock);
505
506         return status;
507 }
508
509 static int
510 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
511 {
512         if (entry->indev)
513                 if (entry->indev->ifindex == ifindex)
514                         return 1;
515         if (entry->outdev)
516                 if (entry->outdev->ifindex == ifindex)
517                         return 1;
518 #ifdef CONFIG_BRIDGE_NETFILTER
519         if (entry->skb->nf_bridge) {
520                 if (entry->skb->nf_bridge->physindev &&
521                     entry->skb->nf_bridge->physindev->ifindex == ifindex)
522                         return 1;
523                 if (entry->skb->nf_bridge->physoutdev &&
524                     entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
525                         return 1;
526         }
527 #endif
528         return 0;
529 }
530
531 /* drop all packets with either indev or outdev == ifindex from all queue
532  * instances */
533 static void
534 nfqnl_dev_drop(int ifindex)
535 {
536         int i;
537
538         rcu_read_lock();
539
540         for (i = 0; i < INSTANCE_BUCKETS; i++) {
541                 struct hlist_node *tmp;
542                 struct nfqnl_instance *inst;
543                 struct hlist_head *head = &instance_table[i];
544
545                 hlist_for_each_entry_rcu(inst, tmp, head, hlist)
546                         nfqnl_flush(inst, dev_cmp, ifindex);
547         }
548
549         rcu_read_unlock();
550 }
551
552 #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
553
554 static int
555 nfqnl_rcv_dev_event(struct notifier_block *this,
556                     unsigned long event, void *ptr)
557 {
558         struct net_device *dev = ptr;
559
560         if (dev->nd_net != &init_net)
561                 return NOTIFY_DONE;
562
563         /* Drop any packets associated with the downed device */
564         if (event == NETDEV_DOWN)
565                 nfqnl_dev_drop(dev->ifindex);
566         return NOTIFY_DONE;
567 }
568
569 static struct notifier_block nfqnl_dev_notifier = {
570         .notifier_call  = nfqnl_rcv_dev_event,
571 };
572
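/*
 * When a NETLINK_NETFILTER socket is released, destroy every queue instance
 * owned by that pid so that orphaned queues do not hold packets forever.
 */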
573 static int
574 nfqnl_rcv_nl_event(struct notifier_block *this,
575                    unsigned long event, void *ptr)
576 {
577         struct netlink_notify *n = ptr;
578
579         if (event == NETLINK_URELEASE &&
580             n->protocol == NETLINK_NETFILTER && n->pid) {
581                 int i;
582
583                 /* destroy all instances for this pid */
584                 spin_lock(&instances_lock);
585                 for (i = 0; i < INSTANCE_BUCKETS; i++) {
586                         struct hlist_node *tmp, *t2;
587                         struct nfqnl_instance *inst;
588                         struct hlist_head *head = &instance_table[i];
589
590                         hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
591                                 if ((n->net == &init_net) &&
592                                     (n->pid == inst->peer_pid))
593                                         __instance_destroy(inst);
594                         }
595                 }
596                 spin_unlock(&instances_lock);
597         }
598         return NOTIFY_DONE;
599 }
600
601 static struct notifier_block nfqnl_rtnl_notifier = {
602         .notifier_call  = nfqnl_rcv_nl_event,
603 };
604
605 static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
606         [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
607         [NFQA_MARK]             = { .type = NLA_U32 },
608         [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
609 };
610
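/*
 * NFQNL_MSG_VERDICT handler: look up the queue, check that the sender is the
 * pid that owns it, dequeue the entry by packet id, optionally apply a new
 * payload (NFQA_PAYLOAD) and mark (NFQA_MARK), then hand the packet back to
 * the stack with nf_reinject() and the requested verdict.
 */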
611 static int
612 nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
613                    struct nlmsghdr *nlh, struct nlattr *nfqa[])
614 {
615         struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
616         u_int16_t queue_num = ntohs(nfmsg->res_id);
617
618         struct nfqnl_msg_verdict_hdr *vhdr;
619         struct nfqnl_instance *queue;
620         unsigned int verdict;
621         struct nf_queue_entry *entry;
622         int err;
623
624         rcu_read_lock();
625         queue = instance_lookup(queue_num);
626         if (!queue) {
627                 err = -ENODEV;
628                 goto err_out_unlock;
629         }
630
631         if (queue->peer_pid != NETLINK_CB(skb).pid) {
632                 err = -EPERM;
633                 goto err_out_unlock;
634         }
635
636         if (!nfqa[NFQA_VERDICT_HDR]) {
637                 err = -EINVAL;
638                 goto err_out_unlock;
639         }
640
641         vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
642         verdict = ntohl(vhdr->verdict);
643
644         if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
645                 err = -EINVAL;
646                 goto err_out_unlock;
647         }
648
649         entry = find_dequeue_entry(queue, ntohl(vhdr->id));
650         if (entry == NULL) {
651                 err = -ENOENT;
652                 goto err_out_unlock;
653         }
654         rcu_read_unlock();
655
656         if (nfqa[NFQA_PAYLOAD]) {
657                 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
658                                  nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
659                         verdict = NF_DROP;
660         }
661
662         if (nfqa[NFQA_MARK])
663                 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
664
665         nf_reinject(entry, verdict);
666         return 0;
667
668 err_out_unlock:
669         rcu_read_unlock();
670         return err;
671 }
672
673 static int
674 nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
675                   struct nlmsghdr *nlh, struct nlattr *nfqa[])
676 {
677         return -ENOTSUPP;
678 }
679
680 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
681         [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
682         [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
683 };
684
685 static const struct nf_queue_handler nfqh = {
686         .name   = "nf_queue",
687         .outfn  = &nfqnl_enqueue_packet,
688 };
689
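/*
 * NFQNL_MSG_CONFIG handler.  PF_BIND/PF_UNBIND (un)register 'nfqh' as the
 * queue handler for a protocol family; BIND/UNBIND create or destroy the
 * per-queue instance owned by the sending pid; NFQA_CFG_PARAMS and
 * NFQA_CFG_QUEUE_MAXLEN adjust the copy mode/range and the queue length.
 */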
690 static int
691 nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
692                   struct nlmsghdr *nlh, struct nlattr *nfqa[])
693 {
694         struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
695         u_int16_t queue_num = ntohs(nfmsg->res_id);
696         struct nfqnl_instance *queue;
697         struct nfqnl_msg_config_cmd *cmd = NULL;
698         int ret = 0;
699
700         if (nfqa[NFQA_CFG_CMD]) {
701                 cmd = nla_data(nfqa[NFQA_CFG_CMD]);
702
703                 /* Commands without queue context - might sleep */
704                 switch (cmd->command) {
705                 case NFQNL_CFG_CMD_PF_BIND:
706                         ret = nf_register_queue_handler(ntohs(cmd->pf),
707                                                         &nfqh);
708                         break;
709                 case NFQNL_CFG_CMD_PF_UNBIND:
710                         ret = nf_unregister_queue_handler(ntohs(cmd->pf),
711                                                           &nfqh);
712                         break;
713                 default:
714                         break;
715                 }
716
717                 if (ret < 0)
718                         return ret;
719         }
720
721         rcu_read_lock();
722         queue = instance_lookup(queue_num);
723         if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
724                 ret = -EPERM;
725                 goto err_out_unlock;
726         }
727
728         if (cmd != NULL) {
729                 switch (cmd->command) {
730                 case NFQNL_CFG_CMD_BIND:
731                         if (queue) {
732                                 ret = -EBUSY;
733                                 goto err_out_unlock;
734                         }
735                         queue = instance_create(queue_num, NETLINK_CB(skb).pid);
736                         if (IS_ERR(queue)) {
737                                 ret = PTR_ERR(queue);
738                                 goto err_out_unlock;
739                         }
740                         break;
741                 case NFQNL_CFG_CMD_UNBIND:
742                         if (!queue) {
743                                 ret = -ENODEV;
744                                 goto err_out_unlock;
745                         }
746                         instance_destroy(queue);
747                         break;
748                 case NFQNL_CFG_CMD_PF_BIND:
749                 case NFQNL_CFG_CMD_PF_UNBIND:
750                         break;
751                 default:
752                         ret = -ENOTSUPP;
753                         break;
754                 }
755         }
756
757         if (nfqa[NFQA_CFG_PARAMS]) {
758                 struct nfqnl_msg_config_params *params;
759
760                 if (!queue) {
761                         ret = -ENODEV;
762                         goto err_out_unlock;
763                 }
764                 params = nla_data(nfqa[NFQA_CFG_PARAMS]);
765                 nfqnl_set_mode(queue, params->copy_mode,
766                                 ntohl(params->copy_range));
767         }
768
769         if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
770                 __be32 *queue_maxlen;
771
772                 if (!queue) {
773                         ret = -ENODEV;
774                         goto err_out_unlock;
775                 }
776                 queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
777                 spin_lock_bh(&queue->lock);
778                 queue->queue_maxlen = ntohl(*queue_maxlen);
779                 spin_unlock_bh(&queue->lock);
780         }
781
782 err_out_unlock:
783         rcu_read_unlock();
784         return ret;
785 }
786
787 static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
788         [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
789                                     .attr_count = NFQA_MAX, },
790         [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
791                                     .attr_count = NFQA_MAX,
792                                     .policy = nfqa_verdict_policy },
793         [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
794                                     .attr_count = NFQA_CFG_MAX,
795                                     .policy = nfqa_cfg_policy },
796 };
797
798 static const struct nfnetlink_subsystem nfqnl_subsys = {
799         .name           = "nf_queue",
800         .subsys_id      = NFNL_SUBSYS_QUEUE,
801         .cb_count       = NFQNL_MSG_MAX,
802         .cb             = nfqnl_cb,
803 };
804
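/*
 * /proc/net/netfilter/nfnetlink_queue: one line per instance showing queue
 * number, peer pid, current queue length, copy mode, copy range, packets
 * dropped because the queue was full, packets dropped because the netlink
 * send failed, the next packet id, and a constant 1.
 */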
805 #ifdef CONFIG_PROC_FS
806 struct iter_state {
807         unsigned int bucket;
808 };
809
810 static struct hlist_node *get_first(struct seq_file *seq)
811 {
812         struct iter_state *st = seq->private;
813
814         if (!st)
815                 return NULL;
816
817         for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
818                 if (!hlist_empty(&instance_table[st->bucket]))
819                         return instance_table[st->bucket].first;
820         }
821         return NULL;
822 }
823
824 static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
825 {
826         struct iter_state *st = seq->private;
827
828         h = h->next;
829         while (!h) {
830                 if (++st->bucket >= INSTANCE_BUCKETS)
831                         return NULL;
832
833                 h = instance_table[st->bucket].first;
834         }
835         return h;
836 }
837
838 static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
839 {
840         struct hlist_node *head;
841         head = get_first(seq);
842
843         if (head)
844                 while (pos && (head = get_next(seq, head)))
845                         pos--;
846         return pos ? NULL : head;
847 }
848
849 static void *seq_start(struct seq_file *seq, loff_t *pos)
850         __acquires(instances_lock)
851 {
852         spin_lock(&instances_lock);
853         return get_idx(seq, *pos);
854 }
855
856 static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
857 {
858         (*pos)++;
859         return get_next(s, v);
860 }
861
862 static void seq_stop(struct seq_file *s, void *v)
863         __releases(instances_lock)
864 {
865         spin_unlock(&instances_lock);
866 }
867
868 static int seq_show(struct seq_file *s, void *v)
869 {
870         const struct nfqnl_instance *inst = v;
871
872         return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
873                           inst->queue_num,
874                           inst->peer_pid, inst->queue_total,
875                           inst->copy_mode, inst->copy_range,
876                           inst->queue_dropped, inst->queue_user_dropped,
877                           inst->id_sequence, 1);
878 }
879
880 static const struct seq_operations nfqnl_seq_ops = {
881         .start  = seq_start,
882         .next   = seq_next,
883         .stop   = seq_stop,
884         .show   = seq_show,
885 };
886
887 static int nfqnl_open(struct inode *inode, struct file *file)
888 {
889         return seq_open_private(file, &nfqnl_seq_ops,
890                         sizeof(struct iter_state));
891 }
892
893 static const struct file_operations nfqnl_file_ops = {
894         .owner   = THIS_MODULE,
895         .open    = nfqnl_open,
896         .read    = seq_read,
897         .llseek  = seq_lseek,
898         .release = seq_release_private,
899 };
900
901 #endif /* PROC_FS */
902
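/*
 * Module init registers the netlink release notifier, the nfnetlink
 * subsystem, the proc entry and the netdevice notifier; the queue handler
 * itself is only registered per protocol family when userspace sends
 * NFQNL_CFG_CMD_PF_BIND, see nfqnl_recv_config() above.
 */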
903 static int __init nfnetlink_queue_init(void)
904 {
905         int i, status = -ENOMEM;
906 #ifdef CONFIG_PROC_FS
907         struct proc_dir_entry *proc_nfqueue;
908 #endif
909
910         for (i = 0; i < INSTANCE_BUCKETS; i++)
911                 INIT_HLIST_HEAD(&instance_table[i]);
912
913         netlink_register_notifier(&nfqnl_rtnl_notifier);
914         status = nfnetlink_subsys_register(&nfqnl_subsys);
915         if (status < 0) {
916                 printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
917                 goto cleanup_netlink_notifier;
918         }
919
920 #ifdef CONFIG_PROC_FS
921         proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
922                                          proc_net_netfilter);
923         if (!proc_nfqueue)
924                 goto cleanup_subsys;
925         proc_nfqueue->proc_fops = &nfqnl_file_ops;
926 #endif
927
928         register_netdevice_notifier(&nfqnl_dev_notifier);
929         return status;
930
931 #ifdef CONFIG_PROC_FS
932 cleanup_subsys:
933         nfnetlink_subsys_unregister(&nfqnl_subsys);
934 #endif
935 cleanup_netlink_notifier:
936         netlink_unregister_notifier(&nfqnl_rtnl_notifier);
937         return status;
938 }
939
940 static void __exit nfnetlink_queue_fini(void)
941 {
942         nf_unregister_queue_handlers(&nfqh);
943         unregister_netdevice_notifier(&nfqnl_dev_notifier);
944 #ifdef CONFIG_PROC_FS
945         remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
946 #endif
947         nfnetlink_subsys_unregister(&nfqnl_subsys);
948         netlink_unregister_notifier(&nfqnl_rtnl_notifier);
949 }
950
951 MODULE_DESCRIPTION("netfilter packet queue handler");
952 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
953 MODULE_LICENSE("GPL");
954 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
955
956 module_init(nfnetlink_queue_init);
957 module_exit(nfnetlink_queue_fini);