2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
15 * - new extension header parser code
16 * 15 Oct 2005 Harald Welte <laforge@netfilter.org>
17 * - Unification of {ip,ip6}_tables into x_tables
18 * - Removed tcp and udp code, since it's not ipv6 specific
21 #include <linux/capability.h>
22 #include <linux/config.h>
24 #include <linux/skbuff.h>
25 #include <linux/kmod.h>
26 #include <linux/vmalloc.h>
27 #include <linux/netdevice.h>
28 #include <linux/module.h>
29 #include <linux/icmpv6.h>
31 #include <asm/uaccess.h>
32 #include <asm/semaphore.h>
33 #include <linux/proc_fs.h>
34 #include <linux/cpumask.h>
36 #include <linux/netfilter_ipv6/ip6_tables.h>
37 #include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

/* Sizes of the fixed IPv6 header and of a generic extension header. */
#define IPV6_HDR_LEN (sizeof(struct ipv6hdr))
#define IPV6_OPTHDR_LEN (sizeof(struct ipv6_opt_hdr))

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf(): packet-path debug output, real printk only when
 * DEBUG_IP_FIREWALL is defined.
 * NOTE(review): the #else/#endif lines are missing from this excerpt. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#define dprintf(format, args...)

/* duprintf(): userspace/config-path debug output (DEBUG_IP_FIREWALL_USER).
 * NOTE(review): #else/#endif elided in this excerpt. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#define duprintf(format, args...)

/* IP_NF_ASSERT(): loud assertion when CONFIG_NETFILTER_DEBUG is on,
 * no-op otherwise.  NOTE(review): the surrounding do/while lines and
 * #else/#endif are elided in this excerpt. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) \
    printk("IP_NF_ASSERT: %s:%s:%u\n", \
           __FUNCTION__, __FILE__, __LINE__); \
#define IP_NF_ASSERT(x)

#include <linux/netfilter_ipv4/listhelp.h>

/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */

/* Debug wrappers around the semaphore primitives: log every down/up with
 * the call site's line number (normally compiled out). */
#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
/*
 * Compare two IPv6 addresses under a mask, byte by byte.
 *
 * Returns 0 when (addr1 & mask) == (addr2 & mask) for all 16 bytes
 * (memcmp-style: 0 means "equal"), 1 at the first differing byte.
 * (Reconstruction: the excerpt had dropped the declaration line, braces
 * and return statements of this function.)
 */
int
ip6_masked_addrcmp(const struct in6_addr *addr1, const struct in6_addr *mask,
                   const struct in6_addr *addr2)
{
    int i;

    for (i = 0; i < 16; i++) {
        if ((addr1->s6_addr[i] & mask->s6_addr[i]) !=
            (addr2->s6_addr[i] & mask->s6_addr[i]))
            return 1;
    }
    return 0;
}
110 /* Check for an extension */
112 ip6t_ext_hdr(u8 nexthdr)
114 return ( (nexthdr == IPPROTO_HOPOPTS) ||
115 (nexthdr == IPPROTO_ROUTING) ||
116 (nexthdr == IPPROTO_FRAGMENT) ||
117 (nexthdr == IPPROTO_ESP) ||
118 (nexthdr == IPPROTO_AH) ||
119 (nexthdr == IPPROTO_NONE) ||
120 (nexthdr == IPPROTO_DSTOPTS) );
/* Returns whether matches rule or not. */
/* NOTE(review): this excerpt elides several lines of the original
 * (function header/braces and parameters such as indev/outdev/fragoff),
 * so the text below is not compilable as shown. */
ip6_packet_match(const struct sk_buff *skb,
                 const struct ip6t_ip6 *ip6info,
                 unsigned int *protoff,
    /* Fixed IPv6 header at the network-header offset. */
    const struct ipv6hdr *ipv6 = skb->nh.ipv6h;

    /* XOR a test result with the rule's inversion flag for that test. */
#define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))

    /* Masked source/destination address compare (0 == equal). */
    if (FWINV(ip6_masked_addrcmp(&ipv6->saddr, &ip6info->smsk,
                                 &ip6info->src), IP6T_INV_SRCIP)
        || FWINV(ip6_masked_addrcmp(&ipv6->daddr, &ip6info->dmsk,
                                    &ip6info->dst), IP6T_INV_DSTIP)) {
        dprintf("Source or dest mismatch.\n");
/*      dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
            ipinfo->smsk.s_addr, ipinfo->src.s_addr,
            ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
        dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
            ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
            ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/

    /* Look for ifname matches; this should unroll nicely. */
    /* Word-wise masked compare of the input device name. */
    for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
        ret |= (((const unsigned long *)indev)[i]
                ^ ((const unsigned long *)ip6info->iniface)[i])
               & ((const unsigned long *)ip6info->iniface_mask)[i];

    if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
        dprintf("VIA in mismatch (%s vs %s).%s\n",
                indev, ip6info->iniface,
                ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");

    /* Same word-wise compare for the output device name. */
    for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
        ret |= (((const unsigned long *)outdev)[i]
                ^ ((const unsigned long *)ip6info->outiface)[i])
               & ((const unsigned long *)ip6info->outiface_mask)[i];

    if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
        dprintf("VIA out mismatch (%s vs %s).%s\n",
                outdev, ip6info->outiface,
                ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");

    /* ... might want to do something with class and flowlabel here ... */

    /* look for the desired protocol header */
    if((ip6info->flags & IP6T_F_PROTO)) {
        unsigned short _frag_off;

        /* Walk the extension-header chain; target -1 means "last header". */
        protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);

        *fragoff = _frag_off;

        dprintf("Packet protocol %hi ?= %s%hi.\n",
                ip6info->invflags & IP6T_INV_PROTO ? "!":"",

        if (ip6info->proto == protohdr) {
            if(ip6info->invflags & IP6T_INV_PROTO) {

        /* We need match for the '-p all', too! */
        if ((ip6info->proto != 0) &&
            !(ip6info->invflags & IP6T_INV_PROTO))
213 /* should be ip6 safe */
215 ip6_checkentry(const struct ip6t_ip6 *ipv6)
217 if (ipv6->flags & ~IP6T_F_MASK) {
218 duprintf("Unknown flag bits set: %08X\n",
219 ipv6->flags & ~IP6T_F_MASK);
222 if (ipv6->invflags & ~IP6T_INV_MASK) {
223 duprintf("Unknown invflag bits set: %08X\n",
224 ipv6->invflags & ~IP6T_INV_MASK);
/* Target function for the built-in ERROR target: logs the error name
 * carried in targinfo.
 * NOTE(review): the excerpt elides the return-type line, part of the
 * parameter list and the final return statement (presumably NF_DROP —
 * confirm against the full source). */
ip6t_error(struct sk_buff **pskb,
           const struct net_device *in,
           const struct net_device *out,
           unsigned int hooknum,
           const void *targinfo,
    printk("ip6_tables: error: `%s'\n", (char *)targinfo);
/* Run one match extension against the packet.  Per the comment below, a
 * nonzero return stops the MATCH_ITERATE loop (i.e. the match failed);
 * 0 continues to the next match.
 * NOTE(review): the excerpt elides several lines (some parameters and
 * the return statements). */
int do_match(struct ip6t_entry_match *m,
             const struct sk_buff *skb,
             const struct net_device *in,
             const struct net_device *out,
             unsigned int protoff,
    /* Stop iteration if it doesn't match */
    if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
                                  offset, protoff, hotdrop))
/* Translate a byte offset within the flat table blob into a pointer to
 * the ip6t_entry that lives there.  (Reconstruction: braces were dropped
 * from the excerpt; note base+offset on void* relies on the GCC
 * extension, as the original kernel code did.) */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
    return (struct ip6t_entry *)(base + offset);
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Core packet-path traversal of a rule table.
 * NOTE(review): this excerpt elides the function header/braces and many
 * interior lines of the traversal loop; the text below is as extracted. */
ip6t_do_table(struct sk_buff **pskb,
              const struct net_device *in,
              const struct net_device *out,
              struct xt_table *table,
    static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
    unsigned int protoff = 0;
    /* Initializing verdict to NF_DROP keeps gcc happy. */
    unsigned int verdict = NF_DROP;
    const char *indev, *outdev;
    struct ip6t_entry *e, *back;
    struct xt_table_info *private;

    /* Absent devices compare as all-zero names (see nulldevname). */
    indev = in ? in->name : nulldevname;
    outdev = out ? out->name : nulldevname;
    /* We handle fragments by dealing with the first fragment as
     * if it was a normal packet. All other fragments are treated
     * normally, except that they will NEVER match rules that ask
     * things we don't know, ie. tcp syn flag or ports). If the
     * rule is also a fragment-specific rule, non-fragments won't
     * ... (NOTE(review): remainder of this comment elided) */
    read_lock_bh(&table->lock);
    private = table->private;
    IP_NF_ASSERT(table->valid_hooks & (1 << hook));
    /* Per-CPU copy of the rules: counters updated without cross-CPU
     * write contention. */
    table_base = (void *)private->entries[smp_processor_id()];
    e = get_entry(table_base, private->hook_entry[hook]);

#ifdef CONFIG_NETFILTER_DEBUG
    /* Check noone else using our table */
    if (((struct ip6t_entry *)table_base)->comefrom != 0xdead57ac
        && ((struct ip6t_entry *)table_base)->comefrom != 0xeeeeeeec) {
        printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
               &((struct ip6t_entry *)table_base)->comefrom,
               ((struct ip6t_entry *)table_base)->comefrom);
    /* Mark the table "in use" for the reentrancy check below. */
    ((struct ip6t_entry *)table_base)->comefrom = 0x57acc001;

    /* For return from builtin chain */
    back = get_entry(table_base, private->underflow[hook]);

        if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6,
                             &protoff, &offset)) {
            struct ip6t_entry_target *t;

            /* Nonzero from do_match means some match failed. */
            if (IP6T_MATCH_ITERATE(e, do_match,
                                   offset, protoff, &hotdrop) != 0)

            ADD_COUNTER(e->counters,
                        ntohs((*pskb)->nh.ipv6h->payload_len)

            t = ip6t_get_target(e);
            IP_NF_ASSERT(t->u.kernel.target);
            /* Standard target? */
            if (!t->u.kernel.target->target) {
                v = ((struct ip6t_standard_target *)t)->verdict;

                /* Pop from stack? */
                if (v != IP6T_RETURN) {
                    /* Negative verdicts encode NF_* codes as -code-1. */
                    verdict = (unsigned)(-v) - 1;
                    back = get_entry(table_base,
                if (table_base + v != (void *)e + e->next_offset
                    && !(e->ipv6.flags & IP6T_F_GOTO)) {
                    /* Save old back ptr in next entry */
                    struct ip6t_entry *next
                        = (void *)e + e->next_offset;
                        = (void *)back - table_base;
                    /* set back pointer to next entry */
                e = get_entry(table_base, v);
                /* Targets which reenter must return
                   ... (NOTE(review): comment truncated in excerpt) */
#ifdef CONFIG_NETFILTER_DEBUG
                ((struct ip6t_entry *)table_base)->comefrom
                verdict = t->u.kernel.target->target(pskb,
#ifdef CONFIG_NETFILTER_DEBUG
                if (((struct ip6t_entry *)table_base)->comefrom
                    && verdict == IP6T_CONTINUE) {
                    printk("Target %s reentered!\n",
                           t->u.kernel.target->name);
                ((struct ip6t_entry *)table_base)->comefrom
                if (verdict == IP6T_CONTINUE)
                    e = (void *)e + e->next_offset;

            /* No match: fall through to the next rule. */
            e = (void *)e + e->next_offset;

#ifdef CONFIG_NETFILTER_DEBUG
    /* Mark the table free again. */
    ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac;
    read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
418 /* All zeroes == unconditional rule. */
420 unconditional(const struct ip6t_ip6 *ipv6)
424 for (i = 0; i < sizeof(*ipv6); i++)
425 if (((char *)ipv6)[i])
428 return (i == sizeof(*ipv6));
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/* NOTE(review): the excerpt elides the return-type line, braces and
 * several interior lines of this depth-first chain walk. */
mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
    /* No recursion; use packet counter to save back ptrs (reset
       to 0 as we leave), and comefrom to save source hook bitmask */
    for (hook = 0; hook < NF_IP6_NUMHOOKS; hook++) {
        unsigned int pos = newinfo->hook_entry[hook];
            = (struct ip6t_entry *)(entry0 + pos);

        if (!(valid_hooks & (1 << hook)))

        /* Set initial back pointer. */
        e->counters.pcnt = pos;

            struct ip6t_standard_target *t
                = (void *)ip6t_get_target(e);

            /* Bit NF_IP6_NUMHOOKS marks "currently on the walk stack";
             * seeing it again means a loop. */
            if (e->comefrom & (1 << NF_IP6_NUMHOOKS)) {
                printk("iptables: loop hook %u pos %u %08X.\n",
                       hook, pos, e->comefrom);
                |= ((1 << hook) | (1 << NF_IP6_NUMHOOKS));

            /* Unconditional return/END. */
            if (e->target_offset == sizeof(struct ip6t_entry)
                && (strcmp(t->target.u.user.name,
                           IP6T_STANDARD_TARGET) == 0)
                && unconditional(&e->ipv6)) {
                unsigned int oldpos, size;

                /* Return: backtrack through the last
                   ... (NOTE(review): comment truncated in excerpt) */
                e->comefrom ^= (1<<NF_IP6_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                    & (1 << NF_IP6_NUMHOOKS)) {
                    duprintf("Back unset "
                    /* Pop the saved back pointer and clear it. */
                    pos = e->counters.pcnt;
                    e->counters.pcnt = 0;

                    /* We're at the start. */
                    e = (struct ip6t_entry *)
                } while (oldpos == pos + e->next_offset);

                /* Move on to the rule after the returned-to one. */
                size = e->next_offset;
                e = (struct ip6t_entry *)
                    (entry0 + pos + size);
                e->counters.pcnt = pos;
                int newpos = t->verdict;

                if (strcmp(t->target.u.user.name,
                           IP6T_STANDARD_TARGET) == 0
                    /* This a jump; chase it. */
                    duprintf("Jump rule %u -> %u\n",
                    /* ... this is a fallthru */
                    newpos = pos + e->next_offset;
                e = (struct ip6t_entry *)
                e->counters.pcnt = pos;

        duprintf("Finished chain %u\n", hook);
529 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
531 if (i && (*i)-- == 0)
534 if (m->u.kernel.match->destroy)
535 m->u.kernel.match->destroy(m->u.kernel.match, m->data,
536 m->u.match_size - sizeof(*m));
537 module_put(m->u.kernel.match->me);
542 standard_check(const struct ip6t_entry_target *t,
543 unsigned int max_offset)
545 struct ip6t_standard_target *targ = (void *)t;
547 /* Check standard info. */
548 if (targ->verdict >= 0
549 && targ->verdict > max_offset - sizeof(struct ip6t_entry)) {
550 duprintf("ip6t_standard_check: bad verdict (%i)\n",
554 if (targ->verdict < -NF_MAX_VERDICT - 1) {
555 duprintf("ip6t_standard_check: bad negative verdict (%i)\n",
/* Find, bind and validate one match extension for a rule, auto-loading
 * the "ip6t_<name>" module if needed.
 * NOTE(review): the excerpt elides the return-type line, some
 * parameters, braces and the error-path labels. */
check_match(struct ip6t_entry_match *m,
            const struct ip6t_ip6 *ipv6,
            unsigned int hookmask,
    struct ip6t_match *match;

    match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
                                    "ip6t_%s", m->u.user.name);
    if (IS_ERR(match) || !match) {
        duprintf("check_match: `%s' not found\n", m->u.user.name);
        return match ? PTR_ERR(match) : -ENOENT;
    m->u.kernel.match = match;

    /* Generic x_tables validation (size, hook mask, protocol). */
    ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
                         name, hookmask, ipv6->proto,
                         ipv6->invflags & IP6T_INV_PROTO);

    /* Then the match's own checkentry hook, if it has one. */
    if (m->u.kernel.match->checkentry
        && !m->u.kernel.match->checkentry(name, ipv6, match, m->data,
                                          m->u.match_size - sizeof(*m),
        duprintf("ip_tables: check failed for `%s'.\n",
                 m->u.kernel.match->name);

    /* Error path: release the module reference taken above. */
    module_put(m->u.kernel.match->me);
/* Forward declaration so check_entry below can special-case the
 * built-in standard target. */
static struct ip6t_target ip6t_standard_target;

/* Validate one complete rule: its IPv6 selector, every match, and its
 * target; on failure, unwind the matches already checked.
 * NOTE(review): the excerpt elides the return-type line, braces,
 * several error returns and label definitions. */
check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
    struct ip6t_entry_target *t;
    struct ip6t_target *target;

    if (!ip6_checkentry(&e->ipv6)) {
        duprintf("ip_tables: ip check failed %p %s.\n", e, name);

    /* j counts matches successfully checked, for partial cleanup. */
    ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j);
        goto cleanup_matches;

    t = ip6t_get_target(e);
    target = try_then_request_module(xt_find_target(AF_INET6,
                                     "ip6t_%s", t->u.user.name);
    if (IS_ERR(target) || !target) {
        duprintf("check_entry: `%s' not found\n", t->u.user.name);
        ret = target ? PTR_ERR(target) : -ENOENT;
        goto cleanup_matches;
    t->u.kernel.target = target;

    ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
                          name, e->comefrom, e->ipv6.proto,
                          e->ipv6.invflags & IP6T_INV_PROTO);

    /* Built-in standard targets get extra verdict validation. */
    if (t->u.kernel.target == &ip6t_standard_target) {
        if (!standard_check(t, size)) {
            goto cleanup_matches;
    } else if (t->u.kernel.target->checkentry
               && !t->u.kernel.target->checkentry(name, e, target, t->data,
        duprintf("ip_tables: check failed for `%s'.\n",
                 t->u.kernel.target->name);

    /* Error path: drop the target module and unwind j matches. */
    module_put(t->u.kernel.target->me);
    IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* Per-entry pass over the raw user blob: verify alignment and size, and
 * record which offsets are hook entry points / underflows.
 * NOTE(review): the excerpt elides the return-type line, some
 * parameters, braces and return statements. */
check_entry_size_and_hooks(struct ip6t_entry *e,
                           struct xt_table_info *newinfo,
                           unsigned char *limit,
                           const unsigned int *hook_entries,
                           const unsigned int *underflows,
    /* Entry must be properly aligned and must fit before 'limit'. */
    if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
        || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
        duprintf("Bad offset %p\n", e);

        /* Entry must at least hold its header plus a target header. */
        < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
        duprintf("checking: element %p size %u\n",

    /* Check hooks & underflows */
    for (h = 0; h < NF_IP6_NUMHOOKS; h++) {
        if ((unsigned char *)e - base == hook_entries[h])
            newinfo->hook_entry[h] = hook_entries[h];
        if ((unsigned char *)e - base == underflows[h])
            newinfo->underflow[h] = underflows[h];

    /* FIXME: underflows must be unconditional, standard verdicts
       < 0 (not IP6T_RETURN). --RR */

    /* Clear counters and comefrom */
    e->counters = ((struct xt_counters) { 0, 0 });
712 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
714 struct ip6t_entry_target *t;
716 if (i && (*i)-- == 0)
719 /* Cleanup all matches */
720 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
721 t = ip6t_get_target(e);
722 if (t->u.kernel.target->destroy)
723 t->u.kernel.target->destroy(t->u.kernel.target, t->data,
724 t->u.target_size - sizeof(*t));
725 module_put(t->u.kernel.target->me);
/* Checks and translates the user-supplied table segment (held in
 * 'entry0') into kernel form inside 'newinfo'.
 * NOTE(review): the excerpt elides part of the parameter list, braces,
 * error returns and the per-CPU copy loop header. */
translate_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info *newinfo,
                const unsigned int *hook_entries,
                const unsigned int *underflows)
    newinfo->size = size;
    newinfo->number = number;

    /* Init all hooks to impossible value. */
    for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
        newinfo->hook_entry[i] = 0xFFFFFFFF;
        newinfo->underflow[i] = 0xFFFFFFFF;

    duprintf("translate_table: size %u\n", newinfo->size);

    /* Walk through entries, checking offsets. */
    ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
                             check_entry_size_and_hooks,
                             hook_entries, underflows, &i);

        duprintf("translate_table: %u not %u entries\n",

    /* Check hooks all assigned */
    for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
        /* Only hooks which are valid */
        if (!(valid_hooks & (1 << i)))
        if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
            duprintf("Invalid hook entry %u %u\n",
        if (newinfo->underflow[i] == 0xFFFFFFFF) {
            duprintf("Invalid underflow %u %u\n",

    /* Reject rule sets containing chain loops (returns 0 on a loop). */
    if (!mark_source_chains(newinfo, valid_hooks, entry0))

    /* Finally, each sanity check must pass */
    ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
                             check_entry, name, size, &i);
        /* On failure, unwind the i entries already validated. */
        IP6T_ENTRY_ITERATE(entry0, newinfo->size,

    /* And one copy for every other CPU */
    if (newinfo->entries[i] && newinfo->entries[i] != entry0)
        memcpy(newinfo->entries[i], entry0, newinfo->size);
813 add_entry_to_counter(const struct ip6t_entry *e,
814 struct xt_counters total[],
817 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
824 set_entry_to_counter(const struct ip6t_entry *e,
825 struct ip6t_counters total[],
828 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Sum all per-CPU rule counters into one array: SET from the current
 * CPU's copy first, then ADD every other CPU's copy on top.
 * NOTE(review): the excerpt elides the return type, braces, the
 * for_each_cpu loop header and iterator arguments. */
get_counters(const struct xt_table_info *t,
             struct xt_counters counters[])
    /* Instead of clearing (by a previous call to memset())
     * the counters and using adds, we set the counters
     * with data used by 'current' CPU
     * We dont care about preemption here.
     */
    curcpu = raw_smp_processor_id();

    IP6T_ENTRY_ITERATE(t->entries[curcpu],
                       set_entry_to_counter,

        IP6T_ENTRY_ITERATE(t->entries[cpu],
                           add_entry_to_counter,
/* Copy the live table out to userspace with a consistent snapshot of the
 * counters, then patch each match/target name in the user copy to the
 * kernel-resolved name.
 * NOTE(review): the excerpt elides the return-type line, braces, error
 * returns and some copy_to_user source arguments. */
copy_entries_to_user(unsigned int total_size,
                     struct xt_table *table,
                     void __user *userptr)
    unsigned int off, num, countersize;
    struct ip6t_entry *e;
    struct xt_counters *counters;
    struct xt_table_info *private = table->private;

    /* We need atomic snapshot of counters: rest doesn't change
       (other than comefrom, which userspace doesn't care
       about ... NOTE(review): comment truncated in excerpt) */
    countersize = sizeof(struct xt_counters) * private->number;
    counters = vmalloc(countersize);

    if (counters == NULL)

    /* First, sum counters... */
    write_lock_bh(&table->lock);
    get_counters(private, counters);
    write_unlock_bh(&table->lock);

    /* choose the copy that is on our node/cpu */
    loc_cpu_entry = private->entries[raw_smp_processor_id()];
    if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {

    /* FIXME: use iterator macros --RR */
    /* ... then go back and fix counters and names */
    for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
        struct ip6t_entry_match *m;
        struct ip6t_entry_target *t;

        e = (struct ip6t_entry *)(loc_cpu_entry + off);
        /* Patch the summed counters over the user copy of this entry. */
        if (copy_to_user(userptr + off
                         + offsetof(struct ip6t_entry, counters),
                         sizeof(counters[num])) != 0) {

        /* Patch each match's kernel-resolved name. */
        for (i = sizeof(struct ip6t_entry);
             i < e->target_offset;
             i += m->u.match_size) {

            if (copy_to_user(userptr + off + i
                             + offsetof(struct ip6t_entry_match,
                             m->u.kernel.match->name,
                             strlen(m->u.kernel.match->name)+1)

        /* And the target's kernel-resolved name. */
        t = ip6t_get_target(e);
        if (copy_to_user(userptr + off + e->target_offset
                         + offsetof(struct ip6t_entry_target,
                         t->u.kernel.target->name,
                         strlen(t->u.kernel.target->name)+1) != 0) {
/* IP6T_SO_GET_ENTRIES handler: look the table up by name and stream its
 * rules to userspace.
 * NOTE(review): the excerpt elides the return-type line, braces and the
 * module/table unlock path. */
get_entries(const struct ip6t_get_entries *entries,
            struct ip6t_get_entries __user *uptr)
    t = xt_find_table_lock(AF_INET6, entries->name);
    if (t && !IS_ERR(t)) {
        struct xt_table_info *private = t->private;
        duprintf("t->private->number = %u\n", private->number);
        /* Userspace's size must match exactly, else it is out of sync. */
        if (entries->size == private->size)
            ret = copy_entries_to_user(private->size,
                                       t, uptr->entrytable);
            duprintf("get_entries: I've got %u not %u!\n",
                     private->size, entries->size);

        /* Table not found (or lookup error). */
        ret = t ? PTR_ERR(t) : -ENOENT;
/* IP6T_SO_SET_REPLACE handler: install a complete new ruleset supplied
 * by userspace and hand the old counters back.
 * NOTE(review): the excerpt elides the return-type line, braces, several
 * error returns and parts of the overflow checks. */
do_replace(void __user *user, unsigned int len)
    struct ip6t_replace tmp;
    struct xt_table_info *newinfo, *oldinfo;
    struct xt_counters *counters;
    void *loc_cpu_entry, *loc_cpu_old_entry;

    if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)

    /* Overflow checks on the user-supplied sizes/counts. */
    if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
    if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))

    newinfo = xt_alloc_table_info(tmp.size);

    /* choose the copy that is on our node/cpu */
    loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
    if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),

    counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));

    ret = translate_table(tmp.name, tmp.valid_hooks,
                          newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
                          tmp.hook_entry, tmp.underflow);
        goto free_newinfo_counters;

    duprintf("ip_tables: Translated table\n");

    t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
                                "ip6table_%s", tmp.name);
    if (!t || IS_ERR(t)) {
        ret = t ? PTR_ERR(t) : -ENOENT;
        goto free_newinfo_counters_untrans;

    /* The replacement must target the same set of hooks. */
    if (tmp.valid_hooks != t->valid_hooks) {
        duprintf("Valid hook crap: %08X vs %08X\n",
                 tmp.valid_hooks, t->valid_hooks);

    oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);

    /* Update module usage count based on number of rules */
    duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
             oldinfo->number, oldinfo->initial_entries, newinfo->number);
    if ((oldinfo->number > oldinfo->initial_entries) ||
        (newinfo->number <= oldinfo->initial_entries))
    if ((oldinfo->number > oldinfo->initial_entries) &&
        (newinfo->number <= oldinfo->initial_entries))

    /* Get the old counters. */
    get_counters(oldinfo, counters);
    /* Decrease module usage counts and free resource */
    loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
    IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
    xt_free_table_info(oldinfo);
    if (copy_to_user(tmp.counters, counters,
                     sizeof(struct xt_counters) * tmp.num_counters) != 0)

 free_newinfo_counters_untrans:
    IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo_counters:
    xt_free_table_info(newinfo);
1076 /* We're lazy, and add to the first CPU; overflow works its fey magic
1077 * and everything is OK. */
1079 add_counter_to_entry(struct ip6t_entry *e,
1080 const struct xt_counters addme[],
1084 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1086 (long unsigned int)e->counters.pcnt,
1087 (long unsigned int)e->counters.bcnt,
1088 (long unsigned int)addme[*i].pcnt,
1089 (long unsigned int)addme[*i].bcnt);
1092 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* IP6T_SO_SET_ADD_COUNTERS handler: add a user-supplied counter array
 * onto the live table under the table write lock.
 * NOTE(review): the excerpt elides the return-type line, braces, error
 * returns and label definitions. */
do_add_counters(void __user *user, unsigned int len)
    struct xt_counters_info tmp, *paddc;
    struct xt_table_info *private;
    void *loc_cpu_entry;

    if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)

    /* Length must cover the header plus exactly num_counters entries. */
    if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))

    paddc = vmalloc(len);

    if (copy_from_user(paddc, user, len) != 0) {

    t = xt_find_table_lock(AF_INET6, tmp.name);
    if (!t || IS_ERR(t)) {
        ret = t ? PTR_ERR(t) : -ENOENT;

    write_lock_bh(&t->lock);
    private = t->private;
    /* Counter array must be the same length as the rule table. */
    if (private->number != paddc->num_counters) {
        goto unlock_up_free;

    /* Choose the copy that is on our node */
    loc_cpu_entry = private->entries[smp_processor_id()];
    IP6T_ENTRY_ITERATE(loc_cpu_entry,
                       add_counter_to_entry,

    write_unlock_bh(&t->lock);
/* setsockopt() dispatcher for ip6_tables; requires CAP_NET_ADMIN.
 * NOTE(review): the excerpt elides the return-type line, braces, the
 * switch header and break/return statements. */
do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
    if (!capable(CAP_NET_ADMIN))

    case IP6T_SO_SET_REPLACE:
        ret = do_replace(user, len);

    case IP6T_SO_SET_ADD_COUNTERS:
        ret = do_add_counters(user, len);

        /* default: unknown option number. */
        duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* getsockopt() dispatcher for ip6_tables: table info, rule dumps and
 * match/target revision queries; requires CAP_NET_ADMIN.
 * NOTE(review): the excerpt elides the return-type line, braces, break
 * statements and parts of the revision lookup. */
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
    if (!capable(CAP_NET_ADMIN))

    case IP6T_SO_GET_INFO: {
        char name[IP6T_TABLE_MAXNAMELEN];

        if (*len != sizeof(struct ip6t_getinfo)) {
            duprintf("length %u != %u\n", *len,
                     sizeof(struct ip6t_getinfo));

        if (copy_from_user(name, user, sizeof(name)) != 0) {

        /* Paranoia: force NUL termination of the user-supplied name. */
        name[IP6T_TABLE_MAXNAMELEN-1] = '\0';

        t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
                                    "ip6table_%s", name);
        if (t && !IS_ERR(t)) {
            struct ip6t_getinfo info;
            struct xt_table_info *private = t->private;

            info.valid_hooks = t->valid_hooks;
            memcpy(info.hook_entry, private->hook_entry,
                   sizeof(info.hook_entry));
            memcpy(info.underflow, private->underflow,
                   sizeof(info.underflow));
            info.num_entries = private->number;
            info.size = private->size;
            memcpy(info.name, name, sizeof(info.name));

            if (copy_to_user(user, &info, *len) != 0)

            /* Table not found (or lookup error). */
            ret = t ? PTR_ERR(t) : -ENOENT;

    case IP6T_SO_GET_ENTRIES: {
        struct ip6t_get_entries get;

        if (*len < sizeof(get)) {
            duprintf("get_entries: %u < %u\n", *len, sizeof(get));
        } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
        } else if (*len != sizeof(struct ip6t_get_entries) + get.size) {
            duprintf("get_entries: %u != %u\n", *len,
                     sizeof(struct ip6t_get_entries) + get.size);
            ret = get_entries(&get, user);

    case IP6T_SO_GET_REVISION_MATCH:
    case IP6T_SO_GET_REVISION_TARGET: {
        struct ip6t_get_revision rev;

        if (*len != sizeof(rev)) {

        if (copy_from_user(&rev, user, sizeof(rev)) != 0) {

        if (cmd == IP6T_SO_GET_REVISION_TARGET)

            try_then_request_module(xt_find_revision(AF_INET6, rev.name,
                                    "ip6t_%s", rev.name);

        /* default: unknown option number. */
        duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a new table with x_tables: copy its initial ruleset into a
 * fresh xt_table_info, translate it, and hand it over.
 * NOTE(review): the excerpt elides braces, several error returns and
 * the trailing translate_table arguments. */
int ip6t_register_table(struct xt_table *table,
                        const struct ip6t_replace *repl)
    struct xt_table_info *newinfo;
    static struct xt_table_info bootstrap
        = { 0, 0, 0, { 0 }, { 0 }, { } };
    void *loc_cpu_entry;

    newinfo = xt_alloc_table_info(repl->size);

    /* choose the copy on our node/cpu */
    loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
    memcpy(loc_cpu_entry, repl->entries, repl->size);

    ret = translate_table(table->name, table->valid_hooks,
                          newinfo, loc_cpu_entry, repl->size,
        /* Translation failed: release the allocated table info. */
        xt_free_table_info(newinfo);

    if (xt_register_table(table, &bootstrap, newinfo) != 0) {
        xt_free_table_info(newinfo);
1317 void ip6t_unregister_table(struct xt_table *table)
1319 struct xt_table_info *private;
1320 void *loc_cpu_entry;
1322 private = xt_unregister_table(table);
1324 /* Decrease module usage counts and free resources */
1325 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1326 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
1327 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* 'invert' flips the result (0 or 1 in, 0 or 1 out via XOR).
 * (Reconstruction: declaration line, the 'invert' parameter line and
 * braces were dropped from the excerpt.) */
static inline int
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
                      u_int8_t type, u_int8_t code,
                      int invert)
{
    return (type == test_type && code >= min_code && code <= max_code)
        ^ invert;
}
/* Match callback for the built-in "icmp6" match: pull the ICMPv6 header
 * at 'protoff' and compare type/code against the rule's range.
 * NOTE(review): the excerpt elides the return-type line, some
 * parameters, braces, the fragment check body and the hotdrop path. */
icmp6_match(const struct sk_buff *skb,
            const struct net_device *in,
            const struct net_device *out,
            const void *matchinfo,
            unsigned int protoff,
    struct icmp6hdr _icmp, *ic;
    const struct ip6t_icmp *icmpinfo = matchinfo;

    /* Must not be a fragment. */

    ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
        /* We've been asked to examine this packet, and we
           can't. Hence, no choice but to drop. */
        duprintf("Dropping evil ICMP tinygram.\n");

    return icmp6_type_code_match(icmpinfo->type,
                                 ic->icmp6_type, ic->icmp6_code,
                                 !!(icmpinfo->invflags&IP6T_ICMP_INV));
/* Called when user tries to insert an entry of this type. */
/* NOTE(review): the excerpt elides the return-type line, several
 * parameters of this checkentry callback (including 'matchinfo') and
 * the braces. */
icmp6_checkentry(const char *tablename,
                 unsigned int matchsize,
                 unsigned int hook_mask)
    const struct ip6t_icmp *icmpinfo = matchinfo;

    /* Must specify no unknown invflags */
    return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
/* The built-in targets: standard (NULL) and error. */
/* NOTE(review): the closing "};" of each initializer and some fields
 * are elided in this excerpt. */
static struct ip6t_target ip6t_standard_target = {
    .name       = IP6T_STANDARD_TARGET,
    .targetsize = sizeof(int),

static struct ip6t_target ip6t_error_target = {
    .name       = IP6T_ERROR_TARGET,
    .target     = ip6t_error,
    .targetsize = IP6T_FUNCTION_MAXNAMELEN,

/* get/setsockopt registration: routes the IP6T_SO_* option range to the
 * dispatchers above. */
static struct nf_sockopt_ops ip6t_sockopts = {
    .set_optmin = IP6T_BASE_CTL,
    .set_optmax = IP6T_SO_SET_MAX+1,
    .set        = do_ip6t_set_ctl,
    .get_optmin = IP6T_BASE_CTL,
    .get_optmax = IP6T_SO_GET_MAX+1,
    .get        = do_ip6t_get_ctl,

/* Built-in ICMPv6 match extension. */
static struct ip6t_match icmp6_matchstruct = {
    .match      = &icmp6_match,
    .matchsize  = sizeof(struct ip6t_icmp),
    .checkentry = icmp6_checkentry,
    .proto      = IPPROTO_ICMPV6,
/* Module init: register the AF_INET6 family with x_tables, the built-in
 * targets and match, and the sockopt interface.
 * NOTE(review): braces, error-path returns and the final return are
 * elided in this excerpt. */
static int __init init(void)
    xt_proto_init(AF_INET6);

    /* Noone else will be downing sem now, so we won't sleep */
    xt_register_target(AF_INET6, &ip6t_standard_target);
    xt_register_target(AF_INET6, &ip6t_error_target);
    xt_register_match(AF_INET6, &icmp6_matchstruct);

    /* Register setsockopt */
    ret = nf_register_sockopt(&ip6t_sockopts);
        duprintf("Unable to register sockopts.\n");
        /* Roll back the family registration on failure. */
        xt_proto_fini(AF_INET6);

    printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
1439 static void __exit fini(void)
1441 nf_unregister_sockopt(&ip6t_sockopts);
1442 xt_unregister_match(AF_INET6, &icmp6_matchstruct);
1443 xt_unregister_target(AF_INET6, &ip6t_error_target);
1444 xt_unregister_target(AF_INET6, &ip6t_standard_target);
1445 xt_proto_fini(AF_INET6);
/*
 * find the offset to specified header or the protocol number of last header
 * if target < 0. "last header" is transport protocol header, ESP, or
 * If target header is found, its offset is set in *offset and return protocol
 * number. Otherwise, return -1.
 *
 * Note that non-1st fragment is special case that "the protocol number
 * of last header" is "next header" field in Fragment header. In this case,
 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
 * is not NULL (NOTE(review): comment closer reconstructed; some lines of
 * the original comment are elided in this excerpt). */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                  int target, unsigned short *fragoff)
    /* Byte offset of the first extension header: right after the fixed
     * IPv6 header. */
    unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data;
    u8 nexthdr = skb->nh.ipv6h->nexthdr;
    unsigned int len = skb->len - start;

    while (nexthdr != target) {
        struct ipv6_opt_hdr _hdr, *hp;
        unsigned int hdrlen;

        /* Reached a non-extension (i.e. transport) header, or NONE. */
        if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {

        hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
        if (nexthdr == NEXTHDR_FRAGMENT) {
            unsigned short _frag_off, *fp;
            fp = skb_header_pointer(skb,
                                    start+offsetof(struct frag_hdr,
            /* Mask off the low 3 flag bits to get the fragment offset. */
            _frag_off = ntohs(*fp) & ~0x7;
                ((!ipv6_ext_hdr(hp->nexthdr)) ||
                 nexthdr == NEXTHDR_NONE)) {
                *fragoff = _frag_off;
        } else if (nexthdr == NEXTHDR_AUTH)
            /* AH header length is in 32-bit words, minus 2. */
            hdrlen = (hp->hdrlen + 2) << 2;
            hdrlen = ipv6_optlen(hp);

        nexthdr = hp->nexthdr;
/* Public entry points used by the per-table modules (ip6table_filter,
 * ip6table_mangle, ...) and by other IPv6 netfilter code. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);
EXPORT_SYMBOL(ip6_masked_addrcmp);