/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
/* Module metadata for modinfo/modprobe. */
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/* Compile-time debug switches; normally all disabled. */
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
/*
 * dprintf: packet-path debug output; duprintf: user/configuration-path
 * debug output. Both compile to nothing unless the matching DEBUG_*
 * switch above is enabled.
 * NOTE(review): the #else/#endif lines of these conditionals are missing
 * from this extract -- restore from the pristine source before building.
 */
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
/* IP_NF_ASSERT: sanity assertion, active only under CONFIG_NETFILTER_DEBUG. */
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
/*
 * Matches a packet against the IPv6 part of a rule: masked source and
 * destination addresses, in/out interface names (word-wise compare with
 * mask), and the transport protocol located via ipv6_find_hdr(); each
 * check can be inverted through ip6info->invflags.
 * NOTE(review): this extract is missing structural lines (return type,
 * braces, local declarations, return statements); only comments added.
 */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR the raw match result with the rule's inversion flag. */
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
/* Same word-wise masked compare for the output interface name. */
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
155 unsigned short _frag_off;
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
177 /* We need match for the '-p all', too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
185 /* should be ip6 safe */
/*
 * Validates the user-supplied ip6t_ip6 part of a rule: rejects any
 * flag or inversion-flag bits outside the known masks.
 * NOTE(review): braces and return statements are missing from this
 * extract; only comments were added.
 */
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
/*
 * Target handler for the built-in ERROR target: logs the error name
 * carried in targinfo. Hitting this at runtime indicates a broken
 * ruleset.
 * NOTE(review): return type, braces and the return statement are
 * missing from this extract.
 */
203 ip6t_error(struct sk_buff *skb,
204 const struct net_device *in,
205 const struct net_device *out,
206 unsigned int hooknum,
207 const struct xt_target *target,
208 const void *targinfo)
211 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
216 /* Performance critical - called for every packet */
/*
 * Runs one extension match against the packet: fills in the per-match
 * fields of the xt_match_param and invokes the match callback.
 * NOTE(review): braces and return statements are missing from this
 * extract; per the iterator convention a nonzero return stops iteration.
 */
218 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
219 struct xt_match_param *par)
221 par->match = m->u.kernel.match;
222 par->matchinfo = m->data;
224 /* Stop iteration if it doesn't match */
225 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset from the table base into an entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((char *)base + offset);
}
237 /* All zeroes == unconditional rule. */
238 /* Mildly perf critical (only if packet tracing is on) */
240 unconditional(const struct ip6t_ip6 *ipv6)
244 for (i = 0; i < sizeof(*ipv6); i++)
245 if (((char *)ipv6)[i])
248 return (i == sizeof(*ipv6));
/*
 * Packet-tracing support (TRACE target): lookup tables mapping hook
 * numbers and trace comment kinds to the strings emitted in the
 * "TRACE: table:chain:comment:rulenum" log line, plus the nf_loginfo
 * used for those messages.
 * NOTE(review): closing braces/semicolons of these initializers are
 * missing from this extract.
 */
251 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
252 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
253 /* This cries for unification! */
254 static const char *const hooknames[] = {
255 [NF_INET_PRE_ROUTING] = "PREROUTING",
256 [NF_INET_LOCAL_IN] = "INPUT",
257 [NF_INET_FORWARD] = "FORWARD",
258 [NF_INET_LOCAL_OUT] = "OUTPUT",
259 [NF_INET_POST_ROUTING] = "POSTROUTING",
262 enum nf_ip_trace_comments {
263 NF_IP6_TRACE_COMMENT_RULE,
264 NF_IP6_TRACE_COMMENT_RETURN,
265 NF_IP6_TRACE_COMMENT_POLICY,
268 static const char *const comments[] = {
269 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
270 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
271 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
274 static struct nf_loginfo trace_loginfo = {
275 .type = NF_LOG_TYPE_LOG,
279 .logflags = NF_LOG_MASK,
284 /* Mildly perf critical (only if packet tracing is on) */
/*
 * Iterator callback for trace_packet(): walks entries from the hook
 * start to the matched entry 'e', tracking the current chain name
 * (set by ERROR targets that head user chains), the rule number within
 * it, and whether the match point is a policy/return tail.
 * NOTE(review): braces, rulenum updates and return statements are
 * missing from this extract.
 */
286 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
287 char *hookname, char **chainname,
288 char **comment, unsigned int *rulenum)
290 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
292 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
293 /* Head of user chain: ERROR target with chainname */
294 *chainname = t->target.data;
299 if (s->target_offset == sizeof(struct ip6t_entry)
300 && strcmp(t->target.u.kernel.target->name,
301 IP6T_STANDARD_TARGET) == 0
303 && unconditional(&s->ipv6)) {
304 /* Tail of chains: STANDARD target (return/policy) */
305 *comment = *chainname == hookname
306 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
307 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * Emits a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet by scanning the current CPU's rule copy from the hook entry
 * point up to the matched entry 'e'.
 * NOTE(review): some parameter/local declarations and braces are
 * missing from this extract.
 */
316 static void trace_packet(struct sk_buff *skb,
318 const struct net_device *in,
319 const struct net_device *out,
320 const char *tablename,
321 struct xt_table_info *private,
322 struct ip6t_entry *e)
325 const struct ip6t_entry *root;
326 char *hookname, *chainname, *comment;
327 unsigned int rulenum = 0;
329 table_base = (void *)private->entries[smp_processor_id()];
330 root = get_entry(table_base, private->hook_entry[hook]);
332 hookname = chainname = (char *)hooknames[hook];
333 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
335 IP6T_ENTRY_ITERATE(root,
336 private->size - private->hook_entry[hook],
337 get_chainname_rulenum,
338 e, hookname, &chainname, &comment, &rulenum);
340 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
341 "TRACE: %s:%s:%s:%u ",
342 tablename, chainname, comment, rulenum);
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main table traversal: under the table read lock, walks the current
 * CPU's rule copy starting at the hook's entry point. For each matching
 * rule it bumps the byte/packet counters and applies the target:
 * standard targets yield an absolute verdict, or jump/return using a
 * back-pointer chain threaded through the entries; other targets are
 * invoked and may return IP6T_CONTINUE to fall through.
 * NOTE(review): many structural lines (braces, declarations such as
 * 'hook'/'table_base', #else/#endif, the final return) are missing from
 * this extract; only comments were added.
 */
348 ip6t_do_table(struct sk_buff *skb,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
354 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
355 bool hotdrop = false;
356 /* Initializing verdict to NF_DROP keeps gcc happy. */
357 unsigned int verdict = NF_DROP;
358 const char *indev, *outdev;
360 struct ip6t_entry *e, *back;
361 struct xt_table_info *private;
362 struct xt_match_param mtpar;
365 indev = in ? in->name : nulldevname;
366 outdev = out ? out->name : nulldevname;
367 /* We handle fragments by dealing with the first fragment as
368 * if it was a normal packet. All other fragments are treated
369 * normally, except that they will NEVER match rules that ask
370 * things we don't know, ie. tcp syn flag or ports). If the
371 * rule is also a fragment-specific rule, non-fragments won't
373 mtpar.hotdrop = &hotdrop;
377 read_lock_bh(&table->lock);
378 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
379 private = table->private;
380 table_base = (void *)private->entries[smp_processor_id()];
381 e = get_entry(table_base, private->hook_entry[hook]);
383 /* For return from builtin chain */
384 back = get_entry(table_base, private->underflow[hook]);
389 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
390 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
391 struct ip6t_entry_target *t;
393 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
/* Rule matched: account the full IPv6 packet length. */
396 ADD_COUNTER(e->counters,
397 ntohs(ipv6_hdr(skb)->payload_len) +
398 sizeof(struct ipv6hdr), 1);
400 t = ip6t_get_target(e);
401 IP_NF_ASSERT(t->u.kernel.target);
403 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
404 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
405 /* The packet is traced: log it */
406 if (unlikely(skb->nf_trace))
407 trace_packet(skb, hook, in, out,
408 table->name, private, e);
410 /* Standard target? */
411 if (!t->u.kernel.target->target) {
414 v = ((struct ip6t_standard_target *)t)->verdict;
416 /* Pop from stack? */
417 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* values as -(verdict) - 1. */
418 verdict = (unsigned)(-v) - 1;
422 back = get_entry(table_base,
426 if (table_base + v != (void *)e + e->next_offset
427 && !(e->ipv6.flags & IP6T_F_GOTO)) {
428 /* Save old back ptr in next entry */
429 struct ip6t_entry *next
430 = (void *)e + e->next_offset;
432 = (void *)back - table_base;
433 /* set back pointer to next entry */
437 e = get_entry(table_base, v);
439 /* Targets which reenter must return
441 #ifdef CONFIG_NETFILTER_DEBUG
442 ((struct ip6t_entry *)table_base)->comefrom
445 verdict = t->u.kernel.target->target(skb,
451 #ifdef CONFIG_NETFILTER_DEBUG
452 if (((struct ip6t_entry *)table_base)->comefrom
454 && verdict == IP6T_CONTINUE) {
455 printk("Target %s reentered!\n",
456 t->u.kernel.target->name);
459 ((struct ip6t_entry *)table_base)->comefrom
462 if (verdict == IP6T_CONTINUE)
463 e = (void *)e + e->next_offset;
471 e = (void *)e + e->next_offset;
475 #ifdef CONFIG_NETFILTER_DEBUG
476 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
478 read_unlock_bh(&table->lock);
480 #ifdef DEBUG_ALLOW_ALL
489 /* Figures out from what hook each rule can be called: returns 0 if
490 there are loops. Puts hook bitmask in comefrom. */
/*
 * Ruleset verification: depth-first walk of each hook's chain graph
 * without recursion -- back-pointers are stashed in counters.pcnt and
 * the source-hook bitmask accumulates in comefrom. Detects loops and
 * out-of-range verdicts.
 * NOTE(review): return type, several braces, 'continue'/'return'
 * statements and parts of expressions are missing from this extract.
 */
492 mark_source_chains(struct xt_table_info *newinfo,
493 unsigned int valid_hooks, void *entry0)
497 /* No recursion; use packet counter to save back ptrs (reset
498 to 0 as we leave), and comefrom to save source hook bitmask */
499 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
500 unsigned int pos = newinfo->hook_entry[hook];
501 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
503 if (!(valid_hooks & (1 << hook)))
506 /* Set initial back pointer. */
507 e->counters.pcnt = pos;
510 struct ip6t_standard_target *t
511 = (void *)ip6t_get_target(e);
512 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS marks "currently on the DFS stack": a revisit means a loop. */
514 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
515 printk("iptables: loop hook %u pos %u %08X.\n",
516 hook, pos, e->comefrom);
519 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
521 /* Unconditional return/END. */
522 if ((e->target_offset == sizeof(struct ip6t_entry)
523 && (strcmp(t->target.u.user.name,
524 IP6T_STANDARD_TARGET) == 0)
526 && unconditional(&e->ipv6)) || visited) {
527 unsigned int oldpos, size;
529 if (t->verdict < -NF_MAX_VERDICT - 1) {
530 duprintf("mark_source_chains: bad "
531 "negative verdict (%i)\n",
536 /* Return: backtrack through the last
539 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
540 #ifdef DEBUG_IP_FIREWALL_USER
542 & (1 << NF_INET_NUMHOOKS)) {
543 duprintf("Back unset "
550 pos = e->counters.pcnt;
551 e->counters.pcnt = 0;
553 /* We're at the start. */
557 e = (struct ip6t_entry *)
559 } while (oldpos == pos + e->next_offset);
/* Move on to the entry after the one we just returned to. */
562 size = e->next_offset;
563 e = (struct ip6t_entry *)
564 (entry0 + pos + size);
565 e->counters.pcnt = pos;
568 int newpos = t->verdict;
570 if (strcmp(t->target.u.user.name,
571 IP6T_STANDARD_TARGET) == 0
573 if (newpos > newinfo->size -
574 sizeof(struct ip6t_entry)) {
575 duprintf("mark_source_chains: "
576 "bad verdict (%i)\n",
580 /* This a jump; chase it. */
581 duprintf("Jump rule %u -> %u\n",
584 /* ... this is a fallthru */
585 newpos = pos + e->next_offset;
587 e = (struct ip6t_entry *)
589 e->counters.pcnt = pos;
594 duprintf("Finished chain %u\n", hook);
/*
 * Iterator callback: releases one extension match (destroy hook plus
 * module refcount). With a non-NULL counter 'i', stops after *i
 * matches -- used to unwind partial setup on error paths.
 * NOTE(review): return type, braces and return statements are missing
 * from this extract.
 */
600 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
602 if (i && (*i)-- == 0)
605 if (m->u.kernel.match->destroy)
606 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
607 module_put(m->u.kernel.match->me);
/*
 * Basic structural validation of one rule: the ip6t_ip6 part must pass
 * ip6_checkentry(), and the target must fit between target_offset and
 * next_offset.
 * NOTE(review): return type, braces and return statements are missing
 * from this extract.
 */
612 check_entry(struct ip6t_entry *e, const char *name)
614 struct ip6t_entry_target *t;
616 if (!ip6_checkentry(&e->ipv6)) {
617 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
621 if (e->target_offset + sizeof(struct ip6t_entry_target) >
625 t = ip6t_get_target(e);
626 if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * Runs the generic xt_check_match() validation for one already-resolved
 * match against the rule's protocol/hook context.
 * NOTE(review): braces, the 'int ret;' declaration and return
 * statements are missing from this extract.
 */
632 static int check_match(struct ip6t_entry_match *m, const char *name,
633 const struct ip6t_ip6 *ipv6,
634 unsigned int hookmask, unsigned int *i)
636 struct xt_match *match;
639 match = m->u.kernel.match;
640 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
641 name, hookmask, ipv6->proto,
642 ipv6->invflags & IP6T_INV_PROTO, ipv6, m->data);
644 duprintf("ip_tables: check failed for `%s'.\n",
645 m->u.kernel.match->name);
/*
 * Resolves a match extension by name (auto-loading the ip6t_<name>
 * module if needed), stores it in the rule, then validates it via
 * check_match(); drops the module reference on validation failure.
 * NOTE(review): return type, braces and some lines (revision argument,
 * error label) are missing from this extract.
 */
653 find_check_match(struct ip6t_entry_match *m,
655 const struct ip6t_ip6 *ipv6,
656 unsigned int hookmask,
659 struct xt_match *match;
662 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
664 "ip6t_%s", m->u.user.name);
665 if (IS_ERR(match) || !match) {
666 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
667 return match ? PTR_ERR(match) : -ENOENT;
669 m->u.kernel.match = match;
671 ret = check_match(m, name, ipv6, hookmask, i);
677 module_put(m->u.kernel.match->me);
/*
 * Runs the generic xt_check_target() validation for the rule's
 * already-resolved target, passing the hook bitmask accumulated in
 * e->comefrom by mark_source_chains().
 * NOTE(review): braces, the 'int ret;' declaration and return
 * statements are missing from this extract.
 */
681 static int check_target(struct ip6t_entry *e, const char *name)
683 struct ip6t_entry_target *t;
684 struct xt_target *target;
687 t = ip6t_get_target(e);
688 target = t->u.kernel.target;
689 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
690 name, e->comefrom, e->ipv6.proto,
691 e->ipv6.invflags & IP6T_INV_PROTO, e, t->data);
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
/*
 * Full per-rule setup: structural check, resolve+check every match,
 * resolve the target (auto-loading its module) and check it. On any
 * failure, releases the target module reference and unwinds the 'j'
 * matches set up so far via cleanup_match().
 * NOTE(review): return type, braces, 'unsigned int j;' and some error
 * labels are missing from this extract.
 */
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
709 ret = check_entry(e, name);
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
717 goto cleanup_matches;
719 t = ip6t_get_target(e);
720 target = try_then_request_module(xt_find_target(AF_INET6,
723 "ip6t_%s", t->u.user.name);
724 if (IS_ERR(target) || !target) {
725 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
726 ret = target ? PTR_ERR(target) : -ENOENT;
727 goto cleanup_matches;
729 t->u.kernel.target = target;
731 ret = check_target(e, name);
738 module_put(t->u.kernel.target->me);
740 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * First-pass iterator over a user-supplied blob: verifies alignment and
 * bounds of each entry, records which entries sit at hook entry points
 * and underflows, and zeroes the counters/comefrom bookkeeping fields.
 * NOTE(review): return type, braces, the 'base' parameter line and
 * return statements are missing from this extract.
 */
745 check_entry_size_and_hooks(struct ip6t_entry *e,
746 struct xt_table_info *newinfo,
748 unsigned char *limit,
749 const unsigned int *hook_entries,
750 const unsigned int *underflows,
755 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
756 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
757 duprintf("Bad offset %p\n", e);
762 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
763 duprintf("checking: element %p size %u\n",
768 /* Check hooks & underflows */
769 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
770 if ((unsigned char *)e - base == hook_entries[h])
771 newinfo->hook_entry[h] = hook_entries[h];
772 if ((unsigned char *)e - base == underflows[h])
773 newinfo->underflow[h] = underflows[h];
776 /* FIXME: underflows must be unconditional, standard verdicts
777 < 0 (not IP6T_RETURN). --RR */
779 /* Clear counters and comefrom */
780 e->counters = ((struct xt_counters) { 0, 0 });
/*
 * Releases everything a rule holds: all matches (via cleanup_match),
 * then the target's destroy hook and module reference. The optional
 * counter 'i' limits how many entries are torn down on error paths.
 * NOTE(review): return type, braces and return statements are missing
 * from this extract.
 */
788 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
790 struct ip6t_entry_target *t;
792 if (i && (*i)-- == 0)
795 /* Cleanup all matches */
796 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
797 t = ip6t_get_target(e);
798 if (t->u.kernel.target->destroy)
799 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
800 module_put(t->u.kernel.target->me);
804 /* Checks and translates the user-supplied table segment (held in
/*
 * Validates the whole user-supplied ruleset in stages: per-entry size
 * and hook bookkeeping, all hooks assigned, loop detection via
 * mark_source_chains(), then full match/target checks; finally copies
 * the verified entries to every other CPU's table copy. Unwinds with
 * cleanup_entry() if a later stage fails.
 * NOTE(review): braces, several declarations and intermediate
 * error-handling lines are missing from this extract.
 */
807 translate_table(const char *name,
808 unsigned int valid_hooks,
809 struct xt_table_info *newinfo,
813 const unsigned int *hook_entries,
814 const unsigned int *underflows)
819 newinfo->size = size;
820 newinfo->number = number;
822 /* Init all hooks to impossible value. */
823 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
824 newinfo->hook_entry[i] = 0xFFFFFFFF;
825 newinfo->underflow[i] = 0xFFFFFFFF;
828 duprintf("translate_table: size %u\n", newinfo->size);
830 /* Walk through entries, checking offsets. */
831 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
832 check_entry_size_and_hooks,
836 hook_entries, underflows, &i);
841 duprintf("translate_table: %u not %u entries\n",
846 /* Check hooks all assigned */
847 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
848 /* Only hooks which are valid */
849 if (!(valid_hooks & (1 << i)))
851 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
852 duprintf("Invalid hook entry %u %u\n",
856 if (newinfo->underflow[i] == 0xFFFFFFFF) {
857 duprintf("Invalid underflow %u %u\n",
863 if (!mark_source_chains(newinfo, valid_hooks, entry0))
866 /* Finally, each sanity check must pass */
868 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
869 find_check_entry, name, size, &i);
872 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
877 /* And one copy for every other CPU */
878 for_each_possible_cpu(i) {
879 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
880 memcpy(newinfo->entries[i], entry0, newinfo->size);
/*
 * Iterator callback: adds one entry's byte/packet counters into the
 * aggregate array slot *i (incremented by lines missing from this
 * extract, along with return type/braces/return).
 */
888 add_entry_to_counter(const struct ip6t_entry *e,
889 struct xt_counters total[],
892 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * Iterator callback: overwrites (rather than accumulates) the aggregate
 * slot *i with this entry's counters -- used for the first CPU pass in
 * get_counters(). Return type/braces/return missing from this extract.
 */
899 set_entry_to_counter(const struct ip6t_entry *e,
900 struct ip6t_counters total[],
903 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * Aggregates per-CPU rule counters into one array: the current CPU's
 * copy seeds the totals (SET), every other CPU's copy is added (ADD).
 * NOTE(review): return type, braces, local declarations and the
 * skip-current-cpu check are missing from this extract.
 */
910 get_counters(const struct xt_table_info *t,
911 struct xt_counters counters[])
917 /* Instead of clearing (by a previous call to memset())
918 * the counters and using adds, we set the counters
919 * with data used by 'current' CPU
920 * We dont care about preemption here.
922 curcpu = raw_smp_processor_id();
925 IP6T_ENTRY_ITERATE(t->entries[curcpu],
927 set_entry_to_counter,
931 for_each_possible_cpu(cpu) {
935 IP6T_ENTRY_ITERATE(t->entries[cpu],
937 add_entry_to_counter,
/*
 * Allocates a counter array sized for the table and snapshots the
 * per-CPU counters into it under the table write lock (so the snapshot
 * is atomic with respect to packet processing).
 * NOTE(review): braces and the final 'return counters;' are missing
 * from this extract. Caller owns/frees the returned vmalloc'd array.
 */
943 static struct xt_counters *alloc_counters(struct xt_table *table)
945 unsigned int countersize;
946 struct xt_counters *counters;
947 const struct xt_table_info *private = table->private;
949 /* We need atomic snapshot of counters: rest doesn't change
950 (other than comefrom, which userspace doesn't care
952 countersize = sizeof(struct xt_counters) * private->number;
953 counters = vmalloc_node(countersize, numa_node_id());
955 if (counters == NULL)
956 return ERR_PTR(-ENOMEM);
958 /* First, sum counters... */
959 write_lock_bh(&table->lock);
960 get_counters(private, counters);
961 write_unlock_bh(&table->lock);
/*
 * Copies the table's rules out to userspace: bulk-copies this CPU's
 * rule blob, then patches each entry in place with the snapshotted
 * counters and rewrites kernel match/target pointers back to their
 * user-visible names.
 * NOTE(review): return type, braces, several declarations and error
 * paths are missing from this extract.
 */
967 copy_entries_to_user(unsigned int total_size,
968 struct xt_table *table,
969 void __user *userptr)
971 unsigned int off, num;
972 struct ip6t_entry *e;
973 struct xt_counters *counters;
974 const struct xt_table_info *private = table->private;
976 const void *loc_cpu_entry;
978 counters = alloc_counters(table);
979 if (IS_ERR(counters))
980 return PTR_ERR(counters);
982 /* choose the copy that is on our node/cpu, ...
983 * This choice is lazy (because current thread is
984 * allowed to migrate to another cpu)
986 loc_cpu_entry = private->entries[raw_smp_processor_id()];
987 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
992 /* FIXME: use iterator macros --RR */
993 /* ... then go back and fix counters and names */
994 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
996 const struct ip6t_entry_match *m;
997 const struct ip6t_entry_target *t;
999 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1000 if (copy_to_user(userptr + off
1001 + offsetof(struct ip6t_entry, counters),
1003 sizeof(counters[num])) != 0) {
1008 for (i = sizeof(struct ip6t_entry);
1009 i < e->target_offset;
1010 i += m->u.match_size) {
1013 if (copy_to_user(userptr + off + i
1014 + offsetof(struct ip6t_entry_match,
1016 m->u.kernel.match->name,
1017 strlen(m->u.kernel.match->name)+1)
1024 t = ip6t_get_target(e);
1025 if (copy_to_user(userptr + off + e->target_offset
1026 + offsetof(struct ip6t_entry_target,
1028 t->u.kernel.target->name,
1029 strlen(t->u.kernel.target->name)+1) != 0) {
1040 #ifdef CONFIG_COMPAT
/*
 * Converts a 32-bit (compat) standard verdict to native, adjusting
 * positive jump offsets by the accumulated compat offset delta.
 * NOTE(review): braces appear missing, and upstream guards the
 * adjustment with 'if (v > 0)' -- confirm against the pristine source.
 */
1041 static void compat_standard_from_user(void *dst, void *src)
1043 int v = *(compat_int_t *)src;
1046 v += xt_compat_calc_jump(AF_INET6, v);
1047 memcpy(dst, &v, sizeof(v));
/*
 * Converts a native standard verdict back to the 32-bit (compat)
 * layout, undoing the offset adjustment applied on the way in.
 * NOTE(review): braces appear missing, and upstream guards the
 * subtraction with 'if (cv > 0)' -- confirm against pristine source.
 */
1050 static int compat_standard_to_user(void __user *dst, void *src)
1052 compat_int_t cv = *(int *)src;
1055 cv -= xt_compat_calc_jump(AF_INET6, cv);
1056 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/*
 * Iterator callback: accumulates the size difference between the
 * native and compat layout of one match into *size.
 * NOTE(review): return type, braces and the trailing return are
 * missing from this extract.
 */
1060 compat_calc_match(struct ip6t_entry_match *m, int *size)
1062 *size += xt_compat_match_offset(m->u.kernel.match);
/*
 * Computes how much smaller one entry becomes in the compat layout
 * (entry header + every match + target), records that delta for the
 * entry's offset, shrinks newinfo->size accordingly, and shifts any
 * hook entry/underflow offsets that lie past this entry.
 * NOTE(review): braces, 'int off, ret, i;' declarations and return
 * statements are missing from this extract.
 */
1066 static int compat_calc_entry(struct ip6t_entry *e,
1067 const struct xt_table_info *info,
1068 void *base, struct xt_table_info *newinfo)
1070 struct ip6t_entry_target *t;
1071 unsigned int entry_offset;
1074 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1075 entry_offset = (void *)e - base;
1076 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1077 t = ip6t_get_target(e);
1078 off += xt_compat_target_offset(t->u.kernel.target);
1079 newinfo->size -= off;
1080 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1084 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1085 if (info->hook_entry[i] &&
1086 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1087 newinfo->hook_entry[i] -= off;
1088 if (info->underflow[i] &&
1089 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1090 newinfo->underflow[i] -= off;
/*
 * Builds the compat-layout view of a table's xt_table_info: copies the
 * fixed header fields and recomputes size/offsets by iterating
 * compat_calc_entry() over this CPU's rule copy.
 * NOTE(review): braces and the error-return line for the NULL check
 * are missing from this extract.
 */
1095 static int compat_table_info(const struct xt_table_info *info,
1096 struct xt_table_info *newinfo)
1098 void *loc_cpu_entry;
1100 if (!newinfo || !info)
1103 /* we dont care about newinfo->entries[] */
1104 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1105 newinfo->initial_entries = 0;
1106 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1107 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1108 compat_calc_entry, info, loc_cpu_entry,
/*
 * IP6T_SO_GET_INFO handler: looks up the named table (auto-loading its
 * module) and copies hook offsets, entry count and blob size back to
 * userspace; in compat mode the sizes are first recomputed via
 * compat_table_info().
 * NOTE(review): braces, 'if (compat)' guards and several error-path
 * lines are missing from this extract.
 */
1113 static int get_info(struct net *net, void __user *user, int *len, int compat)
1115 char name[IP6T_TABLE_MAXNAMELEN];
1119 if (*len != sizeof(struct ip6t_getinfo)) {
1120 duprintf("length %u != %zu\n", *len,
1121 sizeof(struct ip6t_getinfo));
1125 if (copy_from_user(name, user, sizeof(name)) != 0)
1128 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1129 #ifdef CONFIG_COMPAT
1131 xt_compat_lock(AF_INET6);
1133 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1134 "ip6table_%s", name);
1135 if (t && !IS_ERR(t)) {
1136 struct ip6t_getinfo info;
1137 const struct xt_table_info *private = t->private;
1139 #ifdef CONFIG_COMPAT
1141 struct xt_table_info tmp;
1142 ret = compat_table_info(private, &tmp);
1143 xt_compat_flush_offsets(AF_INET6);
1147 info.valid_hooks = t->valid_hooks;
1148 memcpy(info.hook_entry, private->hook_entry,
1149 sizeof(info.hook_entry));
1150 memcpy(info.underflow, private->underflow,
1151 sizeof(info.underflow));
1152 info.num_entries = private->number;
1153 info.size = private->size;
1154 strcpy(info.name, name);
1156 if (copy_to_user(user, &info, *len) != 0)
1164 ret = t ? PTR_ERR(t) : -ENOENT;
1165 #ifdef CONFIG_COMPAT
1167 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_GET_ENTRIES handler: validates the requested size against the
 * live table and copies the entries (with counters and user-visible
 * names) to userspace via copy_entries_to_user().
 * NOTE(review): return type, braces, declarations and error-return
 * lines are missing from this extract.
 */
1173 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1176 struct ip6t_get_entries get;
1179 if (*len < sizeof(get)) {
1180 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1183 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1185 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1186 duprintf("get_entries: %u != %zu\n",
1187 *len, sizeof(get) + get.size);
1191 t = xt_find_table_lock(net, AF_INET6, get.name);
1192 if (t && !IS_ERR(t)) {
1193 struct xt_table_info *private = t->private;
1194 duprintf("t->private->number = %u\n", private->number);
1195 if (get.size == private->size)
1196 ret = copy_entries_to_user(private->size,
1197 t, uptr->entrytable);
1199 duprintf("get_entries: I've got %u not %u!\n",
1200 private->size, get.size);
1206 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * Core of table replacement: swaps the validated new table in via
 * xt_replace_table(), adjusts the module use count, snapshots the old
 * table's counters for userspace, and tears the old rules down.
 * NOTE(review): return type, braces, module_put/try_module_get lines
 * and several error-path lines are missing from this extract.
 */
1212 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1213 struct xt_table_info *newinfo, unsigned int num_counters,
1214 void __user *counters_ptr)
1218 struct xt_table_info *oldinfo;
1219 struct xt_counters *counters;
1220 const void *loc_cpu_old_entry;
1223 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1230 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1231 "ip6table_%s", name);
1232 if (!t || IS_ERR(t)) {
1233 ret = t ? PTR_ERR(t) : -ENOENT;
1234 goto free_newinfo_counters_untrans;
1238 if (valid_hooks != t->valid_hooks) {
1239 duprintf("Valid hook crap: %08X vs %08X\n",
1240 valid_hooks, t->valid_hooks);
1245 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1249 /* Update module usage count based on number of rules */
1250 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1251 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1252 if ((oldinfo->number > oldinfo->initial_entries) ||
1253 (newinfo->number <= oldinfo->initial_entries))
1255 if ((oldinfo->number > oldinfo->initial_entries) &&
1256 (newinfo->number <= oldinfo->initial_entries))
1259 /* Get the old counters. */
1260 get_counters(oldinfo, counters);
1261 /* Decrease module usage counts and free resource */
1262 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1263 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1265 xt_free_table_info(oldinfo);
1266 if (copy_to_user(counters_ptr, counters,
1267 sizeof(struct xt_counters) * num_counters) != 0)
1276 free_newinfo_counters_untrans:
/*
 * IP6T_SO_SET_REPLACE handler: copies the replace header and rule blob
 * from userspace, validates/translates it with translate_table(), then
 * installs it via __do_replace(); on failure the translated entries
 * are cleaned up and the table info freed.
 * NOTE(review): return type, braces and some error-return lines are
 * missing from this extract.
 */
1283 do_replace(struct net *net, void __user *user, unsigned int len)
1286 struct ip6t_replace tmp;
1287 struct xt_table_info *newinfo;
1288 void *loc_cpu_entry;
1290 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1293 /* overflow check */
1294 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1297 newinfo = xt_alloc_table_info(tmp.size);
1301 /* choose the copy that is on our node/cpu */
1302 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1303 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1309 ret = translate_table(tmp.name, tmp.valid_hooks,
1310 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1311 tmp.hook_entry, tmp.underflow)
1315 duprintf("ip_tables: Translated table\n");
1317 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1318 tmp.num_counters, tmp.counters);
1320 goto free_newinfo_untrans;
1323 free_newinfo_untrans:
1324 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1326 xt_free_table_info(newinfo);
1330 /* We're lazy, and add to the first CPU; overflow works its fey magic
1331 * and everything is OK. */
/*
 * Iterator callback for do_add_counters(): folds the user-supplied
 * counter deltas for slot *i into this entry's counters.
 * NOTE(review): return type, braces, the index parameter line and the
 * trailing return/increment are missing from this extract.
 */
1333 add_counter_to_entry(struct ip6t_entry *e,
1334 const struct xt_counters addme[],
1338 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1340 (long unsigned int)e->counters.pcnt,
1341 (long unsigned int)e->counters.bcnt,
1342 (long unsigned int)addme[*i].pcnt,
1343 (long unsigned int)addme[*i].bcnt);
1346 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: reads a (native or compat)
 * xt_counters_info header plus counter array from userspace, verifies
 * the count matches the live table, and adds the deltas to this CPU's
 * rule copy under the table write lock.
 * NOTE(review): return type, braces, 'compat' parameter/guards and
 * error-path lines are missing from this extract.
 */
1353 do_add_counters(struct net *net, void __user *user, unsigned int len,
1357 struct xt_counters_info tmp;
1358 struct xt_counters *paddc;
1359 unsigned int num_counters;
1364 const struct xt_table_info *private;
1366 const void *loc_cpu_entry;
1367 #ifdef CONFIG_COMPAT
1368 struct compat_xt_counters_info compat_tmp;
1372 size = sizeof(struct compat_xt_counters_info);
1377 size = sizeof(struct xt_counters_info);
1380 if (copy_from_user(ptmp, user, size) != 0)
1383 #ifdef CONFIG_COMPAT
1385 num_counters = compat_tmp.num_counters;
1386 name = compat_tmp.name;
1390 num_counters = tmp.num_counters;
1394 if (len != size + num_counters * sizeof(struct xt_counters))
1397 paddc = vmalloc_node(len - size, numa_node_id());
1401 if (copy_from_user(paddc, user + size, len - size) != 0) {
1406 t = xt_find_table_lock(net, AF_INET6, name);
1407 if (!t || IS_ERR(t)) {
1408 ret = t ? PTR_ERR(t) : -ENOENT;
1412 write_lock_bh(&t->lock);
1413 private = t->private;
1414 if (private->number != num_counters) {
1416 goto unlock_up_free;
1420 /* Choose the copy that is on our node */
1421 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1422 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1424 add_counter_to_entry,
1428 write_unlock_bh(&t->lock);
1437 #ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct ip6t_replace: pointers are
 * compat_uptr_t and the trailing entries use the compat entry layout.
 * NOTE(review): several members (num_entries, size, num_counters) and
 * the closing brace are missing from this extract.
 */
1438 struct compat_ip6t_replace {
1439 char name[IP6T_TABLE_MAXNAMELEN];
1443 u32 hook_entry[NF_INET_NUMHOOKS];
1444 u32 underflow[NF_INET_NUMHOOKS];
1446 compat_uptr_t counters; /* struct ip6t_counters * */
1447 struct compat_ip6t_entry entries[0];
/*
 * Serializes one native entry into the compat layout at *dstptr:
 * copies the header and counters, converts each match and the target,
 * then fixes up target_offset/next_offset for the shrunken layout.
 * Advances *dstptr and shrinks *size as it goes.
 * NOTE(review): return type, braces, 'origsize = *size;' and error
 * labels are missing from this extract.
 */
1451 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1452 unsigned int *size, struct xt_counters *counters,
1455 struct ip6t_entry_target *t;
1456 struct compat_ip6t_entry __user *ce;
1457 u_int16_t target_offset, next_offset;
1458 compat_uint_t origsize;
1463 ce = (struct compat_ip6t_entry __user *)*dstptr;
1464 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1467 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1470 *dstptr += sizeof(struct compat_ip6t_entry);
1471 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1473 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1474 target_offset = e->target_offset - (origsize - *size);
1477 t = ip6t_get_target(e);
1478 ret = xt_compat_target_to_user(t, dstptr, size);
1482 next_offset = e->next_offset - (origsize - *size);
1483 if (put_user(target_offset, &ce->target_offset))
1485 if (put_user(next_offset, &ce->next_offset))
/*
 * Compat counterpart of find_check_match(): resolves the match by name
 * (auto-loading its module) and accumulates its native-vs-compat size
 * delta into *size; full validation happens later on the translated
 * native copy.
 * NOTE(review): return type, braces, the 'name' parameter line and the
 * trailing increment/return are missing from this extract.
 */
1495 compat_find_calc_match(struct ip6t_entry_match *m,
1497 const struct ip6t_ip6 *ipv6,
1498 unsigned int hookmask,
1499 int *size, unsigned int *i)
1501 struct xt_match *match;
1503 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1504 m->u.user.revision),
1505 "ip6t_%s", m->u.user.name);
1506 if (IS_ERR(match) || !match) {
1507 duprintf("compat_check_calc_match: `%s' not found\n",
1509 return match ? PTR_ERR(match) : -ENOENT;
1511 m->u.kernel.match = match;
1512 *size += xt_compat_match_offset(match);
/* Drop the module reference taken by compat_find_calc_match().  When
 * a count pointer is supplied, only the first *i matches are released
 * (partial-cleanup on error paths). */
1519 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1521 if (i && (*i)-- == 0)
1524 module_put(m->u.kernel.match->me);
/* Release every match and target module reference held by one compat
 * entry; used when translate_compat_table() has to abort. */
1529 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1531 struct ip6t_entry_target *t;
1533 if (i && (*i)-- == 0)
1536 /* Cleanup all matches */
1537 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1538 t = compat_ip6t_get_target(e);
1539 module_put(t->u.kernel.target->me);
/* Validate one compat rule in place: alignment/bounds checks, resolve
 * and pin its matches and target, record the per-entry native-vs-compat
 * size delta via xt_compat_add_offset(), and note hook entry/underflow
 * positions in newinfo.  Pinned references are released on failure. */
1544 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1545 struct xt_table_info *newinfo,
1547 unsigned char *base,
1548 unsigned char *limit,
1549 unsigned int *hook_entries,
1550 unsigned int *underflows,
1554 struct ip6t_entry_target *t;
1555 struct xt_target *target;
1556 unsigned int entry_offset;
1560 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would run past
 * the end of the user-supplied blob. */
1561 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1562 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1563 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least span the entry header plus a target. */
1567 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1568 sizeof(struct compat_xt_entry_target)) {
1569 duprintf("checking: element %p size %u\n",
1574 /* For purposes of check_entry casting the compat entry is fine */
1575 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much the native entry will grow vs compat. */
1579 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1580 entry_offset = (void *)e - (void *)base;
1582 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1583 &e->ipv6, e->comefrom, &off, &j);
1585 goto release_matches;
1587 t = compat_ip6t_get_target(e);
1588 target = try_then_request_module(xt_find_target(AF_INET6,
1590 t->u.user.revision),
1591 "ip6t_%s", t->u.user.name);
1592 if (IS_ERR(target) || !target) {
1593 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1595 ret = target ? PTR_ERR(target) : -ENOENT;
1596 goto release_matches;
1598 t->u.kernel.target = target;
1600 off += xt_compat_target_offset(target);
/* Remember the delta so later passes can translate user offsets. */
1602 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1606 /* Check hooks & underflows */
1607 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1608 if ((unsigned char *)e - base == hook_entries[h])
1609 newinfo->hook_entry[h] = hook_entries[h];
1610 if ((unsigned char *)e - base == underflows[h])
1611 newinfo->underflow[h] = underflows[h];
1614 /* Clear counters and comefrom */
1615 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwinding: drop the target ref, then the j matches pinned so
 * far.  NOTE(review): this uses the non-compat IP6T_MATCH_ITERATE on
 * a compat entry -- verify against the full file / upstream history
 * whether COMPAT_IP6T_MATCH_ITERATE is intended here. */
1622 module_put(t->u.kernel.target->me);
1624 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/* Convert one (already validated) compat entry into native layout in
 * the new table image: header copy, match/target expansion via the xt
 * compat helpers, then offset and hook fix-ups for the size change. */
1629 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1630 unsigned int *size, const char *name,
1631 struct xt_table_info *newinfo, unsigned char *base)
1633 struct ip6t_entry_target *t;
1634 struct xt_target *target;
1635 struct ip6t_entry *de;
1636 unsigned int origsize;
1641 de = (struct ip6t_entry *)*dstptr;
1642 memcpy(de, e, sizeof(struct ip6t_entry))
1643 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1645 *dstptr += sizeof(struct ip6t_entry);
/* Native entries are larger; *size tracks the growth so far. */
1646 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1648 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* Entry-relative offsets are adjusted by the net size change
 * (origsize - *size). */
1652 de->target_offset = e->target_offset - (origsize - *size);
1653 t = compat_ip6t_get_target(e);
1654 target = t->u.kernel.target;
1655 xt_compat_target_from_user(t, dstptr, size);
1657 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook entry/underflow lying beyond this entry moves by the same
 * amount the ruleset has changed in size. */
1658 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1659 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1660 newinfo->hook_entry[h] -= origsize - *size;
1661 if ((unsigned char *)de - base < newinfo->underflow[h])
1662 newinfo->underflow[h] -= origsize - *size;
/* Re-run the native per-entry checks (each match, then the target) on
 * an entry translated from compat form; on failure, unwind the j
 * matches that already passed. */
1667 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1674 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
1677 goto cleanup_matches;
1679 ret = check_target(e, name);
1681 goto cleanup_matches;
1687 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* Translate a whole compat (32-bit) ruleset to native layout:
 * pass 1 validates every compat entry and accumulates size deltas,
 * pass 2 copies the entries into a freshly allocated xt_table_info,
 * then hook chains are marked and each entry re-checked natively.
 * On success the native table replaces *pinfo / the entry pointer. */
1692 translate_compat_table(const char *name,
1693 unsigned int valid_hooks,
1694 struct xt_table_info **pinfo,
1696 unsigned int total_size,
1697 unsigned int number,
1698 unsigned int *hook_entries,
1699 unsigned int *underflows)
1702 struct xt_table_info *newinfo, *info;
1703 void *pos, *entry0, *entry1;
1710 info->number = number;
1712 /* Init all hooks to impossible value. */
1713 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1714 info->hook_entry[i] = 0xFFFFFFFF;
1715 info->underflow[i] = 0xFFFFFFFF;
1718 duprintf("translate_compat_table: size %u\n", info->size);
/* The compat offset table is global per family; hold the compat lock
 * across the whole validate-and-copy sequence. */
1720 xt_compat_lock(AF_INET6);
1721 /* Walk through entries, checking offsets. */
1722 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1723 check_compat_entry_size_and_hooks,
1724 info, &size, entry0,
1725 entry0 + total_size,
1726 hook_entries, underflows, &j, name);
1732 duprintf("translate_compat_table: %u not %u entries\n",
1737 /* Check hooks all assigned */
1738 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1739 /* Only hooks which are valid */
1740 if (!(valid_hooks & (1 << i)))
1742 if (info->hook_entry[i] == 0xFFFFFFFF) {
1743 duprintf("Invalid hook entry %u %u\n",
1744 i, hook_entries[i]);
1747 if (info->underflow[i] == 0xFFFFFFFF) {
1748 duprintf("Invalid underflow %u %u\n",
/* Second table image, sized for the (larger) native entries. */
1755 newinfo = xt_alloc_table_info(size);
1759 newinfo->number = number;
1760 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1761 newinfo->hook_entry[i] = info->hook_entry[i];
1762 newinfo->underflow[i] = info->underflow[i];
1764 entry1 = newinfo->entries[raw_smp_processor_id()];
1767 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1768 compat_copy_entry_from_user,
1769 &pos, &size, name, newinfo, entry1);
/* Offset records are no longer needed once entries are native. */
1770 xt_compat_flush_offsets(AF_INET6);
1771 xt_compat_unlock(AF_INET6);
1776 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1780 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Partial-failure cleanup: release compat refs not yet consumed,
 * then tear down the i entries that did pass compat_check_entry(). */
1784 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1785 compat_release_entry, &j);
1786 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1787 xt_free_table_info(newinfo);
1791 /* And one copy for every other CPU */
1792 for_each_possible_cpu(i)
1793 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1794 memcpy(newinfo->entries[i], entry1, newinfo->size);
1798 xt_free_table_info(info);
/* Error paths below: free the new table and/or release compat refs
 * before dropping the compat lock. */
1802 xt_free_table_info(newinfo);
1804 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1807 xt_compat_flush_offsets(AF_INET6);
1808 xt_compat_unlock(AF_INET6);
/* Compat (32-bit userland) table replace: copy in the compat header
 * and rule blob, translate to native layout, then hand off to the
 * shared __do_replace(). */
1813 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1816 struct compat_ip6t_replace tmp;
1817 struct xt_table_info *newinfo;
1818 void *loc_cpu_entry;
1820 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1823 /* overflow check */
1824 if (tmp.size >= INT_MAX / num_possible_cpus())
1826 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1829 newinfo = xt_alloc_table_info(tmp.size);
1833 /* choose the copy that is on our node/cpu */
1834 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1835 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1841 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1842 &newinfo, &loc_cpu_entry, tmp.size,
1843 tmp.num_entries, tmp.hook_entry,
1848 duprintf("compat_do_replace: Translated table\n");
/* counters pointer came from 32-bit userland -> compat_ptr(). */
1850 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1851 tmp.num_counters, compat_ptr(tmp.counters));
1853 goto free_newinfo_untrans;
1856 free_newinfo_untrans:
/* Table was translated but never installed: drop entry refs too. */
1857 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1859 xt_free_table_info(newinfo);
/* setsockopt() entry point for 32-bit callers; requires
 * CAP_NET_ADMIN. */
1864 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1869 if (!capable(CAP_NET_ADMIN))
1873 case IP6T_SO_SET_REPLACE:
1874 ret = compat_do_replace(sock_net(sk), user, len);
1877 case IP6T_SO_SET_ADD_COUNTERS:
/* Counter blob is in compat layout -> compat flag = 1. */
1878 ret = do_add_counters(sock_net(sk), user, len, 1);
1882 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply buffer. */
1889 struct compat_ip6t_get_entries {
1890 char name[IP6T_TABLE_MAXNAMELEN];
/* Flexible trailing array of compat-formatted rules. */
1892 struct compat_ip6t_entry entrytable[0];
/* Snapshot the table's counters, then stream every entry of this
 * node's copy out to the 32-bit user buffer via
 * compat_copy_entry_to_user(). */
1896 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1897 void __user *userptr)
1899 struct xt_counters *counters;
1900 const struct xt_table_info *private = table->private;
1904 const void *loc_cpu_entry;
1907 counters = alloc_counters(table);
1908 if (IS_ERR(counters))
1909 return PTR_ERR(counters);
1911 /* choose the copy that is on our node/cpu, ...
1912 * This choice is lazy (because current thread is
1913 * allowed to migrate to another cpu)
1915 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1918 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1919 compat_copy_entry_to_user,
1920 &pos, &size, counters, &i);
/* IP6T_SO_GET_ENTRIES for 32-bit callers: validate the requested
 * length against the compat-converted table size, then copy the
 * ruleset out in compat layout. */
1927 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1931 struct compat_ip6t_get_entries get;
1934 if (*len < sizeof(get)) {
1935 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1939 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller-supplied length must exactly cover header + entries. */
1942 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1943 duprintf("compat_get_entries: %u != %zu\n",
1944 *len, sizeof(get) + get.size);
/* Compat offsets are (re)computed under the per-family compat lock. */
1948 xt_compat_lock(AF_INET6);
1949 t = xt_find_table_lock(net, AF_INET6, get.name);
1950 if (t && !IS_ERR(t)) {
1951 const struct xt_table_info *private = t->private;
1952 struct xt_table_info info;
1953 duprintf("t->private->number = %u\n", private->number);
/* Size the table as 32-bit userland would see it. */
1954 ret = compat_table_info(private, &info);
1955 if (!ret && get.size == info.size) {
1956 ret = compat_copy_entries_to_user(private->size,
1957 t, uptr->entrytable);
1959 duprintf("compat_get_entries: I've got %u not %u!\n",
1960 private->size, get.size);
1963 xt_compat_flush_offsets(AF_INET6);
1967 ret = t ? PTR_ERR(t) : -ENOENT;
1969 xt_compat_unlock(AF_INET6);
/* Forward declaration: the native handler is defined below and used
 * as the fallthrough for compat gets. */
1973 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* getsockopt() entry point for 32-bit callers.  INFO and ENTRIES need
 * compat translation; everything else is delegated to the native
 * handler unchanged. */
1976 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1980 if (!capable(CAP_NET_ADMIN))
1984 case IP6T_SO_GET_INFO:
1985 ret = get_info(sock_net(sk), user, len, 1);
1987 case IP6T_SO_GET_ENTRIES:
1988 ret = compat_get_entries(sock_net(sk), user, len);
1991 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt() handler: table replace and counter addition;
 * requires CAP_NET_ADMIN. */
1998 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2002 if (!capable(CAP_NET_ADMIN))
2006 case IP6T_SO_SET_REPLACE:
2007 ret = do_replace(sock_net(sk), user, len);
2010 case IP6T_SO_SET_ADD_COUNTERS:
/* Native counter layout -> compat flag = 0. */
2011 ret = do_add_counters(sock_net(sk), user, len, 0);
2015 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt() handler: table info, entry dump, and
 * match/target revision queries; requires CAP_NET_ADMIN. */
2023 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2027 if (!capable(CAP_NET_ADMIN))
2031 case IP6T_SO_GET_INFO:
2032 ret = get_info(sock_net(sk), user, len, 0);
2035 case IP6T_SO_GET_ENTRIES:
2036 ret = get_entries(sock_net(sk), user, len);
2039 case IP6T_SO_GET_REVISION_MATCH:
2040 case IP6T_SO_GET_REVISION_TARGET: {
2041 struct ip6t_get_revision rev;
2044 if (*len != sizeof(rev)) {
2048 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2053 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* May load the ip6t_<name> module to answer the revision query. */
2058 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2061 "ip6t_%s", rev.name);
2066 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Build and register a table from its built-in replace blob: allocate
 * per-cpu entry storage, copy in and translate/verify the initial
 * ruleset, then register with the xt core.  Returns the live table or
 * an ERR_PTR(). */
2073 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2074 const struct ip6t_replace *repl)
2077 struct xt_table_info *newinfo;
/* All-zero placeholder; xt_register_table() swaps in newinfo. */
2078 struct xt_table_info bootstrap
2079 = { 0, 0, 0, { 0 }, { 0 }, { } };
2080 void *loc_cpu_entry;
2081 struct xt_table *new_table;
2083 newinfo = xt_alloc_table_info(repl->size);
2089 /* choose the copy on our node/cpu, but dont care about preemption */
2090 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2091 memcpy(loc_cpu_entry, repl->entries, repl->size);
2093 ret = translate_table(table->name, table->valid_hooks,
2094 newinfo, loc_cpu_entry, repl->size,
2101 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2102 if (IS_ERR(new_table)) {
2103 ret = PTR_ERR(new_table);
2109 xt_free_table_info(newinfo);
2111 return ERR_PTR(ret);
/* Tear down a table: unhook it from the xt core, release every
 * entry's match/target module references, then free the table info. */
2114 void ip6t_unregister_table(struct xt_table *table)
2116 struct xt_table_info *private;
2117 void *loc_cpu_entry;
/* Capture the owning module before the table struct is torn down. */
2118 struct module *table_owner = table->me;
2120 private = xt_unregister_table(table);
2122 /* Decrease module usage counts and free resources */
2123 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2124 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* Entries beyond the built-in ones held a ref on the owner module. */
2125 if (private->number > private->initial_entries)
2126 module_put(table_owner);
2127 xt_free_table_info(private);
2130 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2132 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2133 u_int8_t type, u_int8_t code,
/* NOTE(review): the result is presumably combined (XOR) with the
 * invert flag on a line not visible here -- confirm against the
 * full file. */
2136 return (type == test_type && code >= min_code && code <= max_code)
/* "icmp6" match callback: pull the ICMPv6 header from the packet
 * (works whether or not it is in the linear area) and compare
 * type/code against the rule's configured range. */
2141 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2143 const struct icmp6hdr *ic;
2144 struct icmp6hdr _icmph;
2145 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2147 /* Must not be a fragment. */
2148 if (par->fragoff != 0)
2151 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2153 /* We've been asked to examine this packet, and we
2154 * can't. Hence, no choice but to drop.
2156 duprintf("Dropping evil ICMP tinygram.\n");
/* Truncated header: signal the core to drop the packet. */
2157 *par->hotdrop = true;
2161 return icmp6_type_code_match(icmpinfo->type,
2164 ic->icmp6_type, ic->icmp6_code,
2165 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2168 /* Called when user tries to insert an entry of this type. */
/* Returns nonzero (accept) only when the rule sets no invert flags
 * other than IP6T_ICMP_INV. */
2170 icmp6_checkentry(const char *tablename,
2172 const struct xt_match *match,
2174 unsigned int hook_mask)
2176 const struct ip6t_icmp *icmpinfo = matchinfo;
2178 /* Must specify no unknown invflags */
2179 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2182 /* The built-in targets: standard (NULL) and error. */
/* Standard target: carries a plain int verdict as its target data;
 * per the comment above, no target function is set. */
2183 static struct xt_target ip6t_standard_target __read_mostly = {
2184 .name = IP6T_STANDARD_TARGET,
2185 .targetsize = sizeof(int),
2187 #ifdef CONFIG_COMPAT
/* Verdict ints need 32<->64-bit translation for compat userland. */
2188 .compatsize = sizeof(compat_int_t),
2189 .compat_from_user = compat_standard_from_user,
2190 .compat_to_user = compat_standard_to_user,
/* Built-in ERROR target, handled by ip6t_error(); its target data is
 * an error-name string of IP6T_FUNCTION_MAXNAMELEN bytes. */
2194 static struct xt_target ip6t_error_target __read_mostly = {
2195 .name = IP6T_ERROR_TARGET,
2196 .target = ip6t_error,
2197 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* Netfilter sockopt registration: routes the IP6T_SO_* get/set range
 * (and the 32-bit compat variants when CONFIG_COMPAT) to the
 * handlers defined above. */
2201 static struct nf_sockopt_ops ip6t_sockopts = {
2203 .set_optmin = IP6T_BASE_CTL,
2204 .set_optmax = IP6T_SO_SET_MAX+1,
2205 .set = do_ip6t_set_ctl,
2206 #ifdef CONFIG_COMPAT
2207 .compat_set = compat_do_ip6t_set_ctl,
2209 .get_optmin = IP6T_BASE_CTL,
2210 .get_optmax = IP6T_SO_GET_MAX+1,
2211 .get = do_ip6t_get_ctl,
2212 #ifdef CONFIG_COMPAT
2213 .compat_get = compat_do_ip6t_get_ctl,
2215 .owner = THIS_MODULE,
/* Built-in ICMPv6 match, backed by icmp6_match()/icmp6_checkentry()
 * and restricted to IPPROTO_ICMPV6 packets. */
2218 static struct xt_match icmp6_matchstruct __read_mostly = {
2220 .match = icmp6_match,
2221 .matchsize = sizeof(struct ip6t_icmp),
2222 .checkentry = icmp6_checkentry,
2223 .proto = IPPROTO_ICMPV6,
/* Per-namespace init: set up the AF_INET6 xtables context. */
2227 static int __net_init ip6_tables_net_init(struct net *net)
2229 return xt_proto_init(net, AF_INET6);
/* Per-namespace teardown, counterpart of ip6_tables_net_init(). */
2232 static void __net_exit ip6_tables_net_exit(struct net *net)
2234 xt_proto_fini(net, AF_INET6);
/* Hooks the per-namespace init/exit into the pernet machinery. */
2237 static struct pernet_operations ip6_tables_net_ops = {
2238 .init = ip6_tables_net_init,
2239 .exit = ip6_tables_net_exit,
/* Module init: register the pernet subsystem, the built-in targets
 * and the icmp6 match, then the sockopt interface.  The labels after
 * the printk unwind in reverse order on failure. */
2242 static int __init ip6_tables_init(void)
2246 ret = register_pernet_subsys(&ip6_tables_net_ops);
2250 /* Noone else will be downing sem now, so we won't sleep */
2251 ret = xt_register_target(&ip6t_standard_target);
2254 ret = xt_register_target(&ip6t_error_target);
2257 ret = xt_register_match(&icmp6_matchstruct);
2261 /* Register setsockopt */
2262 ret = nf_register_sockopt(&ip6t_sockopts);
2266 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwinding, reverse registration order. */
2270 xt_unregister_match(&icmp6_matchstruct);
2272 xt_unregister_target(&ip6t_error_target);
2274 xt_unregister_target(&ip6t_standard_target);
2276 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything ip6_tables_init() set up, in
 * reverse order of registration. */
2281 static void __exit ip6_tables_fini(void)
2283 nf_unregister_sockopt(&ip6t_sockopts);
2285 xt_unregister_match(&icmp6_matchstruct);
2286 xt_unregister_target(&ip6t_error_target);
2287 xt_unregister_target(&ip6t_standard_target);
2289 unregister_pernet_subsys(&ip6_tables_net_ops);
2293 * find the offset to specified header or the protocol number of last header
2294 * if target < 0. "last header" is transport protocol header, ESP, or
2297 * If target header is found, its offset is set in *offset and return protocol
2298 * number. Otherwise, return -1.
2300 * If the first fragment doesn't contain the final protocol header or
2301 * NEXTHDR_NONE it is considered invalid.
2303 * Note that non-1st fragment is special case that "the protocol number
2304 * of last header" is "next header" field in Fragment header. In this case,
2305 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2309 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2310 int target, unsigned short *fragoff)
/* Start the walk just past the fixed 40-byte IPv6 header. */
2312 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2313 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2314 unsigned int len = skb->len - start;
2319 while (nexthdr != target) {
2320 struct ipv6_opt_hdr _hdr, *hp;
2321 unsigned int hdrlen;
/* Reached a non-extension (transport) header, ESP, or
 * NEXTHDR_NONE without finding the target. */
2323 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2329 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2332 if (nexthdr == NEXTHDR_FRAGMENT) {
2333 unsigned short _frag_off;
/* Fetch only the 16-bit frag-offset/flags word of the
 * fragment header. */
2335 fp = skb_header_pointer(skb,
2336 start+offsetof(struct frag_hdr,
/* Mask the low three (reserved/M) bits to get the offset. */
2343 _frag_off = ntohs(*fp) & ~0x7;
2346 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2347 hp->nexthdr == NEXTHDR_NONE)) {
2349 *fragoff = _frag_off;
/* AH encodes its length in 32-bit words (+2); the other extension
 * headers use the 8-octet ipv6_optlen() convention. */
2355 } else if (nexthdr == NEXTHDR_AUTH)
2356 hdrlen = (hp->hdrlen + 2) << 2;
2358 hdrlen = ipv6_optlen(hp);
2360 nexthdr = hp->nexthdr;
/* Public API used by the per-table modules (filter/mangle/raw etc.)
 * and other IPv6 netfilter code. */
2369 EXPORT_SYMBOL(ip6t_register_table);
2370 EXPORT_SYMBOL(ip6t_unregister_table);
2371 EXPORT_SYMBOL(ip6t_do_table);
2372 EXPORT_SYMBOL(ip6t_ext_hdr);
2373 EXPORT_SYMBOL(ipv6_find_hdr);
2375 module_init(ip6_tables_init);
2376 module_exit(ip6_tables_fini);