/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug printouts: compiled away entirely unless the corresponding
 * DEBUG_* symbol above is defined. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
/* Non-fatal assertion: logs location on failure instead of oopsing. */
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
65 /* All the better to debug you with... */
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* Return the rule at byte offset 'offset' from the table base. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
226 unconditional(const struct ip6t_ip6 *ipv6)
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
234 return (i == sizeof(*ipv6));
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
265 .logflags = NF_LOG_MASK,
270 /* Mildly perf critical (only if packet tracing is on) */
272 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 char *hookname, char **chainname,
274 char **comment, unsigned int *rulenum)
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
278 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
279 /* Head of user chain: ERROR target with chainname */
280 *chainname = t->target.data;
285 if (s->target_offset == sizeof(struct ip6t_entry)
286 && strcmp(t->target.u.kernel.target->name,
287 IP6T_STANDARD_TARGET) == 0
289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */
291 *comment = *chainname == hookname
292 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
302 static void trace_packet(struct sk_buff *skb,
304 const struct net_device *in,
305 const struct net_device *out,
306 const char *tablename,
307 struct xt_table_info *private,
308 struct ip6t_entry *e)
311 const struct ip6t_entry *root;
312 char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0;
315 table_base = (void *)private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]);
318 hookname = chainname = (char *)hooknames[hook];
319 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook],
323 get_chainname_rulenum,
324 e, hookname, &chainname, &comment, &rulenum);
326 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
327 "TRACE: %s:%s:%s:%u ",
328 tablename, chainname, comment, rulenum);
332 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
334 ip6t_do_table(struct sk_buff *skb,
336 const struct net_device *in,
337 const struct net_device *out,
338 struct xt_table *table)
340 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
341 bool hotdrop = false;
342 /* Initializing verdict to NF_DROP keeps gcc happy. */
343 unsigned int verdict = NF_DROP;
344 const char *indev, *outdev;
346 struct ip6t_entry *e, *back;
347 struct xt_table_info *private;
348 struct xt_match_param mtpar;
349 struct xt_target_param tgpar;
352 indev = in ? in->name : nulldevname;
353 outdev = out ? out->name : nulldevname;
354 /* We handle fragments by dealing with the first fragment as
355 * if it was a normal packet. All other fragments are treated
356 * normally, except that they will NEVER match rules that ask
357 * things we don't know, ie. tcp syn flag or ports). If the
358 * rule is also a fragment-specific rule, non-fragments won't
360 mtpar.hotdrop = &hotdrop;
361 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 tgpar.hooknum = hook;
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
369 private = rcu_dereference(table->private);
370 table_base = rcu_dereference(private->entries[smp_processor_id()]);
372 e = get_entry(table_base, private->hook_entry[hook]);
374 /* For return from builtin chain */
375 back = get_entry(table_base, private->underflow[hook]);
380 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
381 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
382 struct ip6t_entry_target *t;
384 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
387 ADD_COUNTER(e->counters,
388 ntohs(ipv6_hdr(skb)->payload_len) +
389 sizeof(struct ipv6hdr), 1);
391 t = ip6t_get_target(e);
392 IP_NF_ASSERT(t->u.kernel.target);
394 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
395 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
396 /* The packet is traced: log it */
397 if (unlikely(skb->nf_trace))
398 trace_packet(skb, hook, in, out,
399 table->name, private, e);
401 /* Standard target? */
402 if (!t->u.kernel.target->target) {
405 v = ((struct ip6t_standard_target *)t)->verdict;
407 /* Pop from stack? */
408 if (v != IP6T_RETURN) {
409 verdict = (unsigned)(-v) - 1;
413 back = get_entry(table_base,
417 if (table_base + v != (void *)e + e->next_offset
418 && !(e->ipv6.flags & IP6T_F_GOTO)) {
419 /* Save old back ptr in next entry */
420 struct ip6t_entry *next
421 = (void *)e + e->next_offset;
423 = (void *)back - table_base;
424 /* set back pointer to next entry */
428 e = get_entry(table_base, v);
430 /* Targets which reenter must return
432 tgpar.target = t->u.kernel.target;
433 tgpar.targinfo = t->data;
435 #ifdef CONFIG_NETFILTER_DEBUG
436 ((struct ip6t_entry *)table_base)->comefrom
439 verdict = t->u.kernel.target->target(skb,
442 #ifdef CONFIG_NETFILTER_DEBUG
443 if (((struct ip6t_entry *)table_base)->comefrom
445 && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 ((struct ip6t_entry *)table_base)->comefrom
453 if (verdict == IP6T_CONTINUE)
454 e = (void *)e + e->next_offset;
462 e = (void *)e + e->next_offset;
466 #ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
471 #ifdef DEBUG_ALLOW_ALL
480 /* Figures out from what hook each rule can be called: returns 0 if
481 there are loops. Puts hook bitmask in comefrom. */
483 mark_source_chains(struct xt_table_info *newinfo,
484 unsigned int valid_hooks, void *entry0)
488 /* No recursion; use packet counter to save back ptrs (reset
489 to 0 as we leave), and comefrom to save source hook bitmask */
490 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
491 unsigned int pos = newinfo->hook_entry[hook];
492 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
494 if (!(valid_hooks & (1 << hook)))
497 /* Set initial back pointer. */
498 e->counters.pcnt = pos;
501 struct ip6t_standard_target *t
502 = (void *)ip6t_get_target(e);
503 int visited = e->comefrom & (1 << hook);
505 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
506 printk("iptables: loop hook %u pos %u %08X.\n",
507 hook, pos, e->comefrom);
510 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
512 /* Unconditional return/END. */
513 if ((e->target_offset == sizeof(struct ip6t_entry)
514 && (strcmp(t->target.u.user.name,
515 IP6T_STANDARD_TARGET) == 0)
517 && unconditional(&e->ipv6)) || visited) {
518 unsigned int oldpos, size;
520 if (t->verdict < -NF_MAX_VERDICT - 1) {
521 duprintf("mark_source_chains: bad "
522 "negative verdict (%i)\n",
527 /* Return: backtrack through the last
530 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
531 #ifdef DEBUG_IP_FIREWALL_USER
533 & (1 << NF_INET_NUMHOOKS)) {
534 duprintf("Back unset "
541 pos = e->counters.pcnt;
542 e->counters.pcnt = 0;
544 /* We're at the start. */
548 e = (struct ip6t_entry *)
550 } while (oldpos == pos + e->next_offset);
553 size = e->next_offset;
554 e = (struct ip6t_entry *)
555 (entry0 + pos + size);
556 e->counters.pcnt = pos;
559 int newpos = t->verdict;
561 if (strcmp(t->target.u.user.name,
562 IP6T_STANDARD_TARGET) == 0
564 if (newpos > newinfo->size -
565 sizeof(struct ip6t_entry)) {
566 duprintf("mark_source_chains: "
567 "bad verdict (%i)\n",
571 /* This a jump; chase it. */
572 duprintf("Jump rule %u -> %u\n",
575 /* ... this is a fallthru */
576 newpos = pos + e->next_offset;
578 e = (struct ip6t_entry *)
580 e->counters.pcnt = pos;
585 duprintf("Finished chain %u\n", hook);
591 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
593 struct xt_mtdtor_param par;
595 if (i && (*i)-- == 0)
598 par.match = m->u.kernel.match;
599 par.matchinfo = m->data;
600 par.family = NFPROTO_IPV6;
601 if (par.match->destroy != NULL)
602 par.match->destroy(&par);
603 module_put(par.match->me);
608 check_entry(struct ip6t_entry *e, const char *name)
610 struct ip6t_entry_target *t;
612 if (!ip6_checkentry(&e->ipv6)) {
613 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
617 if (e->target_offset + sizeof(struct ip6t_entry_target) >
621 t = ip6t_get_target(e);
622 if (e->target_offset + t->u.target_size > e->next_offset)
628 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
631 const struct ip6t_ip6 *ipv6 = par->entryinfo;
634 par->match = m->u.kernel.match;
635 par->matchinfo = m->data;
637 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
638 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
640 duprintf("ip_tables: check failed for `%s'.\n",
649 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
652 struct xt_match *match;
655 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
657 "ip6t_%s", m->u.user.name);
658 if (IS_ERR(match) || !match) {
659 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
660 return match ? PTR_ERR(match) : -ENOENT;
662 m->u.kernel.match = match;
664 ret = check_match(m, par, i);
670 module_put(m->u.kernel.match->me);
674 static int check_target(struct ip6t_entry *e, const char *name)
676 struct ip6t_entry_target *t = ip6t_get_target(e);
677 struct xt_tgchk_param par = {
680 .target = t->u.kernel.target,
682 .hook_mask = e->comefrom,
683 .family = NFPROTO_IPV6,
687 t = ip6t_get_target(e);
688 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
689 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
691 duprintf("ip_tables: check failed for `%s'.\n",
692 t->u.kernel.target->name);
699 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
702 struct ip6t_entry_target *t;
703 struct xt_target *target;
706 struct xt_mtchk_param mtpar;
708 ret = check_entry(e, name);
714 mtpar.entryinfo = &e->ipv6;
715 mtpar.hook_mask = e->comefrom;
716 mtpar.family = NFPROTO_IPV6;
717 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
719 goto cleanup_matches;
721 t = ip6t_get_target(e);
722 target = try_then_request_module(xt_find_target(AF_INET6,
725 "ip6t_%s", t->u.user.name);
726 if (IS_ERR(target) || !target) {
727 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
728 ret = target ? PTR_ERR(target) : -ENOENT;
729 goto cleanup_matches;
731 t->u.kernel.target = target;
733 ret = check_target(e, name);
740 module_put(t->u.kernel.target->me);
742 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
747 check_entry_size_and_hooks(struct ip6t_entry *e,
748 struct xt_table_info *newinfo,
750 unsigned char *limit,
751 const unsigned int *hook_entries,
752 const unsigned int *underflows,
757 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
758 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
759 duprintf("Bad offset %p\n", e);
764 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
765 duprintf("checking: element %p size %u\n",
770 /* Check hooks & underflows */
771 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
772 if ((unsigned char *)e - base == hook_entries[h])
773 newinfo->hook_entry[h] = hook_entries[h];
774 if ((unsigned char *)e - base == underflows[h])
775 newinfo->underflow[h] = underflows[h];
778 /* FIXME: underflows must be unconditional, standard verdicts
779 < 0 (not IP6T_RETURN). --RR */
781 /* Clear counters and comefrom */
782 e->counters = ((struct xt_counters) { 0, 0 });
790 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
792 struct xt_tgdtor_param par;
793 struct ip6t_entry_target *t;
795 if (i && (*i)-- == 0)
798 /* Cleanup all matches */
799 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
800 t = ip6t_get_target(e);
802 par.target = t->u.kernel.target;
803 par.targinfo = t->data;
804 par.family = NFPROTO_IPV6;
805 if (par.target->destroy != NULL)
806 par.target->destroy(&par);
807 module_put(par.target->me);
811 /* Checks and translates the user-supplied table segment (held in
814 translate_table(const char *name,
815 unsigned int valid_hooks,
816 struct xt_table_info *newinfo,
820 const unsigned int *hook_entries,
821 const unsigned int *underflows)
826 newinfo->size = size;
827 newinfo->number = number;
829 /* Init all hooks to impossible value. */
830 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
831 newinfo->hook_entry[i] = 0xFFFFFFFF;
832 newinfo->underflow[i] = 0xFFFFFFFF;
835 duprintf("translate_table: size %u\n", newinfo->size);
837 /* Walk through entries, checking offsets. */
838 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
839 check_entry_size_and_hooks,
843 hook_entries, underflows, &i);
848 duprintf("translate_table: %u not %u entries\n",
853 /* Check hooks all assigned */
854 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
855 /* Only hooks which are valid */
856 if (!(valid_hooks & (1 << i)))
858 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
859 duprintf("Invalid hook entry %u %u\n",
863 if (newinfo->underflow[i] == 0xFFFFFFFF) {
864 duprintf("Invalid underflow %u %u\n",
870 if (!mark_source_chains(newinfo, valid_hooks, entry0))
873 /* Finally, each sanity check must pass */
875 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
876 find_check_entry, name, size, &i);
879 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
884 /* And one copy for every other CPU */
885 for_each_possible_cpu(i) {
886 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
887 memcpy(newinfo->entries[i], entry0, newinfo->size);
895 add_entry_to_counter(const struct ip6t_entry *e,
896 struct xt_counters total[],
899 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
906 set_entry_to_counter(const struct ip6t_entry *e,
907 struct ip6t_counters total[],
910 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
917 get_counters(const struct xt_table_info *t,
918 struct xt_counters counters[])
924 /* Instead of clearing (by a previous call to memset())
925 * the counters and using adds, we set the counters
926 * with data used by 'current' CPU
927 * We dont care about preemption here.
929 curcpu = raw_smp_processor_id();
932 IP6T_ENTRY_ITERATE(t->entries[curcpu],
934 set_entry_to_counter,
938 for_each_possible_cpu(cpu) {
942 IP6T_ENTRY_ITERATE(t->entries[cpu],
944 add_entry_to_counter,
950 /* We're lazy, and add to the first CPU; overflow works its fey magic
951 * and everything is OK. */
953 add_counter_to_entry(struct ip6t_entry *e,
954 const struct xt_counters addme[],
957 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
963 /* Take values from counters and add them back onto the current cpu */
964 static void put_counters(struct xt_table_info *t,
965 const struct xt_counters counters[])
970 cpu = smp_processor_id();
972 IP6T_ENTRY_ITERATE(t->entries[cpu],
974 add_counter_to_entry,
981 zero_entry_counter(struct ip6t_entry *e, void *arg)
983 e->counters.bcnt = 0;
984 e->counters.pcnt = 0;
989 clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
992 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
994 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
995 for_each_possible_cpu(cpu) {
996 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
997 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
998 zero_entry_counter, NULL);
1002 static struct xt_counters *alloc_counters(struct xt_table *table)
1004 unsigned int countersize;
1005 struct xt_counters *counters;
1006 struct xt_table_info *private = table->private;
1007 struct xt_table_info *info;
1009 /* We need atomic snapshot of counters: rest doesn't change
1010 (other than comefrom, which userspace doesn't care
1012 countersize = sizeof(struct xt_counters) * private->number;
1013 counters = vmalloc_node(countersize, numa_node_id());
1015 if (counters == NULL)
1018 info = xt_alloc_table_info(private->size);
1022 clone_counters(info, private);
1024 mutex_lock(&table->lock);
1025 xt_table_entry_swap_rcu(private, info);
1026 synchronize_net(); /* Wait until smoke has cleared */
1028 get_counters(info, counters);
1029 put_counters(private, counters);
1030 mutex_unlock(&table->lock);
1032 xt_free_table_info(info);
1037 return ERR_PTR(-ENOMEM);
1041 copy_entries_to_user(unsigned int total_size,
1042 struct xt_table *table,
1043 void __user *userptr)
1045 unsigned int off, num;
1046 struct ip6t_entry *e;
1047 struct xt_counters *counters;
1048 const struct xt_table_info *private = table->private;
1050 const void *loc_cpu_entry;
1052 counters = alloc_counters(table);
1053 if (IS_ERR(counters))
1054 return PTR_ERR(counters);
1056 /* choose the copy that is on our node/cpu, ...
1057 * This choice is lazy (because current thread is
1058 * allowed to migrate to another cpu)
1060 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1061 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1066 /* FIXME: use iterator macros --RR */
1067 /* ... then go back and fix counters and names */
1068 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1070 const struct ip6t_entry_match *m;
1071 const struct ip6t_entry_target *t;
1073 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1074 if (copy_to_user(userptr + off
1075 + offsetof(struct ip6t_entry, counters),
1077 sizeof(counters[num])) != 0) {
1082 for (i = sizeof(struct ip6t_entry);
1083 i < e->target_offset;
1084 i += m->u.match_size) {
1087 if (copy_to_user(userptr + off + i
1088 + offsetof(struct ip6t_entry_match,
1090 m->u.kernel.match->name,
1091 strlen(m->u.kernel.match->name)+1)
1098 t = ip6t_get_target(e);
1099 if (copy_to_user(userptr + off + e->target_offset
1100 + offsetof(struct ip6t_entry_target,
1102 t->u.kernel.target->name,
1103 strlen(t->u.kernel.target->name)+1) != 0) {
1114 #ifdef CONFIG_COMPAT
1115 static void compat_standard_from_user(void *dst, void *src)
1117 int v = *(compat_int_t *)src;
1120 v += xt_compat_calc_jump(AF_INET6, v);
1121 memcpy(dst, &v, sizeof(v));
1124 static int compat_standard_to_user(void __user *dst, void *src)
1126 compat_int_t cv = *(int *)src;
1129 cv -= xt_compat_calc_jump(AF_INET6, cv);
1130 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1134 compat_calc_match(struct ip6t_entry_match *m, int *size)
1136 *size += xt_compat_match_offset(m->u.kernel.match);
1140 static int compat_calc_entry(struct ip6t_entry *e,
1141 const struct xt_table_info *info,
1142 void *base, struct xt_table_info *newinfo)
1144 struct ip6t_entry_target *t;
1145 unsigned int entry_offset;
1148 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1149 entry_offset = (void *)e - base;
1150 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1151 t = ip6t_get_target(e);
1152 off += xt_compat_target_offset(t->u.kernel.target);
1153 newinfo->size -= off;
1154 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1158 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1159 if (info->hook_entry[i] &&
1160 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1161 newinfo->hook_entry[i] -= off;
1162 if (info->underflow[i] &&
1163 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1164 newinfo->underflow[i] -= off;
1169 static int compat_table_info(const struct xt_table_info *info,
1170 struct xt_table_info *newinfo)
1172 void *loc_cpu_entry;
1174 if (!newinfo || !info)
1177 /* we dont care about newinfo->entries[] */
1178 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1179 newinfo->initial_entries = 0;
1180 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1181 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1182 compat_calc_entry, info, loc_cpu_entry,
1187 static int get_info(struct net *net, void __user *user, int *len, int compat)
1189 char name[IP6T_TABLE_MAXNAMELEN];
1193 if (*len != sizeof(struct ip6t_getinfo)) {
1194 duprintf("length %u != %zu\n", *len,
1195 sizeof(struct ip6t_getinfo));
1199 if (copy_from_user(name, user, sizeof(name)) != 0)
1202 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1203 #ifdef CONFIG_COMPAT
1205 xt_compat_lock(AF_INET6);
1207 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1208 "ip6table_%s", name);
1209 if (t && !IS_ERR(t)) {
1210 struct ip6t_getinfo info;
1211 const struct xt_table_info *private = t->private;
1213 #ifdef CONFIG_COMPAT
1215 struct xt_table_info tmp;
1216 ret = compat_table_info(private, &tmp);
1217 xt_compat_flush_offsets(AF_INET6);
1221 info.valid_hooks = t->valid_hooks;
1222 memcpy(info.hook_entry, private->hook_entry,
1223 sizeof(info.hook_entry));
1224 memcpy(info.underflow, private->underflow,
1225 sizeof(info.underflow));
1226 info.num_entries = private->number;
1227 info.size = private->size;
1228 strcpy(info.name, name);
1230 if (copy_to_user(user, &info, *len) != 0)
1238 ret = t ? PTR_ERR(t) : -ENOENT;
1239 #ifdef CONFIG_COMPAT
1241 xt_compat_unlock(AF_INET6);
1247 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1250 struct ip6t_get_entries get;
1253 if (*len < sizeof(get)) {
1254 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1257 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1259 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1260 duprintf("get_entries: %u != %zu\n",
1261 *len, sizeof(get) + get.size);
1265 t = xt_find_table_lock(net, AF_INET6, get.name);
1266 if (t && !IS_ERR(t)) {
1267 struct xt_table_info *private = t->private;
1268 duprintf("t->private->number = %u\n", private->number);
1269 if (get.size == private->size)
1270 ret = copy_entries_to_user(private->size,
1271 t, uptr->entrytable);
1273 duprintf("get_entries: I've got %u not %u!\n",
1274 private->size, get.size);
1280 ret = t ? PTR_ERR(t) : -ENOENT;
1286 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1287 struct xt_table_info *newinfo, unsigned int num_counters,
1288 void __user *counters_ptr)
1292 struct xt_table_info *oldinfo;
1293 struct xt_counters *counters;
1294 const void *loc_cpu_old_entry;
1297 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1304 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1305 "ip6table_%s", name);
1306 if (!t || IS_ERR(t)) {
1307 ret = t ? PTR_ERR(t) : -ENOENT;
1308 goto free_newinfo_counters_untrans;
1312 if (valid_hooks != t->valid_hooks) {
1313 duprintf("Valid hook crap: %08X vs %08X\n",
1314 valid_hooks, t->valid_hooks);
1319 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1323 /* Update module usage count based on number of rules */
1324 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1325 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1326 if ((oldinfo->number > oldinfo->initial_entries) ||
1327 (newinfo->number <= oldinfo->initial_entries))
1329 if ((oldinfo->number > oldinfo->initial_entries) &&
1330 (newinfo->number <= oldinfo->initial_entries))
1333 /* Get the old counters. */
1334 get_counters(oldinfo, counters);
1335 /* Decrease module usage counts and free resource */
1336 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1337 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1339 xt_free_table_info(oldinfo);
1340 if (copy_to_user(counters_ptr, counters,
1341 sizeof(struct xt_counters) * num_counters) != 0)
1350 free_newinfo_counters_untrans:
1357 do_replace(struct net *net, void __user *user, unsigned int len)
1360 struct ip6t_replace tmp;
1361 struct xt_table_info *newinfo;
1362 void *loc_cpu_entry;
1364 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1367 /* overflow check */
1368 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1371 newinfo = xt_alloc_table_info(tmp.size);
1375 /* choose the copy that is on our node/cpu */
1376 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1377 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1383 ret = translate_table(tmp.name, tmp.valid_hooks,
1384 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1385 tmp.hook_entry, tmp.underflow);
1389 duprintf("ip_tables: Translated table\n");
1391 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1392 tmp.num_counters, tmp.counters);
1394 goto free_newinfo_untrans;
1397 free_newinfo_untrans:
1398 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1400 xt_free_table_info(newinfo);
1405 do_add_counters(struct net *net, void __user *user, unsigned int len,
1409 struct xt_counters_info tmp;
1410 struct xt_counters *paddc;
1411 unsigned int num_counters;
1416 const struct xt_table_info *private;
1418 const void *loc_cpu_entry;
1419 #ifdef CONFIG_COMPAT
1420 struct compat_xt_counters_info compat_tmp;
1424 size = sizeof(struct compat_xt_counters_info);
1429 size = sizeof(struct xt_counters_info);
1432 if (copy_from_user(ptmp, user, size) != 0)
1435 #ifdef CONFIG_COMPAT
1437 num_counters = compat_tmp.num_counters;
1438 name = compat_tmp.name;
1442 num_counters = tmp.num_counters;
1446 if (len != size + num_counters * sizeof(struct xt_counters))
1449 paddc = vmalloc_node(len - size, numa_node_id());
1453 if (copy_from_user(paddc, user + size, len - size) != 0) {
1458 t = xt_find_table_lock(net, AF_INET6, name);
1459 if (!t || IS_ERR(t)) {
1460 ret = t ? PTR_ERR(t) : -ENOENT;
1464 mutex_lock(&t->lock);
1465 private = t->private;
1466 if (private->number != num_counters) {
1468 goto unlock_up_free;
1473 /* Choose the copy that is on our node */
1474 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1475 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1477 add_counter_to_entry,
1482 mutex_unlock(&t->lock);
1491 #ifdef CONFIG_COMPAT
1492 struct compat_ip6t_replace {
1493 char name[IP6T_TABLE_MAXNAMELEN];
1497 u32 hook_entry[NF_INET_NUMHOOKS];
1498 u32 underflow[NF_INET_NUMHOOKS];
1500 compat_uptr_t counters; /* struct ip6t_counters * */
1501 struct compat_ip6t_entry entries[0];
/*
 * Copy one native ip6t_entry out to 32-bit userland in compat form.
 * *dstptr and *size track the output cursor and remaining space; the
 * rule's counters are taken from counters[*i].  target_offset and
 * next_offset are rewritten because the compat entry is smaller than
 * the native one.
 * NOTE(review): the return type, error returns and closing brace are
 * elided in this excerpt.
 */
1505 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1506 			    unsigned int *size, struct xt_counters *counters,
1509 	struct ip6t_entry_target *t;
1510 	struct compat_ip6t_entry __user *ce;
1511 	u_int16_t target_offset, next_offset;
1512 	compat_uint_t origsize;
1517 	ce = (struct compat_ip6t_entry __user *)*dstptr;
/* Copy the fixed-size header first, then overwrite the counters member. */
1518 	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1521 	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1524 	*dstptr += sizeof(struct compat_ip6t_entry);
1525 	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Matches and target shrink as well; (origsize - *size) is the total
 * shrinkage so far, used to fix up the stored offsets. */
1527 	ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1528 	target_offset = e->target_offset - (origsize - *size);
1531 	t = ip6t_get_target(e);
1532 	ret = xt_compat_target_to_user(t, dstptr, size);
1536 	next_offset = e->next_offset - (origsize - *size);
1537 	if (put_user(target_offset, &ce->target_offset))
1539 	if (put_user(next_offset, &ce->next_offset))
/*
 * Resolve the xt_match named by a compat rule's match entry, loading
 * the "ip6t_<name>" module on demand.  On success the match is stored
 * in m->u.kernel.match (reference held) and the native-vs-compat size
 * delta is added to *size.  Returns PTR_ERR/-ENOENT on failure.
 * NOTE(review): braces and the success return are elided here.
 */
1549 compat_find_calc_match(struct ip6t_entry_match *m,
1551 		       const struct ip6t_ip6 *ipv6,
1552 		       unsigned int hookmask,
1553 		       int *size, unsigned int *i)
1555 	struct xt_match *match;
1557 	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1558 					m->u.user.revision),
1559 					"ip6t_%s", m->u.user.name);
1560 	if (IS_ERR(match) || !match) {
1561 		duprintf("compat_check_calc_match: `%s' not found\n",
1563 		return match ? PTR_ERR(match) : -ENOENT;
1565 	m->u.kernel.match = match;
1566 	*size += xt_compat_match_offset(match);
/*
 * Per-match cleanup callback: drop the module reference taken by
 * compat_find_calc_match.  The optional counter *i lets an iterator
 * stop after releasing a fixed number of matches (decrement-and-test
 * idiom; the early return is elided in this excerpt).
 */
1573 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1575 	if (i && (*i)-- == 0)
1578 	module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match,
 * then the target.  *i optionally bounds how many entries an outer
 * iteration releases (same decrement-and-test idiom as above).
 */
1583 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1585 	struct ip6t_entry_target *t;
1587 	if (i && (*i)-- == 0)
1590 	/* Cleanup all matches */
1591 	COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1592 	t = compat_ip6t_get_target(e);
1593 	module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry during table translation:
 * checks alignment and bounds, resolves every match and the target
 * (taking module references), records the compat->native size offset
 * with xt_compat_add_offset(), notes which entries sit at hook/underflow
 * positions, and clears the counters.  On failure the references taken
 * so far are dropped.
 * NOTE(review): error labels, some locals and the closing brace are
 * elided in this excerpt.
 */
1598 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1599 				  struct xt_table_info *newinfo,
1601 				  unsigned char *base,
1602 				  unsigned char *limit,
1603 				  unsigned int *hook_entries,
1604 				  unsigned int *underflows,
1608 	struct ip6t_entry_target *t;
1609 	struct xt_target *target;
1610 	unsigned int entry_offset;
1614 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
1615 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1616 	    || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1617 		duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Entry must at least hold its own header plus a target header. */
1621 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1622 			     sizeof(struct compat_xt_entry_target)) {
1623 		duprintf("checking: element %p size %u\n",
1628 	/* For purposes of check_entry casting the compat entry is fine */
1629 	ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native entry will be. */
1633 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1634 	entry_offset = (void *)e - (void *)base;
1636 	ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1637 					&e->ipv6, e->comefrom, &off, &j);
1639 		goto release_matches;
1641 	t = compat_ip6t_get_target(e);
1642 	target = try_then_request_module(xt_find_target(AF_INET6,
1644 					 t->u.user.revision),
1645 					 "ip6t_%s", t->u.user.name);
1646 	if (IS_ERR(target) || !target) {
1647 		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1649 		ret = target ? PTR_ERR(target) : -ENOENT;
1650 		goto release_matches;
1652 	t->u.kernel.target = target;
1654 	off += xt_compat_target_offset(target);
/* Remember this entry's size delta so later passes can translate offsets. */
1656 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1660 	/* Check hooks & underflows */
1661 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1662 		if ((unsigned char *)e - base == hook_entries[h])
1663 			newinfo->hook_entry[h] = hook_entries[h];
1664 		if ((unsigned char *)e - base == underflows[h])
1665 			newinfo->underflow[h] = underflows[h];
1668 	/* Clear counters and comefrom */
1669 	memset(&e->counters, 0, sizeof(e->counters));
1676 	module_put(t->u.kernel.target->me);
/* NOTE(review): e is a compat entry but this uses the native
 * IP6T_MATCH_ITERATE rather than COMPAT_IP6T_MATCH_ITERATE —
 * confirm against upstream whether this is intentional. */
1678 	IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second translation pass: expand one validated compat entry into its
 * native layout at *dstptr.  Matches and the target are converted with
 * the xt_compat_*_from_user helpers; target/next offsets and any
 * hook/underflow positions that lie beyond this entry are grown by the
 * accumulated size difference (origsize - *size, which is negative
 * growth here since *size increases).
 * NOTE(review): return type, some locals and closing brace are elided.
 */
1683 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1684 			    unsigned int *size, const char *name,
1685 			    struct xt_table_info *newinfo, unsigned char *base)
1687 	struct ip6t_entry_target *t;
1688 	struct xt_target *target;
1689 	struct ip6t_entry *de;
1690 	unsigned int origsize;
1695 	de = (struct ip6t_entry *)*dstptr;
1696 	memcpy(de, e, sizeof(struct ip6t_entry));
1697 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1699 	*dstptr += sizeof(struct ip6t_entry);
1700 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1702 	ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1706 	de->target_offset = e->target_offset - (origsize - *size);
1707 	t = compat_ip6t_get_target(e);
1708 	target = t->u.kernel.target;
1709 	xt_compat_target_from_user(t, dstptr, size);
1711 	de->next_offset = e->next_offset - (origsize - *size);
/* Any hook/underflow offset past this entry shifts by the growth. */
1712 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1713 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1714 			newinfo->hook_entry[h] -= origsize - *size;
1715 		if ((unsigned char *)de - base < newinfo->underflow[h])
1716 			newinfo->underflow[h] -= origsize - *size;
/*
 * Final semantic check of a translated (now native-layout) entry:
 * run each match's checkentry hook via check_match, then validate the
 * target via check_target.  On failure, matches checked so far are
 * cleaned up (j counts them).
 */
1721 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1726 	struct xt_mtchk_param mtpar;
1730 	mtpar.entryinfo = &e->ipv6;
1731 	mtpar.hook_mask = e->comefrom;
1732 	mtpar.family = NFPROTO_IPV6;
1733 	ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1735 		goto cleanup_matches;
1737 	ret = check_target(e, name);
1739 		goto cleanup_matches;
1745 	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a whole compat rule blob into a native xt_table_info.
 * Two passes under the per-family compat lock: (1) walk the compat
 * entries checking sizes/hooks and computing per-entry size offsets;
 * (2) copy each entry into the native buffer, fixing offsets.  Then
 * mark_source_chains() validates loop-freedom and compat_check_entry
 * runs the match/target checkentry hooks.  On success the translated
 * table replaces *pinfo and is duplicated to every possible CPU.
 * NOTE(review): numerous error-path lines and labels are elided.
 */
1750 translate_compat_table(const char *name,
1751 		       unsigned int valid_hooks,
1752 		       struct xt_table_info **pinfo,
1754 		       unsigned int total_size,
1755 		       unsigned int number,
1756 		       unsigned int *hook_entries,
1757 		       unsigned int *underflows)
1760 	struct xt_table_info *newinfo, *info;
1761 	void *pos, *entry0, *entry1;
1768 	info->number = number;
1770 	/* Init all hooks to impossible value. */
1771 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1772 		info->hook_entry[i] = 0xFFFFFFFF;
1773 		info->underflow[i] = 0xFFFFFFFF;
1776 	duprintf("translate_compat_table: size %u\n", info->size);
1778 	xt_compat_lock(AF_INET6);
1779 	/* Walk through entries, checking offsets. */
1780 	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1781 					check_compat_entry_size_and_hooks,
1782 					info, &size, entry0,
1783 					entry0 + total_size,
1784 					hook_entries, underflows, &j, name);
1790 		duprintf("translate_compat_table: %u not %u entries\n",
1795 	/* Check hooks all assigned */
1796 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1797 		/* Only hooks which are valid */
1798 		if (!(valid_hooks & (1 << i)))
1800 		if (info->hook_entry[i] == 0xFFFFFFFF) {
1801 			duprintf("Invalid hook entry %u %u\n",
1802 				 i, hook_entries[i]);
1805 		if (info->underflow[i] == 0xFFFFFFFF) {
1806 			duprintf("Invalid underflow %u %u\n",
1813 	newinfo = xt_alloc_table_info(size);
1817 	newinfo->number = number;
1818 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1819 		newinfo->hook_entry[i] = info->hook_entry[i];
1820 		newinfo->underflow[i] = info->underflow[i];
1822 	entry1 = newinfo->entries[raw_smp_processor_id()];
/* Pass 2: expand every compat entry into the native buffer. */
1825 	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1826 					compat_copy_entry_from_user,
1827 					&pos, &size, name, newinfo, entry1);
1828 	xt_compat_flush_offsets(AF_INET6);
1829 	xt_compat_unlock(AF_INET6);
1834 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
1838 	ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Mixed-state cleanup: entries after i are still in compat form (release
 * their refs), entries up to i are native (full cleanup_entry). */
1842 		COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1843 						   compat_release_entry, &j);
1844 		IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1845 		xt_free_table_info(newinfo);
1849 	/* And one copy for every other CPU */
1850 	for_each_possible_cpu(i)
1851 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1852 			memcpy(newinfo->entries[i], entry1, newinfo->size);
1856 	xt_free_table_info(info);
1860 	xt_free_table_info(newinfo);
1862 	COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1865 	xt_compat_flush_offsets(AF_INET6);
1866 	xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header, sanity-check sizes against integer overflow, copy in
 * the rule blob, translate it to native form and hand the result to
 * the shared __do_replace().  On any failure after translation the
 * translated entries are cleaned up before the table info is freed.
 */
1871 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1874 	struct compat_ip6t_replace tmp;
1875 	struct xt_table_info *newinfo;
1876 	void *loc_cpu_entry;
1878 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1881 	/* overflow check */
1882 	if (tmp.size >= INT_MAX / num_possible_cpus())
1884 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1887 	newinfo = xt_alloc_table_info(tmp.size)
1891 	/* choose the copy that is on our node/cpu */
1892 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1893 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1899 	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1900 				     &newinfo, &loc_cpu_entry, tmp.size,
1901 				     tmp.num_entries, tmp.hook_entry,
1906 	duprintf("compat_do_replace: Translated table\n");
1908 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1909 			   tmp.num_counters, compat_ptr(tmp.counters));
1911 		goto free_newinfo_untrans;
1914 free_newinfo_untrans:
1915 	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1917 	xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: CAP_NET_ADMIN only; routes REPLACE to
 * the compat translator and ADD_COUNTERS to do_add_counters() with the
 * compat flag set.
 */
1922 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1927 	if (!capable(CAP_NET_ADMIN))
1931 	case IP6T_SO_SET_REPLACE:
1932 		ret = compat_do_replace(sock_net(sk), user, len);
1935 	case IP6T_SO_SET_ADD_COUNTERS:
1936 		ret = do_add_counters(sock_net(sk), user, len, 1);
1940 		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply: table name,
 * (elided) size, then the variable-length compat entry table.
 */
1947 struct compat_ip6t_get_entries {
1948 	char name[IP6T_TABLE_MAXNAMELEN];
1950 	struct compat_ip6t_entry entrytable[0];
/*
 * Dump a table's entries to a 32-bit caller: snapshot the counters,
 * then walk this CPU's copy of the entries converting each to compat
 * form via compat_copy_entry_to_user.
 */
1954 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1955 			    void __user *userptr)
1957 	struct xt_counters *counters;
1958 	const struct xt_table_info *private = table->private;
1962 	const void *loc_cpu_entry;
1965 	counters = alloc_counters(table);
1966 	if (IS_ERR(counters))
1967 		return PTR_ERR(counters);
1969 	/* choose the copy that is on our node/cpu, ...
1970 	 * This choice is lazy (because current thread is
1971 	 * allowed to migrate to another cpu)
1973 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1976 	ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1977 				 compat_copy_entry_to_user,
1978 				 &pos, &size, counters, &i);
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * length against the compat-translated table size (computed under the
 * compat lock via compat_table_info), then dump the entries in compat
 * form.  -EAGAIN-style size mismatches are reported to the caller.
 */
1985 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1989 	struct compat_ip6t_get_entries get;
1992 	if (*len < sizeof(get)) {
1993 		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1997 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2000 	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2001 		duprintf("compat_get_entries: %u != %zu\n",
2002 			 *len, sizeof(get) + get.size);
2006 	xt_compat_lock(AF_INET6);
2007 	t = xt_find_table_lock(net, AF_INET6, get.name);
2008 	if (t && !IS_ERR(t)) {
2009 		const struct xt_table_info *private = t->private;
2010 		struct xt_table_info info;
2011 		duprintf("t->private->number = %u\n", private->number);
2012 		ret = compat_table_info(private, &info);
2013 		if (!ret && get.size == info.size) {
2014 			ret = compat_copy_entries_to_user(private->size,
2015 							  t, uptr->entrytable);
2017 			duprintf("compat_get_entries: I've got %u not %u!\n",
2018 				 private->size, get.size);
2021 		xt_compat_flush_offsets(AF_INET6);
2025 		ret = t ? PTR_ERR(t) : -ENOENT;
2027 	xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat get handler below falls through to
 * this native handler for commands that need no translation. */
2031 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: CAP_NET_ADMIN only.  INFO and ENTRIES
 * need layout translation; everything else is delegated to the native
 * do_ip6t_get_ctl().
 */
2034 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2038 	if (!capable(CAP_NET_ADMIN))
2042 	case IP6T_SO_GET_INFO:
2043 		ret = get_info(sock_net(sk), user, len, 1);
2045 	case IP6T_SO_GET_ENTRIES:
2046 		ret = compat_get_entries(sock_net(sk), user, len);
2049 		ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN only; REPLACE installs a
 * new table, ADD_COUNTERS adds to the live counters (compat flag 0).
 */
2056 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2060 	if (!capable(CAP_NET_ADMIN))
2064 	case IP6T_SO_SET_REPLACE:
2065 		ret = do_replace(sock_net(sk), user, len);
2068 	case IP6T_SO_SET_ADD_COUNTERS:
2069 		ret = do_add_counters(sock_net(sk), user, len, 0);
2073 		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: CAP_NET_ADMIN only.  Handles table
 * info/entry dumps and match/target revision queries (the latter may
 * trigger a module load via try_then_request_module).
 */
2081 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2085 	if (!capable(CAP_NET_ADMIN))
2089 	case IP6T_SO_GET_INFO:
2090 		ret = get_info(sock_net(sk), user, len, 0);
2093 	case IP6T_SO_GET_ENTRIES:
2094 		ret = get_entries(sock_net(sk), user, len);
2097 	case IP6T_SO_GET_REVISION_MATCH:
2098 	case IP6T_SO_GET_REVISION_TARGET: {
2099 		struct ip6t_get_revision rev;
2102 		if (*len != sizeof(rev)) {
2106 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2111 		if (cmd == IP6T_SO_GET_REVISION_TARGET)
2116 		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2119 					"ip6t_%s", rev.name);
2124 		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table: allocate per-CPU table info, copy the
 * initial ruleset from the replace template, translate/validate it,
 * then hand it to xt_register_table.  Returns the registered table or
 * ERR_PTR on failure (newinfo freed on every error path).
 */
2131 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2132 				     const struct ip6t_replace *repl)
2135 	struct xt_table_info *newinfo;
2136 	struct xt_table_info bootstrap
2137 		= { 0, 0, 0, { 0 }, { 0 }, { } };
2138 	void *loc_cpu_entry;
2139 	struct xt_table *new_table;
2141 	newinfo = xt_alloc_table_info(repl->size);
2147 	/* choose the copy on our node/cpu, but dont care about preemption */
2148 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2149 	memcpy(loc_cpu_entry, repl->entries, repl->size);
2151 	ret = translate_table(table->name, table->valid_hooks,
2152 			      newinfo, loc_cpu_entry, repl->size,
2159 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
2160 	if (IS_ERR(new_table)) {
2161 		ret = PTR_ERR(new_table);
2167 	xt_free_table_info(newinfo);
2169 	return ERR_PTR(ret);
/*
 * Unregister a table and free its resources: clean up every entry
 * (dropping match/target module refs), drop the table module's own
 * reference if user rules were loaded beyond the initial set, and free
 * the per-CPU table info.  table->me is read before unregistering
 * since the table structure may not be touched afterwards.
 */
2172 void ip6t_unregister_table(struct xt_table *table)
2174 	struct xt_table_info *private;
2175 	void *loc_cpu_entry;
2176 	struct module *table_owner = table->me;
2178 	private = xt_unregister_table(table);
2180 	/* Decrease module usage counts and free resources */
2181 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
2182 	IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2183 	if (private->number > private->initial_entries)
2184 		module_put(table_owner);
2185 	xt_free_table_info(private);
2188 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* (An elided `invert` parameter XORs the result — see the call site in
 * icmp6_match, which passes !!(invflags & IP6T_ICMP_INV).) */
2190 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2191 		      u_int8_t type, u_int8_t code,
2194 	return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match handler for the built-in icmp6 match: refuses non-first
 * fragments, pulls the ICMPv6 header (dropping the packet via
 * *par->hotdrop if it cannot be read), and compares type/code against
 * the rule's configured range, honouring IP6T_ICMP_INV.
 */
2199 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2201 	const struct icmp6hdr *ic;
2202 	struct icmp6hdr _icmph;
2203 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2205 	/* Must not be a fragment. */
2206 	if (par->fragoff != 0)
2209 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2211 		/* We've been asked to examine this packet, and we
2212 		 * can't.  Hence, no choice but to drop.
2214 		duprintf("Dropping evil ICMP tinygram.\n");
2215 		*par->hotdrop = true;
2219 	return icmp6_type_code_match(icmpinfo->type,
2222 				     ic->icmp6_type, ic->icmp6_code,
2223 				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
2226 /* Called when user tries to insert an entry of this type. */
/* Rejects rules whose invflags contain any bit other than
 * IP6T_ICMP_INV; returns true when the rule is acceptable. */
2227 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2229 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2231 	/* Must specify no unknown invflags */
2232 	return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2235 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: its data is a single int verdict; compat
 * hooks translate between 32- and 64-bit int layouts. */
2236 static struct xt_target ip6t_standard_target __read_mostly = {
2237 	.name		= IP6T_STANDARD_TARGET,
2238 	.targetsize	= sizeof(int),
2240 #ifdef CONFIG_COMPAT
2241 	.compatsize	= sizeof(compat_int_t),
2242 	.compat_from_user = compat_standard_from_user,
2243 	.compat_to_user	= compat_standard_to_user,
/* ERROR target: marks chain heads / table errors; its data is the
 * error name string. */
2247 static struct xt_target ip6t_error_target __read_mostly = {
2248 	.name		= IP6T_ERROR_TARGET,
2249 	.target		= ip6t_error,
2250 	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
/* get/setsockopt registration: wires the dispatchers above into the
 * netfilter sockopt range, with compat variants when available. */
2254 static struct nf_sockopt_ops ip6t_sockopts = {
2256 	.set_optmin	= IP6T_BASE_CTL,
2257 	.set_optmax	= IP6T_SO_SET_MAX+1,
2258 	.set		= do_ip6t_set_ctl,
2259 #ifdef CONFIG_COMPAT
2260 	.compat_set	= compat_do_ip6t_set_ctl,
2262 	.get_optmin	= IP6T_BASE_CTL,
2263 	.get_optmax	= IP6T_SO_GET_MAX+1,
2264 	.get		= do_ip6t_get_ctl,
2265 #ifdef CONFIG_COMPAT
2266 	.compat_get	= compat_do_ip6t_get_ctl,
2268 	.owner		= THIS_MODULE,
/* Built-in icmp6 match registration (handler/checkentry defined above). */
2271 static struct xt_match icmp6_matchstruct __read_mostly = {
2273 	.match		= icmp6_match,
2274 	.matchsize	= sizeof(struct ip6t_icmp),
2275 	.checkentry	= icmp6_checkentry,
2276 	.proto		= IPPROTO_ICMPV6,
/* Per-network-namespace init: set up the AF_INET6 x_tables state. */
2280 static int __net_init ip6_tables_net_init(struct net *net)
2282 	return xt_proto_init(net, AF_INET6);
/* Per-network-namespace teardown: release the AF_INET6 x_tables state. */
2285 static void __net_exit ip6_tables_net_exit(struct net *net)
2287 	xt_proto_fini(net, AF_INET6);
/* Hook the init/exit pair into the pernet infrastructure. */
2290 static struct pernet_operations ip6_tables_net_ops = {
2291 	.init = ip6_tables_net_init,
2292 	.exit = ip6_tables_net_exit,
/*
 * Module init: register the pernet ops, the two built-in targets, the
 * icmp6 match and finally the sockopt interface.  The trailing labels
 * (elided between the registration calls) unwind in reverse order on
 * failure.
 */
2295 static int __init ip6_tables_init(void)
2299 	ret = register_pernet_subsys(&ip6_tables_net_ops);
2303 	/* No one else will be downing sem now, so we won't sleep */
2304 	ret = xt_register_target(&ip6t_standard_target);
2307 	ret = xt_register_target(&ip6t_error_target);
2310 	ret = xt_register_match(&icmp6_matchstruct);
2314 	/* Register setsockopt */
2315 	ret = nf_register_sockopt(&ip6t_sockopts);
2319 	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2323 	xt_unregister_match(&icmp6_matchstruct);
2325 	xt_unregister_target(&ip6t_error_target);
2327 	xt_unregister_target(&ip6t_standard_target);
2329 	unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in the reverse order of init. */
2334 static void __exit ip6_tables_fini(void)
2336 	nf_unregister_sockopt(&ip6t_sockopts);
2338 	xt_unregister_match(&icmp6_matchstruct);
2339 	xt_unregister_target(&ip6t_error_target);
2340 	xt_unregister_target(&ip6t_standard_target);
2342 	unregister_pernet_subsys(&ip6_tables_net_ops);
2346 * find the offset to specified header or the protocol number of last header
2347 * if target < 0. "last header" is transport protocol header, ESP, or
2350 * If target header is found, its offset is set in *offset and return protocol
2351 * number. Otherwise, return -1.
2353 * If the first fragment doesn't contain the final protocol header or
2354 * NEXTHDR_NONE it is considered invalid.
2356 * Note that a non-first fragment is a special case in which "the protocol number
2357 * of last header" is "next header" field in Fragment header. In this case,
2358 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/*
 * Walk the IPv6 extension-header chain starting just past the fixed
 * header (see the contract described in the comment block above).
 * For Fragment headers the fragment offset is extracted so non-first
 * fragments can be reported via *fragoff.
 * NOTE(review): several loop-exit and error-return lines are elided in
 * this excerpt.
 */
2362 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2363 		  int target, unsigned short *fragoff)
2365 	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2366 	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2367 	unsigned int len = skb->len - start;
2372 	while (nexthdr != target) {
2373 		struct ipv6_opt_hdr _hdr, *hp;
2374 		unsigned int hdrlen;
2376 		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2382 		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2385 		if (nexthdr == NEXTHDR_FRAGMENT) {
2386 			unsigned short _frag_off;
2388 			fp = skb_header_pointer(skb,
2389 						start+offsetof(struct frag_hdr,
/* Mask off the M flag and reserved bits; _frag_off keeps only the
 * 8-byte-unit fragment offset. */
2396 			_frag_off = ntohs(*fp) & ~0x7;
2399 			    ((!ipv6_ext_hdr(hp->nexthdr)) ||
2400 			     hp->nexthdr == NEXTHDR_NONE)) {
2402 				*fragoff = _frag_off;
/* AH measures its length in 32-bit words; other extension headers use
 * ipv6_optlen() (8-byte units). */
2408 		} else if (nexthdr == NEXTHDR_AUTH)
2409 			hdrlen = (hp->hdrlen + 2) << 2;
2411 			hdrlen = ipv6_optlen(hp);
2413 		nexthdr = hp->nexthdr;
/* Public API exported to other modules (e.g. the filter/mangle/raw
 * table modules and connection-tracking helpers). */
2422 EXPORT_SYMBOL(ip6t_register_table);
2423 EXPORT_SYMBOL(ip6t_unregister_table);
2424 EXPORT_SYMBOL(ip6t_do_table);
2425 EXPORT_SYMBOL(ip6t_ext_hdr);
2426 EXPORT_SYMBOL(ipv6_find_hdr);
2428 module_init(ip6_tables_init);
2429 module_exit(ip6_tables_fini);