2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
/*
 * ip6t_ext_hdr - true when @nexthdr is one of the IPv6 extension-header
 * (or ESP/AH/NONE) protocol numbers that may precede the transport header.
 */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
/*
 * ip6_packet_match - test @skb against the non-extension part of a rule
 * (@ip6info): source/destination address masks, in/out interface names,
 * and the desired protocol header. On protocol match, *protoff/*fragoff
 * are updated from ipv6_find_hdr() for use by per-match code.
 */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR the raw comparison with the rule's inversion flag, so one
 * expression handles both "match" and "! match" (inverted) rules. */
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
106 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP) ||
109 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface comparisons: ifname_compare_aligned() returns nonzero on
 * mismatch; FWINV applies IP6T_INV_VIA_IN / _OUT inversion. */
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
/* Only walk the extension-header chain when the rule specified -p. */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
/*
 * ip6_checkentry - sanity-check user-supplied rule header: reject any
 * flags or inversion-flags bits outside the kernel's known masks.
 */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
/*
 * ip6t_error - target handler for the built-in ERROR target: logs the
 * error name carried in par->targinfo. Reaching it indicates a broken
 * ruleset.
 */
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
/*
 * do_match - invoke one match extension on @skb. Fills par->match /
 * par->matchinfo from the entry, then calls the extension's ->match().
 * A false result stops the per-entry match iteration.
 */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* get_entry - resolve a byte @offset within a table blob to an entry. */
217 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset)
220 return (struct ip6t_entry *)(base + offset);
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
/* unconditional - true when the rule's IPv6 match part is all-zero,
 * i.e. the rule matches every packet (compared against a static
 * zero-initialized template). */
225 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
227 static const struct ip6t_ip6 uncond;
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
232 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234 /* This cries for unification! */
/* Human-readable chain names for each netfilter hook, used by packet
 * tracing output. */
235 static const char *const hooknames[] = {
236 [NF_INET_PRE_ROUTING] = "PREROUTING",
237 [NF_INET_LOCAL_IN] = "INPUT",
238 [NF_INET_FORWARD] = "FORWARD",
239 [NF_INET_LOCAL_OUT] = "OUTPUT",
240 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Comment kind printed after "TRACE:": a normal rule, a chain return,
 * or the chain policy. */
243 enum nf_ip_trace_comments {
244 NF_IP6_TRACE_COMMENT_RULE,
245 NF_IP6_TRACE_COMMENT_RETURN,
246 NF_IP6_TRACE_COMMENT_POLICY,
249 static const char *const comments[] = {
250 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
251 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
252 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Log settings used for TRACE output via nf_log_packet(). */
255 static struct nf_loginfo trace_loginfo = {
256 .type = NF_LOG_TYPE_LOG,
260 .logflags = NF_LOG_MASK,
265 /* Mildly perf critical (only if packet tracing is on) */
/*
 * get_chainname_rulenum - iterator callback used by trace_packet() to
 * locate the chain name, rule number, and comment kind for entry @e.
 * An ERROR target marks the head of a user chain (its data is the chain
 * name); an unconditional STANDARD target at target_offset ==
 * sizeof(struct ip6t_entry) marks a chain tail (policy or return).
 */
267 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
268 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum)
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */
275 *chainname = t->target.data;
280 if (s->target_offset == sizeof(struct ip6t_entry) &&
281 strcmp(t->target.u.kernel.target->name,
282 IP6T_STANDARD_TARGET) == 0 &&
284 unconditional(&s->ipv6)) {
285 /* Tail of chains: STANDARD target (return/policy) */
/* Built-in chain (name == hookname) ends in the policy; a user chain
 * ends in an implicit return. */
286 *comment = *chainname == hookname
287 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
288 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * trace_packet - emit a "TRACE: table:chain:comment:rulenum" log line
 * for a packet that matched entry @e, by walking the per-CPU rule blob
 * from the hook's start to find @e's chain name and rule number.
 */
297 static void trace_packet(struct sk_buff *skb,
299 const struct net_device *in,
300 const struct net_device *out,
301 const char *tablename,
302 struct xt_table_info *private,
303 struct ip6t_entry *e)
306 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment;
308 unsigned int rulenum = 0;
/* Walk this CPU's copy of the table, starting at the hook entry point. */
310 table_base = private->entries[smp_processor_id()];
311 root = get_entry(table_base, private->hook_entry[hook]);
313 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
316 IP6T_ENTRY_ITERATE(root,
317 private->size - private->hook_entry[hook],
318 get_chainname_rulenum,
319 e, hookname, &chainname, &comment, &rulenum);
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ",
323 tablename, chainname, comment, rulenum);
/* ip6t_next_entry - the entry immediately following @entry in the blob
 * (entries are variable-sized; next_offset is the byte distance). */
327 static inline __pure struct ip6t_entry *
328 ip6t_next_entry(const struct ip6t_entry *entry)
330 return (void *)entry + entry->next_offset;
333 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ip6t_do_table - main per-packet rule traversal for one table/hook.
 * Walks entries from the hook's entry point; on a full match it bumps
 * the rule counters and runs the target. STANDARD-target verdicts are
 * handled inline (absolute verdict, RETURN pops the back pointer saved
 * in ->comefrom, jump/goto moves @e); other targets run their ->target()
 * handler, with IP6T_CONTINUE advancing to the next entry.
 */
335 ip6t_do_table(struct sk_buff *skb,
337 const struct net_device *in,
338 const struct net_device *out,
339 struct xt_table *table)
/* Alias for the comefrom scratch field at the base of this CPU's blob,
 * used only for the CONFIG_NETFILTER_DEBUG re-entrancy poison checks. */
341 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
343 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
344 bool hotdrop = false;
345 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev;
349 struct ip6t_entry *e, *back;
350 struct xt_table_info *private;
351 struct xt_match_param mtpar;
352 struct xt_target_param tgpar;
/* NULL in/out devices compare against an empty, aligned name. */
355 indev = in ? in->name : nulldevname;
356 outdev = out ? out->name : nulldevname;
357 /* We handle fragments by dealing with the first fragment as
358 * if it was a normal packet. All other fragments are treated
359 * normally, except that they will NEVER match rules that ask
360 * things we don't know, ie. tcp syn flag or ports). If the
361 * rule is also a fragment-specific rule, non-fragments won't
363 mtpar.hotdrop = &hotdrop;
364 mtpar.in = tgpar.in = in;
365 mtpar.out = tgpar.out = out;
366 mtpar.family = tgpar.family = NFPROTO_IPV6;
367 mtpar.hooknum = tgpar.hooknum = hook;
369 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Use this CPU's private copy of the rules (see per-CPU design note
 * near the top of the file). */
372 private = table->private;
373 table_base = private->entries[smp_processor_id()];
375 e = get_entry(table_base, private->hook_entry[hook]);
377 /* For return from builtin chain */
378 back = get_entry(table_base, private->underflow[hook]);
381 struct ip6t_entry_target *t;
/* No match (IP header part or one of the extensions): skip to the
 * next rule. */
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
388 e = ip6t_next_entry(e);
/* Rule matched: account bytes (IPv6 header + payload) and one packet. */
392 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1);
396 t = ip6t_get_target(e);
397 IP_NF_ASSERT(t->u.kernel.target);
399 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
400 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
401 /* The packet is traced: log it */
402 if (unlikely(skb->nf_trace))
403 trace_packet(skb, hook, in, out,
404 table->name, private, e);
406 /* Standard target? */
/* NULL ->target marks the built-in STANDARD target; its verdict is
 * stored directly in the entry. */
407 if (!t->u.kernel.target->target) {
410 v = ((struct ip6t_standard_target *)t)->verdict;
412 /* Pop from stack? */
413 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* values as -(verdict)-1. */
414 verdict = (unsigned)(-v) - 1;
418 back = get_entry(table_base, back->comefrom);
/* Jump (not goto, not a plain fallthrough): remember where to return
 * to by stashing the back pointer in the next entry's comefrom. */
421 if (table_base + v != ip6t_next_entry(e) &&
422 !(e->ipv6.flags & IP6T_F_GOTO)) {
423 /* Save old back ptr in next entry */
424 struct ip6t_entry *next = ip6t_next_entry(e);
425 next->comefrom = (void *)back - table_base;
426 /* set back pointer to next entry */
430 e = get_entry(table_base, v);
434 /* Targets which reenter must return
436 tgpar.target = t->u.kernel.target;
437 tgpar.targinfo = t->data;
439 #ifdef CONFIG_NETFILTER_DEBUG
440 tb_comefrom = 0xeeeeeeec;
442 verdict = t->u.kernel.target->target(skb, &tgpar);
444 #ifdef CONFIG_NETFILTER_DEBUG
/* If the poison value changed, the target re-entered the table walk
 * without returning an absolute verdict - flag it. */
445 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 tb_comefrom = 0x57acc001;
452 if (verdict == IP6T_CONTINUE)
453 e = ip6t_next_entry(e);
459 #ifdef CONFIG_NETFILTER_DEBUG
460 tb_comefrom = NETFILTER_LINK_POISON;
462 xt_info_rdunlock_bh();
464 #ifdef DEBUG_ALLOW_ALL
475 /* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */
/*
 * mark_source_chains - depth-first walk of each valid hook's chain graph.
 * Marks every reachable entry's ->comefrom with the hook bit, detects
 * chain loops (re-visiting an entry whose NF_INET_NUMHOOKS bit is still
 * set), validates STANDARD-target verdicts/jump offsets, and backtracks
 * using pcnt as a temporary saved-position field (restored to 0 on exit).
 */
478 mark_source_chains(struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0)
483 /* No recursion; use packet counter to save back ptrs (reset
484 to 0 as we leave), and comefrom to save source hook bitmask */
485 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
486 unsigned int pos = newinfo->hook_entry[hook];
487 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
489 if (!(valid_hooks & (1 << hook)))
492 /* Set initial back pointer. */
493 e->counters.pcnt = pos;
496 struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e);
498 int visited = e->comefrom & (1 << hook);
/* NF_INET_NUMHOOKS bit still set means this entry is on the current
 * DFS path: a loop. */
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
501 printk("iptables: loop hook %u pos %u %08X.\n",
502 hook, pos, e->comefrom);
505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
507 /* Unconditional return/END. */
508 if ((e->target_offset == sizeof(struct ip6t_entry) &&
509 (strcmp(t->target.u.user.name,
510 IP6T_STANDARD_TARGET) == 0) &&
512 unconditional(&e->ipv6)) || visited) {
513 unsigned int oldpos, size;
/* Reject verdicts more negative than any valid NF_* encoding. */
515 if ((strcmp(t->target.u.user.name,
516 IP6T_STANDARD_TARGET) == 0) &&
517 t->verdict < -NF_MAX_VERDICT - 1) {
518 duprintf("mark_source_chains: bad "
519 "negative verdict (%i)\n",
524 /* Return: backtrack through the last
527 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
528 #ifdef DEBUG_IP_FIREWALL_USER
530 & (1 << NF_INET_NUMHOOKS)) {
531 duprintf("Back unset "
/* Pop the saved position from pcnt and clear it as we unwind. */
538 pos = e->counters.pcnt;
539 e->counters.pcnt = 0;
541 /* We're at the start. */
545 e = (struct ip6t_entry *)
547 } while (oldpos == pos + e->next_offset);
550 size = e->next_offset;
551 e = (struct ip6t_entry *)
552 (entry0 + pos + size);
553 e->counters.pcnt = pos;
556 int newpos = t->verdict;
558 if (strcmp(t->target.u.user.name,
559 IP6T_STANDARD_TARGET) == 0 &&
/* Jump target must leave room for at least one full entry. */
561 if (newpos > newinfo->size -
562 sizeof(struct ip6t_entry)) {
563 duprintf("mark_source_chains: "
564 "bad verdict (%i)\n",
568 /* This a jump; chase it. */
569 duprintf("Jump rule %u -> %u\n",
572 /* ... this is a fallthru */
573 newpos = pos + e->next_offset;
575 e = (struct ip6t_entry *)
577 e->counters.pcnt = pos;
582 duprintf("Finished chain %u\n", hook);
/*
 * cleanup_match - iterator callback: run a match extension's ->destroy()
 * and drop its module reference. The optional counter @i limits cleanup
 * to the first *i matches (used when aborting a partially-checked entry).
 */
588 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
590 struct xt_mtdtor_param par;
592 if (i && (*i)-- == 0)
596 par.match = m->u.kernel.match;
597 par.matchinfo = m->data;
598 par.family = NFPROTO_IPV6;
599 if (par.match->destroy != NULL)
600 par.match->destroy(&par);
601 module_put(par.match->me);
/*
 * check_entry - basic structural validation of one user-supplied rule:
 * the IPv6 match part must pass ip6_checkentry(), and the target must
 * fit between target_offset and next_offset.
 */
606 check_entry(struct ip6t_entry *e, const char *name)
608 struct ip6t_entry_target *t;
610 if (!ip6_checkentry(&e->ipv6)) {
611 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
/* Target header itself must fit before next_offset. */
615 if (e->target_offset + sizeof(struct ip6t_entry_target) >
619 t = ip6t_get_target(e);
620 if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * check_match - run xt_check_match() on one match extension, passing the
 * rule's protocol and protocol-inversion flag for compatibility checks.
 */
626 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
629 const struct ip6t_ip6 *ipv6 = par->entryinfo;
632 par->match = m->u.kernel.match;
633 par->matchinfo = m->data;
635 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
636 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
638 duprintf("ip_tables: check failed for `%s'.\n",
/*
 * find_check_match - look up (auto-loading "ip6t_<name>" if needed) the
 * match extension named in @m, take a module reference, then validate it
 * via check_match(). On check failure the module reference is dropped.
 */
647 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
650 struct xt_match *match;
653 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
655 "ip6t_%s", m->u.user.name);
656 if (IS_ERR(match) || !match) {
657 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
658 return match ? PTR_ERR(match) : -ENOENT;
660 m->u.kernel.match = match;
662 ret = check_match(m, par, i);
668 module_put(m->u.kernel.match->me);
/*
 * check_target - run xt_check_target() on the entry's target, using the
 * hook mask computed by mark_source_chains() (stored in e->comefrom).
 */
672 static int check_target(struct ip6t_entry *e, const char *name)
674 struct ip6t_entry_target *t = ip6t_get_target(e);
675 struct xt_tgchk_param par = {
678 .target = t->u.kernel.target,
680 .hook_mask = e->comefrom,
681 .family = NFPROTO_IPV6,
685 t = ip6t_get_target(e);
686 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
687 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
689 duprintf("ip_tables: check failed for `%s'.\n",
690 t->u.kernel.target->name);
/*
 * find_check_entry - fully validate one rule: structural check, then
 * find+check every match extension, then find+check the target. On any
 * failure, already-checked matches are cleaned up (the @j counter tracks
 * how many succeeded) and module references are released.
 */
697 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
698 unsigned int size, unsigned int *i)
700 struct ip6t_entry_target *t;
701 struct xt_target *target;
704 struct xt_mtchk_param mtpar;
706 ret = check_entry(e, name);
713 mtpar.entryinfo = &e->ipv6;
714 mtpar.hook_mask = e->comefrom;
715 mtpar.family = NFPROTO_IPV6;
716 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
718 goto cleanup_matches;
/* Look up the target extension, auto-loading "ip6t_<name>" if absent. */
720 t = ip6t_get_target(e);
721 target = try_then_request_module(xt_find_target(AF_INET6,
724 "ip6t_%s", t->u.user.name);
725 if (IS_ERR(target) || !target) {
726 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
727 ret = target ? PTR_ERR(target) : -ENOENT;
728 goto cleanup_matches;
730 t->u.kernel.target = target;
732 ret = check_target(e, name);
739 module_put(t->u.kernel.target->me);
/* Unwind: destroy only the j matches that were successfully checked. */
741 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
/*
 * check_underflow - a built-in chain's underflow (policy) entry must be
 * an unconditional STANDARD-target rule whose decoded verdict is NF_DROP
 * or NF_ACCEPT. (Verdicts are stored as -(NF_verdict) - 1.)
 */
745 static bool check_underflow(struct ip6t_entry *e)
747 const struct ip6t_entry_target *t;
748 unsigned int verdict;
750 if (!unconditional(&e->ipv6))
752 t = ip6t_get_target(e);
753 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
755 verdict = ((struct ip6t_standard_target *)t)->verdict;
756 verdict = -verdict - 1;
757 return verdict == NF_DROP || verdict == NF_ACCEPT;
/*
 * check_entry_size_and_hooks - first-pass validation of one entry in the
 * user blob: alignment, bounds against @limit, minimum size; records any
 * hook entry point / underflow whose offset matches this entry (underflow
 * entries must additionally pass check_underflow()); zeroes counters and
 * comefrom for the later passes.
 */
761 check_entry_size_and_hooks(struct ip6t_entry *e,
762 struct xt_table_info *newinfo,
764 unsigned char *limit,
765 const unsigned int *hook_entries,
766 const unsigned int *underflows,
767 unsigned int valid_hooks,
/* Entry must be properly aligned and leave room for its own header. */
772 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
773 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
774 duprintf("Bad offset %p\n", e);
779 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
780 duprintf("checking: element %p size %u\n",
785 /* Check hooks & underflows */
786 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
787 if (!(valid_hooks & (1 << h)))
789 if ((unsigned char *)e - base == hook_entries[h])
790 newinfo->hook_entry[h] = hook_entries[h];
791 if ((unsigned char *)e - base == underflows[h]) {
792 if (!check_underflow(e)) {
793 pr_err("Underflows must be unconditional and "
794 "use the STANDARD target with "
798 newinfo->underflow[h] = underflows[h];
802 /* Clear counters and comefrom */
803 e->counters = ((struct xt_counters) { 0, 0 });
/*
 * cleanup_entry - fully tear down one rule: destroy all of its matches
 * (via cleanup_match), then run the target's ->destroy() and drop the
 * target module reference. Optional @i limits cleanup to the first *i
 * entries when unwinding a partially-validated table.
 */
811 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
813 struct xt_tgdtor_param par;
814 struct ip6t_entry_target *t;
816 if (i && (*i)-- == 0)
819 /* Cleanup all matches */
820 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
821 t = ip6t_get_target(e);
823 par.target = t->u.kernel.target;
824 par.targinfo = t->data;
825 par.family = NFPROTO_IPV6;
826 if (par.target->destroy != NULL)
827 par.target->destroy(&par);
828 module_put(par.target->me);
832 /* Checks and translates the user-supplied table segment (held in
/*
 * translate_table - validate and install a user-supplied ruleset into
 * @newinfo: size/offset pass (check_entry_size_and_hooks), verify every
 * valid hook got an entry point and underflow, loop-check with
 * mark_source_chains(), then the full per-entry extension checks
 * (find_check_entry). On success the checked blob is replicated to every
 * possible CPU's private copy.
 */
835 translate_table(struct net *net,
837 unsigned int valid_hooks,
838 struct xt_table_info *newinfo,
842 const unsigned int *hook_entries,
843 const unsigned int *underflows)
848 newinfo->size = size;
849 newinfo->number = number;
851 /* Init all hooks to impossible value. */
852 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
853 newinfo->hook_entry[i] = 0xFFFFFFFF;
854 newinfo->underflow[i] = 0xFFFFFFFF;
857 duprintf("translate_table: size %u\n", newinfo->size);
859 /* Walk through entries, checking offsets. */
860 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
861 check_entry_size_and_hooks,
865 hook_entries, underflows, valid_hooks, &i);
870 duprintf("translate_table: %u not %u entries\n",
875 /* Check hooks all assigned */
876 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
877 /* Only hooks which are valid */
878 if (!(valid_hooks & (1 << i)))
/* 0xFFFFFFFF sentinel means no entry at that hook's offset was seen. */
880 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
881 duprintf("Invalid hook entry %u %u\n",
885 if (newinfo->underflow[i] == 0xFFFFFFFF) {
886 duprintf("Invalid underflow %u %u\n",
892 if (!mark_source_chains(newinfo, valid_hooks, entry0))
895 /* Finally, each sanity check must pass */
897 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
898 find_check_entry, net, name, size, &i);
/* On failure, unwind only the i entries that passed find_check_entry. */
901 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
902 cleanup_entry, net, &i);
906 /* And one copy for every other CPU */
907 for_each_possible_cpu(i) {
908 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
909 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* add_entry_to_counter - iterator callback: accumulate one entry's
 * byte/packet counters into total[*i] (ADD = sum across CPUs). */
917 add_entry_to_counter(const struct ip6t_entry *e,
918 struct xt_counters total[],
921 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* set_entry_to_counter - iterator callback: initialize total[*i] from
 * one entry's counters (SET = seed from the first CPU's copy). */
928 set_entry_to_counter(const struct ip6t_entry *e,
929 struct ip6t_counters total[],
932 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * get_counters - snapshot per-entry counters summed over all CPUs:
 * seed @counters from the current CPU's copy (set_entry_to_counter),
 * then add every other CPU's copy under that CPU's xt_info write lock.
 */
939 get_counters(const struct xt_table_info *t,
940 struct xt_counters counters[])
946 /* Instead of clearing (by a previous call to memset())
947 * the counters and using adds, we set the counters
948 * with data used by 'current' CPU
950 * Bottom half has to be disabled to prevent deadlock
951 * if new softirq were to run and call ipt_do_table
954 curcpu = smp_processor_id();
957 IP6T_ENTRY_ITERATE(t->entries[curcpu],
959 set_entry_to_counter,
963 for_each_possible_cpu(cpu) {
968 IP6T_ENTRY_ITERATE(t->entries[cpu],
970 add_entry_to_counter,
973 xt_info_wrunlock(cpu);
/*
 * alloc_counters - vmalloc a counter array sized for every rule in
 * @table and fill it with an atomic snapshot via get_counters().
 * Returns ERR_PTR(-ENOMEM) on allocation failure; caller vfrees.
 */
978 static struct xt_counters *alloc_counters(struct xt_table *table)
980 unsigned int countersize;
981 struct xt_counters *counters;
982 struct xt_table_info *private = table->private;
984 /* We need atomic snapshot of counters: rest doesn't change
985 (other than comefrom, which userspace doesn't care
987 countersize = sizeof(struct xt_counters) * private->number;
988 counters = vmalloc_node(countersize, numa_node_id());
990 if (counters == NULL)
991 return ERR_PTR(-ENOMEM);
993 get_counters(private, counters);
/*
 * copy_entries_to_user - export a table's rules to userspace: bulk-copy
 * this CPU's rule blob, then patch each entry in the user buffer with
 * the snapshotted counters and replace kernel extension pointers with
 * the extensions' user-visible names.
 */
999 copy_entries_to_user(unsigned int total_size,
1000 struct xt_table *table,
1001 void __user *userptr)
1003 unsigned int off, num;
1004 struct ip6t_entry *e;
1005 struct xt_counters *counters;
1006 const struct xt_table_info *private = table->private;
1008 const void *loc_cpu_entry;
1010 counters = alloc_counters(table);
1011 if (IS_ERR(counters))
1012 return PTR_ERR(counters);
1014 /* choose the copy that is on our node/cpu, ...
1015 * This choice is lazy (because current thread is
1016 * allowed to migrate to another cpu)
1018 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1019 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1024 /* FIXME: use iterator macros --RR */
1025 /* ... then go back and fix counters and names */
1026 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1028 const struct ip6t_entry_match *m;
1029 const struct ip6t_entry_target *t;
1031 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Overwrite the entry's counters field in the user copy with the
 * cross-CPU snapshot. */
1032 if (copy_to_user(userptr + off
1033 + offsetof(struct ip6t_entry, counters),
1035 sizeof(counters[num])) != 0) {
/* Walk this entry's matches and write each match's name string. */
1040 for (i = sizeof(struct ip6t_entry);
1041 i < e->target_offset;
1042 i += m->u.match_size) {
1045 if (copy_to_user(userptr + off + i
1046 + offsetof(struct ip6t_entry_match,
1048 m->u.kernel.match->name,
1049 strlen(m->u.kernel.match->name)+1)
1056 t = ip6t_get_target(e);
1057 if (copy_to_user(userptr + off + e->target_offset
1058 + offsetof(struct ip6t_entry_target,
1060 t->u.kernel.target->name,
1061 strlen(t->u.kernel.target->name)+1) != 0) {
1072 #ifdef CONFIG_COMPAT
/* compat_standard_from_user - translate a 32-bit STANDARD verdict to the
 * native layout, adjusting positive (jump-offset) verdicts by the
 * compat->native offset delta recorded via xt_compat_calc_jump(). */
1073 static void compat_standard_from_user(void *dst, void *src)
1075 int v = *(compat_int_t *)src;
1078 v += xt_compat_calc_jump(AF_INET6, v);
1079 memcpy(dst, &v, sizeof(v));
/* compat_standard_to_user - inverse of compat_standard_from_user:
 * convert a native STANDARD verdict to its 32-bit form for userspace. */
1082 static int compat_standard_to_user(void __user *dst, void *src)
1084 compat_int_t cv = *(int *)src;
1087 cv -= xt_compat_calc_jump(AF_INET6, cv);
1088 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* compat_calc_match - accumulate the native-vs-compat size difference of
 * one match extension into *size. */
1092 compat_calc_match(struct ip6t_entry_match *m, int *size)
1094 *size += xt_compat_match_offset(m->u.kernel.match);
/*
 * compat_calc_entry - compute how much smaller entry @e is in compat
 * layout (entry header delta + each match's delta + target delta),
 * shrink newinfo->size accordingly, record the per-entry offset with
 * xt_compat_add_offset(), and shift any hook entry/underflow offsets
 * that lie after @e.
 */
1098 static int compat_calc_entry(struct ip6t_entry *e,
1099 const struct xt_table_info *info,
1100 void *base, struct xt_table_info *newinfo)
1102 struct ip6t_entry_target *t;
1103 unsigned int entry_offset;
1106 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1107 entry_offset = (void *)e - base;
1108 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1109 t = ip6t_get_target(e);
1110 off += xt_compat_target_offset(t->u.kernel.target);
1111 newinfo->size -= off;
1112 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Hooks located after this entry move down by the accumulated delta. */
1116 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1117 if (info->hook_entry[i] &&
1118 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1119 newinfo->hook_entry[i] -= off;
1120 if (info->underflow[i] &&
1121 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1122 newinfo->underflow[i] -= off;
/*
 * compat_table_info - build the compat-layout metadata (size, hook
 * offsets) for an existing table by running compat_calc_entry over this
 * CPU's rule blob. entries[] in @newinfo is left untouched.
 */
1127 static int compat_table_info(const struct xt_table_info *info,
1128 struct xt_table_info *newinfo)
1130 void *loc_cpu_entry;
1132 if (!newinfo || !info)
1135 /* we dont care about newinfo->entries[] */
1136 memcpy(newinfo, info, offsetof(struct xt_table_info, entries))
1137 newinfo->initial_entries = 0;
1138 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1139 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1140 compat_calc_entry, info, loc_cpu_entry,
/*
 * get_info - IP6T_SO_GET_INFO handler: look up the named table
 * (auto-loading "ip6table_<name>"), and copy its hook offsets, entry
 * count, and size to userspace. In @compat mode sizes/offsets are first
 * recomputed for the 32-bit layout via compat_table_info().
 */
1145 static int get_info(struct net *net, void __user *user, int *len, int compat)
1147 char name[IP6T_TABLE_MAXNAMELEN];
1151 if (*len != sizeof(struct ip6t_getinfo)) {
1152 duprintf("length %u != %zu\n", *len,
1153 sizeof(struct ip6t_getinfo));
1157 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the user-supplied table name. */
1160 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1161 #ifdef CONFIG_COMPAT
1163 xt_compat_lock(AF_INET6);
1165 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1166 "ip6table_%s", name);
1167 if (t && !IS_ERR(t)) {
1168 struct ip6t_getinfo info;
1169 const struct xt_table_info *private = t->private;
1171 #ifdef CONFIG_COMPAT
1173 struct xt_table_info tmp;
1174 ret = compat_table_info(private, &tmp);
1175 xt_compat_flush_offsets(AF_INET6);
1179 info.valid_hooks = t->valid_hooks;
1180 memcpy(info.hook_entry, private->hook_entry,
1181 sizeof(info.hook_entry));
1182 memcpy(info.underflow, private->underflow,
1183 sizeof(info.underflow));
1184 info.num_entries = private->number;
1185 info.size = private->size;
1186 strcpy(info.name, name);
1188 if (copy_to_user(user, &info, *len) != 0)
1196 ret = t ? PTR_ERR(t) : -ENOENT;
1197 #ifdef CONFIG_COMPAT
1199 xt_compat_unlock(AF_INET6);
/*
 * get_entries - IP6T_SO_GET_ENTRIES handler: validate the request
 * length against the table's actual size, then dump the rules with
 * copy_entries_to_user().
 */
1205 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1208 struct ip6t_get_entries get;
1211 if (*len < sizeof(get)) {
1212 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1215 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1217 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1218 duprintf("get_entries: %u != %zu\n",
1219 *len, sizeof(get) + get.size);
1223 t = xt_find_table_lock(net, AF_INET6, get.name);
1224 if (t && !IS_ERR(t)) {
1225 struct xt_table_info *private = t->private;
1226 duprintf("t->private->number = %u\n", private->number);
/* Userspace's claimed size must match the live table exactly. */
1227 if (get.size == private->size)
1228 ret = copy_entries_to_user(private->size,
1229 t, uptr->entrytable);
1231 duprintf("get_entries: I've got %u not %u!\n",
1232 private->size, get.size);
1238 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * __do_replace - swap a validated @newinfo into the named table via
 * xt_replace_table(), adjust the table module's use count based on rule
 * counts, snapshot the old table's counters, tear down the old rules,
 * and copy the final counters back to userspace.
 */
1244 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1245 struct xt_table_info *newinfo, unsigned int num_counters,
1246 void __user *counters_ptr)
1250 struct xt_table_info *oldinfo;
1251 struct xt_counters *counters;
1252 const void *loc_cpu_old_entry;
1255 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1262 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1263 "ip6table_%s", name);
1264 if (!t || IS_ERR(t)) {
1265 ret = t ? PTR_ERR(t) : -ENOENT;
1266 goto free_newinfo_counters_untrans;
/* The replacement must target the same set of hooks as the table. */
1270 if (valid_hooks != t->valid_hooks) {
1271 duprintf("Valid hook crap: %08X vs %08X\n",
1272 valid_hooks, t->valid_hooks);
1277 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1281 /* Update module usage count based on number of rules */
1282 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1283 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1284 if ((oldinfo->number > oldinfo->initial_entries) ||
1285 (newinfo->number <= oldinfo->initial_entries))
1287 if ((oldinfo->number > oldinfo->initial_entries) &&
1288 (newinfo->number <= oldinfo->initial_entries))
1291 /* Get the old counters, and synchronize with replace */
1292 get_counters(oldinfo, counters);
1294 /* Decrease module usage counts and free resource */
1295 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1296 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1298 xt_free_table_info(oldinfo);
1299 if (copy_to_user(counters_ptr, counters,
1300 sizeof(struct xt_counters) * num_counters) != 0)
1309 free_newinfo_counters_untrans:
/*
 * do_replace - IP6T_SO_SET_REPLACE handler: copy the replacement header
 * and rule blob from userspace, validate/translate it with
 * translate_table(), then install it via __do_replace(). On any failure
 * the new table (and any checked entries) is torn down and freed.
 */
1316 do_replace(struct net *net, void __user *user, unsigned int len)
1319 struct ip6t_replace tmp;
1320 struct xt_table_info *newinfo;
1321 void *loc_cpu_entry;
1323 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1326 /* overflow check */
1327 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1330 newinfo = xt_alloc_table_info(tmp.size);
1334 /* choose the copy that is on our node/cpu */
1335 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1336 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1342 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1343 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1344 tmp.hook_entry, tmp.underflow);
1348 duprintf("ip_tables: Translated table\n");
1350 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1351 tmp.num_counters, tmp.counters);
1353 goto free_newinfo_untrans;
1356 free_newinfo_untrans:
/* Error path after successful translation: destroy every entry's
 * extensions, then free the table info. */
1357 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1359 xt_free_table_info(newinfo);
1363 /* We're lazy, and add to the first CPU; overflow works its fey magic
1364 * and everything is OK. */
/* add_counter_to_entry - iterator callback: add user-supplied counter
 * deltas (addme[*i]) onto entry @e's live counters. */
1366 add_counter_to_entry(struct ip6t_entry *e,
1367 const struct xt_counters addme[],
1370 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/*
 * do_add_counters - IP6T_SO_SET_ADD_COUNTERS handler: read a counter
 * header (32-bit layout when @compat), verify the payload length matches
 * num_counters entries, copy the deltas, and add them to the named
 * table's current-CPU rule copy under that CPU's xt_info write lock.
 */
1377 do_add_counters(struct net *net, void __user *user, unsigned int len,
1380 unsigned int i, curcpu;
1381 struct xt_counters_info tmp;
1382 struct xt_counters *paddc;
1383 unsigned int num_counters;
1388 const struct xt_table_info *private;
1390 const void *loc_cpu_entry;
1391 #ifdef CONFIG_COMPAT
1392 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat and native callers. */
1396 size = sizeof(struct compat_xt_counters_info);
1401 size = sizeof(struct xt_counters_info);
1404 if (copy_from_user(ptmp, user, size) != 0)
1407 #ifdef CONFIG_COMPAT
1409 num_counters = compat_tmp.num_counters;
1410 name = compat_tmp.name;
1414 num_counters = tmp.num_counters;
/* Total length must be exactly header + counter array. */
1418 if (len != size + num_counters * sizeof(struct xt_counters))
1421 paddc = vmalloc_node(len - size, numa_node_id());
1425 if (copy_from_user(paddc, user + size, len - size) != 0) {
1430 t = xt_find_table_lock(net, AF_INET6, name);
1431 if (!t || IS_ERR(t)) {
1432 ret = t ? PTR_ERR(t) : -ENOENT;
1438 private = t->private;
1439 if (private->number != num_counters) {
1441 goto unlock_up_free;
1445 /* Choose the copy that is on our node */
1446 curcpu = smp_processor_id();
1447 xt_info_wrlock(curcpu);
1448 loc_cpu_entry = private->entries[curcpu];
1449 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1451 add_counter_to_entry,
1454 xt_info_wrunlock(curcpu);
1466 #ifdef CONFIG_COMPAT
/* 32-bit userspace view of struct ip6t_replace: pointers are
 * compat_uptr_t and the trailing rules use the compat entry layout. */
1467 struct compat_ip6t_replace {
1468 char name[IP6T_TABLE_MAXNAMELEN];
1472 u32 hook_entry[NF_INET_NUMHOOKS];
1473 u32 underflow[NF_INET_NUMHOOKS];
1475 compat_uptr_t counters; /* struct ip6t_counters * */
1476 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - convert one native entry to the 32-bit
 * layout in the user buffer: copy the header, patch in the counter
 * snapshot, convert each match and the target via the xt compat
 * helpers, then fix up target_offset/next_offset for the shrunk layout.
 * *dstptr and *size track progress through the user buffer.
 */
1480 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1481 unsigned int *size, struct xt_counters *counters,
1484 struct ip6t_entry_target *t;
1485 struct compat_ip6t_entry __user *ce;
1486 u_int16_t target_offset, next_offset;
1487 compat_uint_t origsize;
1492 ce = (struct compat_ip6t_entry __user *)*dstptr;
1493 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1496 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1499 *dstptr += sizeof(struct compat_ip6t_entry);
1500 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1502 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Offsets shrink by however much the layout has contracted so far. */
1503 target_offset = e->target_offset - (origsize - *size);
1506 t = ip6t_get_target(e);
1507 ret = xt_compat_target_to_user(t, dstptr, size);
1511 next_offset = e->next_offset - (origsize - *size);
1512 if (put_user(target_offset, &ce->target_offset))
1514 if (put_user(next_offset, &ce->next_offset))
/* Look up (auto-loading the module if needed) the xt_match named in a
 * compat entry's match, stash it in m->u.kernel.match, and grow *size by
 * the compat->native size offset for that match.  Returns -ENOENT /
 * PTR_ERR on lookup failure. */
1524 compat_find_calc_match(struct ip6t_entry_match *m,
1526 const struct ip6t_ip6 *ipv6,
1527 unsigned int hookmask,
1528 int *size, unsigned int *i)
1530 struct xt_match *match;
1532 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1533 m->u.user.revision),
1534 "ip6t_%s", m->u.user.name);
1535 if (IS_ERR(match) || !match) {
1536 duprintf("compat_check_calc_match: `%s' not found\n",
1538 return match ? PTR_ERR(match) : -ENOENT;
1540 m->u.kernel.match = match;
/* Account for the layout-size difference of this match's data. */
1541 *size += xt_compat_match_offset(match);
/* Drop the module reference taken by compat_find_calc_match.  When i is
 * non-NULL it is a countdown so only the first *i matches are released
 * (used to unwind a partially-processed entry). */
1548 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1550 if (i && (*i)-- == 0)
1553 module_put(m->u.kernel.match->me);
/* Release all module references held by one compat entry (its matches
 * and its target).  *i, when given, limits the walk to the first *i
 * entries for error unwinding. */
1558 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1560 struct ip6t_entry_target *t;
1562 if (i && (*i)-- == 0)
1565 /* Cleanup all matches */
1566 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1567 t = compat_ip6t_get_target(e);
1568 module_put(t->u.kernel.target->me);
/* First pass over one compat entry from userspace: validate alignment
 * and bounds, resolve its matches and target (taking module refs),
 * record the per-entry compat->native size offset with
 * xt_compat_add_offset(), and latch hook entry/underflow positions that
 * coincide with this entry's offset.  On failure the already-resolved
 * matches/target refs are dropped.
 * NOTE(review): sampled chunk — several statements (j init, error
 * returns, labels) are not visible here. */
1573 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1574 struct xt_table_info *newinfo,
1576 unsigned char *base,
1577 unsigned char *limit,
1578 unsigned int *hook_entries,
1579 unsigned int *underflows,
1583 struct ip6t_entry_target *t;
1584 struct xt_target *target;
1585 unsigned int entry_offset;
1589 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and leave room inside the blob. */
1590 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1591 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1592 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry header plus a target. */
1596 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1597 sizeof(struct compat_xt_entry_target)) {
1598 duprintf("checking: element %p size %u\n",
1603 /* For purposes of check_entry casting the compat entry is fine */
1604 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native layout will be. */
1608 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1609 entry_offset = (void *)e - (void *)base;
1611 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1612 &e->ipv6, e->comefrom, &off, &j);
1614 goto release_matches;
1616 t = compat_ip6t_get_target(e);
1617 target = try_then_request_module(xt_find_target(AF_INET6,
1619 t->u.user.revision),
1620 "ip6t_%s", t->u.user.name);
1621 if (IS_ERR(target) || !target) {
1622 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1624 ret = target ? PTR_ERR(target) : -ENOENT;
1625 goto release_matches;
1627 t->u.kernel.target = target;
1629 off += xt_compat_target_offset(target);
/* Remember this entry's cumulative size delta for offset translation. */
1631 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1635 /* Check hooks & underflows */
1636 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1637 if ((unsigned char *)e - base == hook_entries[h])
1638 newinfo->hook_entry[h] = hook_entries[h];
1639 if ((unsigned char *)e - base == underflows[h])
1640 newinfo->underflow[h] = underflows[h];
1643 /* Clear counters and comefrom */
1644 memset(&e->counters, 0, sizeof(e->counters));
1651 module_put(t->u.kernel.target->me);
/* NOTE(review): this unwind walks a *compat* entry with the native
 * IP6T_MATCH_ITERATE macro; offsets of the two layouts differ — verify
 * against upstream history before relying on this path. */
1653 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/* Second pass: expand one already-validated compat entry into the
 * native kernel layout at *dstptr, converting each match and the target
 * in place and fixing up target_offset/next_offset plus any hook
 * entry/underflow positions that lie beyond this entry.
 * NOTE(review): sampled chunk — error-path statements and the closing
 * brace are not visible here. */
1658 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1659 unsigned int *size, const char *name,
1660 struct xt_table_info *newinfo, unsigned char *base)
1662 struct ip6t_entry_target *t;
1663 struct xt_target *target;
1664 struct ip6t_entry *de;
1665 unsigned int origsize;
1670 de = (struct ip6t_entry *)*dstptr;
1671 memcpy(de, e, sizeof(struct ip6t_entry));
1672 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1674 *dstptr += sizeof(struct ip6t_entry);
/* Native entries are larger, so *size grows as we expand. */
1675 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1677 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1681 de->target_offset = e->target_offset - (origsize - *size);
1682 t = compat_ip6t_get_target(e);
1683 target = t->u.kernel.target;
1684 xt_compat_target_from_user(t, dstptr, size);
1686 de->next_offset = e->next_offset - (origsize - *size);
/* Shift any hook/underflow offsets that come after this entry. */
1687 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1688 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1689 newinfo->hook_entry[h] -= origsize - *size;
1690 if ((unsigned char *)de - base < newinfo->underflow[h])
1691 newinfo->underflow[h] -= origsize - *size;
/* Final-pass validation of a now-native-layout entry: run each match's
 * checkentry hook (via check_match with an xt_mtchk_param) and then the
 * target's.  On failure the already-checked matches are cleaned up. */
1696 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1697 const char *name, unsigned int *i)
1701 struct xt_mtchk_param mtpar;
1706 mtpar.entryinfo = &e->ipv6;
1707 mtpar.hook_mask = e->comefrom;
1708 mtpar.family = NFPROTO_IPV6;
1709 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1711 goto cleanup_matches;
1713 ret = check_target(e, name);
1715 goto cleanup_matches;
/* Unwind only the j matches that passed check_match above. */
1721 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
1726 translate_compat_table(struct net *net,
1728 unsigned int valid_hooks,
1729 struct xt_table_info **pinfo,
1731 unsigned int total_size,
1732 unsigned int number,
1733 unsigned int *hook_entries,
1734 unsigned int *underflows)
1737 struct xt_table_info *newinfo, *info;
1738 void *pos, *entry0, *entry1;
1745 info->number = number;
1747 /* Init all hooks to impossible value. */
1748 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1749 info->hook_entry[i] = 0xFFFFFFFF;
1750 info->underflow[i] = 0xFFFFFFFF;
1753 duprintf("translate_compat_table: size %u\n", info->size);
1755 xt_compat_lock(AF_INET6);
1756 /* Walk through entries, checking offsets. */
1757 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1758 check_compat_entry_size_and_hooks,
1759 info, &size, entry0,
1760 entry0 + total_size,
1761 hook_entries, underflows, &j, name);
1767 duprintf("translate_compat_table: %u not %u entries\n",
1772 /* Check hooks all assigned */
1773 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1774 /* Only hooks which are valid */
1775 if (!(valid_hooks & (1 << i)))
1777 if (info->hook_entry[i] == 0xFFFFFFFF) {
1778 duprintf("Invalid hook entry %u %u\n",
1779 i, hook_entries[i]);
1782 if (info->underflow[i] == 0xFFFFFFFF) {
1783 duprintf("Invalid underflow %u %u\n",
1790 newinfo = xt_alloc_table_info(size);
1794 newinfo->number = number;
1795 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1796 newinfo->hook_entry[i] = info->hook_entry[i];
1797 newinfo->underflow[i] = info->underflow[i];
1799 entry1 = newinfo->entries[raw_smp_processor_id()];
1802 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1803 compat_copy_entry_from_user,
1804 &pos, &size, name, newinfo, entry1);
1805 xt_compat_flush_offsets(AF_INET6);
1806 xt_compat_unlock(AF_INET6);
1811 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1815 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1819 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1820 compat_release_entry, &j);
1821 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
1822 xt_free_table_info(newinfo);
1826 /* And one copy for every other CPU */
1827 for_each_possible_cpu(i)
1828 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1829 memcpy(newinfo->entries[i], entry1, newinfo->size);
1833 xt_free_table_info(info);
1837 xt_free_table_info(newinfo);
1839 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1842 xt_compat_flush_offsets(AF_INET6);
1843 xt_compat_unlock(AF_INET6);
/* Compat handler for IP6T_SO_SET_REPLACE: copy the header and entry
 * blob from 32-bit userspace, translate it to the native layout, then
 * hand it to the common __do_replace().  On translate/replace failure
 * the new table and its entries are cleaned up and freed. */
1848 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1851 struct compat_ip6t_replace tmp;
1852 struct xt_table_info *newinfo;
1853 void *loc_cpu_entry;
1855 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1858 /* overflow check */
1859 if (tmp.size >= INT_MAX / num_possible_cpus())
1861 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1864 newinfo = xt_alloc_table_info(tmp.size);
1868 /* choose the copy that is on our node/cpu */
1869 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1870 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1876 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1877 &newinfo, &loc_cpu_entry, tmp.size,
1878 tmp.num_entries, tmp.hook_entry,
1883 duprintf("compat_do_replace: Translated table\n");
1885 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1886 tmp.num_counters, compat_ptr(tmp.counters));
1888 goto free_newinfo_untrans;
1891 free_newinfo_untrans:
1892 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1894 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to compat_do_replace() and ADD_COUNTERS to do_add_counters()
 * with compat=1. */
1899 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1904 if (!capable(CAP_NET_ADMIN))
1908 case IP6T_SO_SET_REPLACE:
1909 ret = compat_do_replace(sock_net(sk), user, len);
1912 case IP6T_SO_SET_ADD_COUNTERS:
/* Final argument 1 selects the compat copy path in do_add_counters. */
1913 ret = do_add_counters(sock_net(sk), user, len, 1);
1917 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout of an IP6T_SO_GET_ENTRIES request/reply:
 * table name (+ size member, not visible in this sampled chunk) followed
 * by the variable-length compat entry array. */
1924 struct compat_ip6t_get_entries {
1925 char name[IP6T_TABLE_MAXNAMELEN];
1927 struct compat_ip6t_entry entrytable[0];
/* Copy a table's entries to 32-bit userspace: snapshot the counters,
 * then walk the local CPU's entry copy converting each entry via
 * compat_copy_entry_to_user(). */
1931 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1932 void __user *userptr)
1934 struct xt_counters *counters;
1935 const struct xt_table_info *private = table->private;
1939 const void *loc_cpu_entry;
1942 counters = alloc_counters(table);
1943 if (IS_ERR(counters))
1944 return PTR_ERR(counters);
1946 /* choose the copy that is on our node/cpu, ...
1947 * This choice is lazy (because current thread is
1948 * allowed to migrate to another cpu)
1950 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1953 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1954 compat_copy_entry_to_user,
1955 &pos, &size, counters, &i);
/* Compat handler for IP6T_SO_GET_ENTRIES: validate the request length
 * against the compat-computed table size and, under xt_compat_lock,
 * dump the table to userspace in compat layout. */
1962 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1966 struct compat_ip6t_get_entries get;
1969 if (*len < sizeof(get)) {
1970 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1974 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1977 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1978 duprintf("compat_get_entries: %u != %zu\n",
1979 *len, sizeof(get) + get.size);
1983 xt_compat_lock(AF_INET6);
1984 t = xt_find_table_lock(net, AF_INET6, get.name);
1985 if (t && !IS_ERR(t)) {
1986 const struct xt_table_info *private = t->private;
1987 struct xt_table_info info;
1988 duprintf("t->private->number = %u\n", private->number);
/* Recompute the table's size as a compat process would see it. */
1989 ret = compat_table_info(private, &info);
1990 if (!ret && get.size == info.size) {
1991 ret = compat_copy_entries_to_user(private->size,
1992 t, uptr->entrytable);
1994 duprintf("compat_get_entries: I've got %u not %u!\n",
1995 private->size, get.size);
1998 xt_compat_flush_offsets(AF_INET6);
2002 ret = t ? PTR_ERR(t) : -ENOENT;
2004 xt_compat_unlock(AF_INET6);
/* Forward declaration so the compat getter can fall through to the
 * native handler for commands with no compat-specific path. */
2008 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: CAP_NET_ADMIN required; GET_INFO and
 * GET_ENTRIES use compat paths, everything else defers to
 * do_ip6t_get_ctl(). */
2011 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2015 if (!capable(CAP_NET_ADMIN))
2019 case IP6T_SO_GET_INFO:
2020 ret = get_info(sock_net(sk), user, len, 1);
2022 case IP6T_SO_GET_ENTRIES:
2023 ret = compat_get_entries(sock_net(sk), user, len);
2026 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE goes to
 * do_replace(), ADD_COUNTERS to do_add_counters() with compat=0. */
2033 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2037 if (!capable(CAP_NET_ADMIN))
2041 case IP6T_SO_SET_REPLACE:
2042 ret = do_replace(sock_net(sk), user, len);
2045 case IP6T_SO_SET_ADD_COUNTERS:
2046 ret = do_add_counters(sock_net(sk), user, len, 0);
2050 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: CAP_NET_ADMIN required; handles
 * GET_INFO, GET_ENTRIES and match/target revision queries (the latter
 * auto-loading the extension module if necessary). */
2058 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2062 if (!capable(CAP_NET_ADMIN))
2066 case IP6T_SO_GET_INFO:
2067 ret = get_info(sock_net(sk), user, len, 0);
2070 case IP6T_SO_GET_ENTRIES:
2071 ret = get_entries(sock_net(sk), user, len);
2074 case IP6T_SO_GET_REVISION_MATCH:
2075 case IP6T_SO_GET_REVISION_TARGET: {
2076 struct ip6t_get_revision rev;
2079 if (*len != sizeof(rev)) {
2083 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2088 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2093 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2096 "ip6t_%s", rev.name);
2101 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a built-in table: allocate an xt_table_info sized for the
 * initial ruleset, copy the template entries into the local CPU copy,
 * translate/validate them, then register with x_tables.  Returns the
 * new xt_table or ERR_PTR on failure. */
2108 struct xt_table *ip6t_register_table(struct net *net,
2109 const struct xt_table *table,
2110 const struct ip6t_replace *repl)
2113 struct xt_table_info *newinfo;
/* bootstrap is a throwaway zeroed table_info xt_register_table swaps out. */
2114 struct xt_table_info bootstrap
2115 = { 0, 0, 0, { 0 }, { 0 }, { } };
2116 void *loc_cpu_entry;
2117 struct xt_table *new_table;
2119 newinfo = xt_alloc_table_info(repl->size);
2125 /* choose the copy on our node/cpu, but dont care about preemption */
2126 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2127 memcpy(loc_cpu_entry, repl->entries, repl->size);
2129 ret = translate_table(net, table->name, table->valid_hooks,
2130 newinfo, loc_cpu_entry, repl->size,
2137 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2138 if (IS_ERR(new_table)) {
2139 ret = PTR_ERR(new_table);
2145 xt_free_table_info(newinfo);
2147 return ERR_PTR(ret);
/* Unregister a table and release everything it owned: run cleanup_entry
 * over the local CPU's entry copy (dropping match/target module refs),
 * balance the extra table-module ref taken for user-added rules, and
 * free the xt_table_info. */
2150 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2152 struct xt_table_info *private;
2153 void *loc_cpu_entry;
2154 struct module *table_owner = table->me;
2156 private = xt_unregister_table(table);
2158 /* Decrease module usage counts and free resources */
2159 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2160 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
/* A table holding more than its initial entries pinned its owner module. */
2161 if (private->number > private->initial_entries)
2162 module_put(table_owner);
2163 xt_free_table_info(private);
2166 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2168 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2169 u_int8_t type, u_int8_t code,
/* Result is XORed with an invert flag (parameter not visible in this
 * sampled chunk) to honour IP6T_ICMP_INV. */
2172 return (type == test_type && code >= min_code && code <= max_code)
/* xt_match ->match hook for the built-in icmp6 match: refuses fragments,
 * reads the ICMPv6 header at the transport offset (dropping the packet
 * if it is truncated), and compares type/code against the rule. */
2177 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2179 const struct icmp6hdr *ic;
2180 struct icmp6hdr _icmph;
2181 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2183 /* Must not be a fragment. */
2184 if (par->fragoff != 0)
2187 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2189 /* We've been asked to examine this packet, and we
2190 * can't. Hence, no choice but to drop.
2192 duprintf("Dropping evil ICMP tinygram.\n");
2193 *par->hotdrop = true;
2197 return icmp6_type_code_match(icmpinfo->type,
2200 ic->icmp6_type, ic->icmp6_code,
2201 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2204 /* Called when user tries to insert an entry of this type. */
2205 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2207 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2209 /* Must specify no unknown invflags */
2210 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2213 /* The built-in targets: standard (NULL) and error. */
/* Standard target: its data is just an int verdict; no ->target
 * function because the verdict is handled inline by the traverser.
 * The compat hooks convert the int between 32/64-bit layouts. */
2214 static struct xt_target ip6t_standard_target __read_mostly = {
2215 .name = IP6T_STANDARD_TARGET,
2216 .targetsize = sizeof(int),
2217 .family = NFPROTO_IPV6,
2218 #ifdef CONFIG_COMPAT
2219 .compatsize = sizeof(compat_int_t),
2220 .compat_from_user = compat_standard_from_user,
2221 .compat_to_user = compat_standard_to_user,
/* Error target: placed at the end of each chain; hitting it means a
 * ruleset bug, handled by ip6t_error(). */
2225 static struct xt_target ip6t_error_target __read_mostly = {
2226 .name = IP6T_ERROR_TARGET,
2227 .target = ip6t_error,
2228 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2229 .family = NFPROTO_IPV6,
/* Sockopt registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_* ranges to
 * the dispatchers above, with separate compat entry points for 32-bit
 * callers on a 64-bit kernel. */
2232 static struct nf_sockopt_ops ip6t_sockopts = {
2234 .set_optmin = IP6T_BASE_CTL,
2235 .set_optmax = IP6T_SO_SET_MAX+1,
2236 .set = do_ip6t_set_ctl,
2237 #ifdef CONFIG_COMPAT
2238 .compat_set = compat_do_ip6t_set_ctl,
2240 .get_optmin = IP6T_BASE_CTL,
2241 .get_optmax = IP6T_SO_GET_MAX+1,
2242 .get = do_ip6t_get_ctl,
2243 #ifdef CONFIG_COMPAT
2244 .compat_get = compat_do_ip6t_get_ctl,
2246 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration, tied to IPPROTO_ICMPV6 so it can
 * only be used on ICMPv6 rules. */
2249 static struct xt_match icmp6_matchstruct __read_mostly = {
2251 .match = icmp6_match,
2252 .matchsize = sizeof(struct ip6t_icmp),
2253 .checkentry = icmp6_checkentry,
2254 .proto = IPPROTO_ICMPV6,
2255 .family = NFPROTO_IPV6,
/* Per-netns init: set up the /proc and table infrastructure for IPv6. */
2258 static int __net_init ip6_tables_net_init(struct net *net)
2260 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown, mirroring ip6_tables_net_init. */
2263 static void __net_exit ip6_tables_net_exit(struct net *net)
2265 xt_proto_fini(net, NFPROTO_IPV6);
/* Per-network-namespace lifecycle hooks for this module. */
2268 static struct pernet_operations ip6_tables_net_ops = {
2269 .init = ip6_tables_net_init,
2270 .exit = ip6_tables_net_exit,
/* Module init: register pernet ops, the two built-in targets, the icmp6
 * match, and finally the sockopt interface — unwinding in reverse order
 * on any failure (error labels not visible in this sampled chunk). */
2273 static int __init ip6_tables_init(void)
2277 ret = register_pernet_subsys(&ip6_tables_net_ops);
2281 /* Noone else will be downing sem now, so we won't sleep */
2282 ret = xt_register_target(&ip6t_standard_target);
2285 ret = xt_register_target(&ip6t_error_target);
2288 ret = xt_register_match(&icmp6_matchstruct);
2292 /* Register setsockopt */
2293 ret = nf_register_sockopt(&ip6t_sockopts);
2297 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2301 xt_unregister_match(&icmp6_matchstruct);
2303 xt_unregister_target(&ip6t_error_target);
2305 xt_unregister_target(&ip6t_standard_target);
2307 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything ip6_tables_init registered, in
 * reverse order. */
2312 static void __exit ip6_tables_fini(void)
2314 nf_unregister_sockopt(&ip6t_sockopts);
2316 xt_unregister_match(&icmp6_matchstruct);
2317 xt_unregister_target(&ip6t_error_target);
2318 xt_unregister_target(&ip6t_standard_target);
2320 unregister_pernet_subsys(&ip6_tables_net_ops);
2324 * find the offset to specified header or the protocol number of last header
2325 * if target < 0. "last header" is transport protocol header, ESP, or
2328 * If target header is found, its offset is set in *offset and return protocol
2329 * number. Otherwise, return -1.
2331 * If the first fragment doesn't contain the final protocol header or
2332 * NEXTHDR_NONE it is considered invalid.
2334 * Note that non-1st fragment is special case that "the protocol number
2335 * of last header" is "next header" field in Fragment header. In this case,
2336 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2340 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2341 int target, unsigned short *fragoff)
2343 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2344 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2345 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the requested header (or a
 * non-extension header) is reached. */
2350 while (nexthdr != target) {
2351 struct ipv6_opt_hdr _hdr, *hp;
2352 unsigned int hdrlen;
2354 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2360 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2363 if (nexthdr == NEXTHDR_FRAGMENT) {
2364 unsigned short _frag_off;
2366 fp = skb_header_pointer(skb,
2367 start+offsetof(struct frag_hdr,
/* Mask off the reserved/M-flag bits to get the fragment offset. */
2374 _frag_off = ntohs(*fp) & ~0x7;
2377 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2378 hp->nexthdr == NEXTHDR_NONE)) {
2380 *fragoff = _frag_off;
/* AUTH header length field counts 32-bit words, unlike other ext hdrs. */
2386 } else if (nexthdr == NEXTHDR_AUTH)
2387 hdrlen = (hp->hdrlen + 2) << 2;
2389 hdrlen = ipv6_optlen(hp);
2391 nexthdr = hp->nexthdr;
/* Public symbols for dependent table modules (ip6table_filter etc.) and
 * the module entry/exit hookup. */
2400 EXPORT_SYMBOL(ip6t_register_table);
2401 EXPORT_SYMBOL(ip6t_unregister_table);
2402 EXPORT_SYMBOL(ip6t_do_table);
2403 EXPORT_SYMBOL(ip6t_ext_hdr);
2404 EXPORT_SYMBOL(ipv6_find_hdr);
2406 module_init(ip6_tables_init);
2407 module_exit(ip6_tables_fini);