/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* Per-packet debug output; compiled away unless DEBUG_IP_FIREWALL is set. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* Userspace-interaction debug output (table load/replace paths). */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Soft assertion: logs location on failure, never panics. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Check for an extension */
/* Returns non-zero when @nexthdr is one of the IPv6 extension-header
 * protocol numbers that may precede the upper-layer header. */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return ( (nexthdr == IPPROTO_HOPOPTS)   ||
		 (nexthdr == IPPROTO_ROUTING)   ||
		 (nexthdr == IPPROTO_FRAGMENT)  ||
		 (nexthdr == IPPROTO_ESP)       ||
		 (nexthdr == IPPROTO_AH)        ||
		 (nexthdr == IPPROTO_NONE)      ||
		 (nexthdr == IPPROTO_DSTOPTS) );
}
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP) ||
109 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* Returns the rule at byte @offset from the start of the table blob. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
227 static const struct ip6t_ip6 uncond;
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
232 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234 /* This cries for unification! */
235 static const char *const hooknames[] = {
236 [NF_INET_PRE_ROUTING] = "PREROUTING",
237 [NF_INET_LOCAL_IN] = "INPUT",
238 [NF_INET_FORWARD] = "FORWARD",
239 [NF_INET_LOCAL_OUT] = "OUTPUT",
240 [NF_INET_POST_ROUTING] = "POSTROUTING",
243 enum nf_ip_trace_comments {
244 NF_IP6_TRACE_COMMENT_RULE,
245 NF_IP6_TRACE_COMMENT_RETURN,
246 NF_IP6_TRACE_COMMENT_POLICY,
249 static const char *const comments[] = {
250 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
251 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
252 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
255 static struct nf_loginfo trace_loginfo = {
256 .type = NF_LOG_TYPE_LOG,
260 .logflags = NF_LOG_MASK,
265 /* Mildly perf critical (only if packet tracing is on) */
267 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
268 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum)
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */
275 *chainname = t->target.data;
280 if (s->target_offset == sizeof(struct ip6t_entry) &&
281 strcmp(t->target.u.kernel.target->name,
282 IP6T_STANDARD_TARGET) == 0 &&
284 unconditional(&s->ipv6)) {
285 /* Tail of chains: STANDARD target (return/policy) */
286 *comment = *chainname == hookname
287 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
288 : comments[NF_IP6_TRACE_COMMENT_RETURN];
297 static void trace_packet(struct sk_buff *skb,
299 const struct net_device *in,
300 const struct net_device *out,
301 const char *tablename,
302 struct xt_table_info *private,
303 struct ip6t_entry *e)
306 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment;
308 unsigned int rulenum = 0;
310 table_base = private->entries[smp_processor_id()];
311 root = get_entry(table_base, private->hook_entry[hook]);
313 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
316 IP6T_ENTRY_ITERATE(root,
317 private->size - private->hook_entry[hook],
318 get_chainname_rulenum,
319 e, hookname, &chainname, &comment, &rulenum);
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ",
323 tablename, chainname, comment, rulenum);
327 static inline __pure struct ip6t_entry *
328 ip6t_next_entry(const struct ip6t_entry *entry)
330 return (void *)entry + entry->next_offset;
333 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
335 ip6t_do_table(struct sk_buff *skb,
337 const struct net_device *in,
338 const struct net_device *out,
339 struct xt_table *table)
341 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
343 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
344 bool hotdrop = false;
345 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev;
349 struct ip6t_entry *e, *back;
350 struct xt_table_info *private;
351 struct xt_match_param mtpar;
352 struct xt_target_param tgpar;
355 indev = in ? in->name : nulldevname;
356 outdev = out ? out->name : nulldevname;
357 /* We handle fragments by dealing with the first fragment as
358 * if it was a normal packet. All other fragments are treated
359 * normally, except that they will NEVER match rules that ask
360 * things we don't know, ie. tcp syn flag or ports). If the
361 * rule is also a fragment-specific rule, non-fragments won't
363 mtpar.hotdrop = &hotdrop;
364 mtpar.in = tgpar.in = in;
365 mtpar.out = tgpar.out = out;
366 mtpar.family = tgpar.family = NFPROTO_IPV6;
367 mtpar.hooknum = tgpar.hooknum = hook;
369 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
372 private = table->private;
373 table_base = private->entries[smp_processor_id()];
375 e = get_entry(table_base, private->hook_entry[hook]);
377 /* For return from builtin chain */
378 back = get_entry(table_base, private->underflow[hook]);
381 struct ip6t_entry_target *t;
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
388 e = ip6t_next_entry(e);
392 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1);
396 t = ip6t_get_target(e);
397 IP_NF_ASSERT(t->u.kernel.target);
399 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
400 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
401 /* The packet is traced: log it */
402 if (unlikely(skb->nf_trace))
403 trace_packet(skb, hook, in, out,
404 table->name, private, e);
406 /* Standard target? */
407 if (!t->u.kernel.target->target) {
410 v = ((struct ip6t_standard_target *)t)->verdict;
412 /* Pop from stack? */
413 if (v != IP6T_RETURN) {
414 verdict = (unsigned)(-v) - 1;
418 back = get_entry(table_base, back->comefrom);
421 if (table_base + v != ip6t_next_entry(e) &&
422 !(e->ipv6.flags & IP6T_F_GOTO)) {
423 /* Save old back ptr in next entry */
424 struct ip6t_entry *next = ip6t_next_entry(e);
425 next->comefrom = (void *)back - table_base;
426 /* set back pointer to next entry */
430 e = get_entry(table_base, v);
434 /* Targets which reenter must return
436 tgpar.target = t->u.kernel.target;
437 tgpar.targinfo = t->data;
439 #ifdef CONFIG_NETFILTER_DEBUG
440 tb_comefrom = 0xeeeeeeec;
442 verdict = t->u.kernel.target->target(skb, &tgpar);
444 #ifdef CONFIG_NETFILTER_DEBUG
445 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 tb_comefrom = 0x57acc001;
452 if (verdict == IP6T_CONTINUE)
453 e = ip6t_next_entry(e);
459 #ifdef CONFIG_NETFILTER_DEBUG
460 tb_comefrom = NETFILTER_LINK_POISON;
462 xt_info_rdunlock_bh();
464 #ifdef DEBUG_ALLOW_ALL
475 /* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */
478 mark_source_chains(struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0)
483 /* No recursion; use packet counter to save back ptrs (reset
484 to 0 as we leave), and comefrom to save source hook bitmask */
485 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
486 unsigned int pos = newinfo->hook_entry[hook];
487 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
489 if (!(valid_hooks & (1 << hook)))
492 /* Set initial back pointer. */
493 e->counters.pcnt = pos;
496 struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e);
498 int visited = e->comefrom & (1 << hook);
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
501 printk("iptables: loop hook %u pos %u %08X.\n",
502 hook, pos, e->comefrom);
505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
507 /* Unconditional return/END. */
508 if ((e->target_offset == sizeof(struct ip6t_entry) &&
509 (strcmp(t->target.u.user.name,
510 IP6T_STANDARD_TARGET) == 0) &&
512 unconditional(&e->ipv6)) || visited) {
513 unsigned int oldpos, size;
515 if ((strcmp(t->target.u.user.name,
516 IP6T_STANDARD_TARGET) == 0) &&
517 t->verdict < -NF_MAX_VERDICT - 1) {
518 duprintf("mark_source_chains: bad "
519 "negative verdict (%i)\n",
524 /* Return: backtrack through the last
527 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
528 #ifdef DEBUG_IP_FIREWALL_USER
530 & (1 << NF_INET_NUMHOOKS)) {
531 duprintf("Back unset "
538 pos = e->counters.pcnt;
539 e->counters.pcnt = 0;
541 /* We're at the start. */
545 e = (struct ip6t_entry *)
547 } while (oldpos == pos + e->next_offset);
550 size = e->next_offset;
551 e = (struct ip6t_entry *)
552 (entry0 + pos + size);
553 e->counters.pcnt = pos;
556 int newpos = t->verdict;
558 if (strcmp(t->target.u.user.name,
559 IP6T_STANDARD_TARGET) == 0 &&
561 if (newpos > newinfo->size -
562 sizeof(struct ip6t_entry)) {
563 duprintf("mark_source_chains: "
564 "bad verdict (%i)\n",
568 /* This a jump; chase it. */
569 duprintf("Jump rule %u -> %u\n",
572 /* ... this is a fallthru */
573 newpos = pos + e->next_offset;
575 e = (struct ip6t_entry *)
577 e->counters.pcnt = pos;
582 duprintf("Finished chain %u\n", hook);
588 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
590 struct xt_mtdtor_param par;
592 if (i && (*i)-- == 0)
595 par.match = m->u.kernel.match;
596 par.matchinfo = m->data;
597 par.family = NFPROTO_IPV6;
598 if (par.match->destroy != NULL)
599 par.match->destroy(&par);
600 module_put(par.match->me);
605 check_entry(struct ip6t_entry *e, const char *name)
607 struct ip6t_entry_target *t;
609 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
614 if (e->target_offset + sizeof(struct ip6t_entry_target) >
618 t = ip6t_get_target(e);
619 if (e->target_offset + t->u.target_size > e->next_offset)
625 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
628 const struct ip6t_ip6 *ipv6 = par->entryinfo;
631 par->match = m->u.kernel.match;
632 par->matchinfo = m->data;
634 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
635 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
637 duprintf("ip_tables: check failed for `%s'.\n",
646 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
649 struct xt_match *match;
652 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
654 "ip6t_%s", m->u.user.name);
655 if (IS_ERR(match) || !match) {
656 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
657 return match ? PTR_ERR(match) : -ENOENT;
659 m->u.kernel.match = match;
661 ret = check_match(m, par, i);
667 module_put(m->u.kernel.match->me);
671 static int check_target(struct ip6t_entry *e, const char *name)
673 struct ip6t_entry_target *t = ip6t_get_target(e);
674 struct xt_tgchk_param par = {
677 .target = t->u.kernel.target,
679 .hook_mask = e->comefrom,
680 .family = NFPROTO_IPV6,
684 t = ip6t_get_target(e);
685 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
686 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
688 duprintf("ip_tables: check failed for `%s'.\n",
689 t->u.kernel.target->name);
696 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
697 unsigned int size, unsigned int *i)
699 struct ip6t_entry_target *t;
700 struct xt_target *target;
703 struct xt_mtchk_param mtpar;
705 ret = check_entry(e, name);
712 mtpar.entryinfo = &e->ipv6;
713 mtpar.hook_mask = e->comefrom;
714 mtpar.family = NFPROTO_IPV6;
715 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
717 goto cleanup_matches;
719 t = ip6t_get_target(e);
720 target = try_then_request_module(xt_find_target(AF_INET6,
723 "ip6t_%s", t->u.user.name);
724 if (IS_ERR(target) || !target) {
725 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
726 ret = target ? PTR_ERR(target) : -ENOENT;
727 goto cleanup_matches;
729 t->u.kernel.target = target;
731 ret = check_target(e, name);
738 module_put(t->u.kernel.target->me);
740 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
744 static bool check_underflow(struct ip6t_entry *e)
746 const struct ip6t_entry_target *t;
747 unsigned int verdict;
749 if (!unconditional(&e->ipv6))
751 t = ip6t_get_target(e);
752 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
754 verdict = ((struct ip6t_standard_target *)t)->verdict;
755 verdict = -verdict - 1;
756 return verdict == NF_DROP || verdict == NF_ACCEPT;
760 check_entry_size_and_hooks(struct ip6t_entry *e,
761 struct xt_table_info *newinfo,
763 unsigned char *limit,
764 const unsigned int *hook_entries,
765 const unsigned int *underflows,
766 unsigned int valid_hooks,
771 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
772 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
773 duprintf("Bad offset %p\n", e);
778 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
779 duprintf("checking: element %p size %u\n",
784 /* Check hooks & underflows */
785 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
786 if (!(valid_hooks & (1 << h)))
788 if ((unsigned char *)e - base == hook_entries[h])
789 newinfo->hook_entry[h] = hook_entries[h];
790 if ((unsigned char *)e - base == underflows[h]) {
791 if (!check_underflow(e)) {
792 pr_err("Underflows must be unconditional and "
793 "use the STANDARD target with "
797 newinfo->underflow[h] = underflows[h];
801 /* Clear counters and comefrom */
802 e->counters = ((struct xt_counters) { 0, 0 });
810 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
812 struct xt_tgdtor_param par;
813 struct ip6t_entry_target *t;
815 if (i && (*i)-- == 0)
818 /* Cleanup all matches */
819 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
820 t = ip6t_get_target(e);
822 par.target = t->u.kernel.target;
823 par.targinfo = t->data;
824 par.family = NFPROTO_IPV6;
825 if (par.target->destroy != NULL)
826 par.target->destroy(&par);
827 module_put(par.target->me);
831 /* Checks and translates the user-supplied table segment (held in
834 translate_table(struct net *net,
836 unsigned int valid_hooks,
837 struct xt_table_info *newinfo,
841 const unsigned int *hook_entries,
842 const unsigned int *underflows)
847 newinfo->size = size;
848 newinfo->number = number;
850 /* Init all hooks to impossible value. */
851 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
852 newinfo->hook_entry[i] = 0xFFFFFFFF;
853 newinfo->underflow[i] = 0xFFFFFFFF;
856 duprintf("translate_table: size %u\n", newinfo->size);
858 /* Walk through entries, checking offsets. */
859 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
860 check_entry_size_and_hooks,
864 hook_entries, underflows, valid_hooks, &i);
869 duprintf("translate_table: %u not %u entries\n",
874 /* Check hooks all assigned */
875 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
876 /* Only hooks which are valid */
877 if (!(valid_hooks & (1 << i)))
879 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
880 duprintf("Invalid hook entry %u %u\n",
884 if (newinfo->underflow[i] == 0xFFFFFFFF) {
885 duprintf("Invalid underflow %u %u\n",
891 if (!mark_source_chains(newinfo, valid_hooks, entry0))
894 /* Finally, each sanity check must pass */
896 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
897 find_check_entry, net, name, size, &i);
900 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
905 /* And one copy for every other CPU */
906 for_each_possible_cpu(i) {
907 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
908 memcpy(newinfo->entries[i], entry0, newinfo->size);
916 add_entry_to_counter(const struct ip6t_entry *e,
917 struct xt_counters total[],
920 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
927 set_entry_to_counter(const struct ip6t_entry *e,
928 struct ip6t_counters total[],
931 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
938 get_counters(const struct xt_table_info *t,
939 struct xt_counters counters[])
945 /* Instead of clearing (by a previous call to memset())
946 * the counters and using adds, we set the counters
947 * with data used by 'current' CPU
949 * Bottom half has to be disabled to prevent deadlock
950 * if new softirq were to run and call ipt_do_table
953 curcpu = smp_processor_id();
956 IP6T_ENTRY_ITERATE(t->entries[curcpu],
958 set_entry_to_counter,
962 for_each_possible_cpu(cpu) {
967 IP6T_ENTRY_ITERATE(t->entries[cpu],
969 add_entry_to_counter,
972 xt_info_wrunlock(cpu);
977 static struct xt_counters *alloc_counters(struct xt_table *table)
979 unsigned int countersize;
980 struct xt_counters *counters;
981 struct xt_table_info *private = table->private;
983 /* We need atomic snapshot of counters: rest doesn't change
984 (other than comefrom, which userspace doesn't care
986 countersize = sizeof(struct xt_counters) * private->number;
987 counters = vmalloc_node(countersize, numa_node_id());
989 if (counters == NULL)
990 return ERR_PTR(-ENOMEM);
992 get_counters(private, counters);
998 copy_entries_to_user(unsigned int total_size,
999 struct xt_table *table,
1000 void __user *userptr)
1002 unsigned int off, num;
1003 struct ip6t_entry *e;
1004 struct xt_counters *counters;
1005 const struct xt_table_info *private = table->private;
1007 const void *loc_cpu_entry;
1009 counters = alloc_counters(table);
1010 if (IS_ERR(counters))
1011 return PTR_ERR(counters);
1013 /* choose the copy that is on our node/cpu, ...
1014 * This choice is lazy (because current thread is
1015 * allowed to migrate to another cpu)
1017 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1018 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1023 /* FIXME: use iterator macros --RR */
1024 /* ... then go back and fix counters and names */
1025 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1027 const struct ip6t_entry_match *m;
1028 const struct ip6t_entry_target *t;
1030 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1031 if (copy_to_user(userptr + off
1032 + offsetof(struct ip6t_entry, counters),
1034 sizeof(counters[num])) != 0) {
1039 for (i = sizeof(struct ip6t_entry);
1040 i < e->target_offset;
1041 i += m->u.match_size) {
1044 if (copy_to_user(userptr + off + i
1045 + offsetof(struct ip6t_entry_match,
1047 m->u.kernel.match->name,
1048 strlen(m->u.kernel.match->name)+1)
1055 t = ip6t_get_target(e);
1056 if (copy_to_user(userptr + off + e->target_offset
1057 + offsetof(struct ip6t_entry_target,
1059 t->u.kernel.target->name,
1060 strlen(t->u.kernel.target->name)+1) != 0) {
1071 #ifdef CONFIG_COMPAT
1072 static void compat_standard_from_user(void *dst, void *src)
1074 int v = *(compat_int_t *)src;
1077 v += xt_compat_calc_jump(AF_INET6, v);
1078 memcpy(dst, &v, sizeof(v));
1081 static int compat_standard_to_user(void __user *dst, void *src)
1083 compat_int_t cv = *(int *)src;
1086 cv -= xt_compat_calc_jump(AF_INET6, cv);
1087 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1091 compat_calc_match(struct ip6t_entry_match *m, int *size)
1093 *size += xt_compat_match_offset(m->u.kernel.match);
1097 static int compat_calc_entry(struct ip6t_entry *e,
1098 const struct xt_table_info *info,
1099 void *base, struct xt_table_info *newinfo)
1101 struct ip6t_entry_target *t;
1102 unsigned int entry_offset;
1105 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1106 entry_offset = (void *)e - base;
1107 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1108 t = ip6t_get_target(e);
1109 off += xt_compat_target_offset(t->u.kernel.target);
1110 newinfo->size -= off;
1111 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1115 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1116 if (info->hook_entry[i] &&
1117 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1118 newinfo->hook_entry[i] -= off;
1119 if (info->underflow[i] &&
1120 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1121 newinfo->underflow[i] -= off;
1126 static int compat_table_info(const struct xt_table_info *info,
1127 struct xt_table_info *newinfo)
1129 void *loc_cpu_entry;
1131 if (!newinfo || !info)
1134 /* we dont care about newinfo->entries[] */
1135 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1136 newinfo->initial_entries = 0;
1137 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1138 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1139 compat_calc_entry, info, loc_cpu_entry,
1144 static int get_info(struct net *net, void __user *user, int *len, int compat)
1146 char name[IP6T_TABLE_MAXNAMELEN];
1150 if (*len != sizeof(struct ip6t_getinfo)) {
1151 duprintf("length %u != %zu\n", *len,
1152 sizeof(struct ip6t_getinfo));
1156 if (copy_from_user(name, user, sizeof(name)) != 0)
1159 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1160 #ifdef CONFIG_COMPAT
1162 xt_compat_lock(AF_INET6);
1164 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1165 "ip6table_%s", name);
1166 if (t && !IS_ERR(t)) {
1167 struct ip6t_getinfo info;
1168 const struct xt_table_info *private = t->private;
1170 #ifdef CONFIG_COMPAT
1172 struct xt_table_info tmp;
1173 ret = compat_table_info(private, &tmp);
1174 xt_compat_flush_offsets(AF_INET6);
1178 info.valid_hooks = t->valid_hooks;
1179 memcpy(info.hook_entry, private->hook_entry,
1180 sizeof(info.hook_entry));
1181 memcpy(info.underflow, private->underflow,
1182 sizeof(info.underflow));
1183 info.num_entries = private->number;
1184 info.size = private->size;
1185 strcpy(info.name, name);
1187 if (copy_to_user(user, &info, *len) != 0)
1195 ret = t ? PTR_ERR(t) : -ENOENT;
1196 #ifdef CONFIG_COMPAT
1198 xt_compat_unlock(AF_INET6);
1204 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1207 struct ip6t_get_entries get;
1210 if (*len < sizeof(get)) {
1211 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1214 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1216 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1217 duprintf("get_entries: %u != %zu\n",
1218 *len, sizeof(get) + get.size);
1222 t = xt_find_table_lock(net, AF_INET6, get.name);
1223 if (t && !IS_ERR(t)) {
1224 struct xt_table_info *private = t->private;
1225 duprintf("t->private->number = %u\n", private->number);
1226 if (get.size == private->size)
1227 ret = copy_entries_to_user(private->size,
1228 t, uptr->entrytable);
1230 duprintf("get_entries: I've got %u not %u!\n",
1231 private->size, get.size);
1237 ret = t ? PTR_ERR(t) : -ENOENT;
1243 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1244 struct xt_table_info *newinfo, unsigned int num_counters,
1245 void __user *counters_ptr)
1249 struct xt_table_info *oldinfo;
1250 struct xt_counters *counters;
1251 const void *loc_cpu_old_entry;
1254 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1261 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1262 "ip6table_%s", name);
1263 if (!t || IS_ERR(t)) {
1264 ret = t ? PTR_ERR(t) : -ENOENT;
1265 goto free_newinfo_counters_untrans;
1269 if (valid_hooks != t->valid_hooks) {
1270 duprintf("Valid hook crap: %08X vs %08X\n",
1271 valid_hooks, t->valid_hooks);
1276 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1280 /* Update module usage count based on number of rules */
1281 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1282 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1283 if ((oldinfo->number > oldinfo->initial_entries) ||
1284 (newinfo->number <= oldinfo->initial_entries))
1286 if ((oldinfo->number > oldinfo->initial_entries) &&
1287 (newinfo->number <= oldinfo->initial_entries))
1290 /* Get the old counters, and synchronize with replace */
1291 get_counters(oldinfo, counters);
1293 /* Decrease module usage counts and free resource */
1294 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1295 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1297 xt_free_table_info(oldinfo);
1298 if (copy_to_user(counters_ptr, counters,
1299 sizeof(struct xt_counters) * num_counters) != 0)
1308 free_newinfo_counters_untrans:
1315 do_replace(struct net *net, void __user *user, unsigned int len)
1318 struct ip6t_replace tmp;
1319 struct xt_table_info *newinfo;
1320 void *loc_cpu_entry;
1322 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1325 /* overflow check */
1326 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1329 newinfo = xt_alloc_table_info(tmp.size);
1333 /* choose the copy that is on our node/cpu */
1334 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1335 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1341 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1342 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1343 tmp.hook_entry, tmp.underflow);
1347 duprintf("ip_tables: Translated table\n");
1349 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1350 tmp.num_counters, tmp.counters);
1352 goto free_newinfo_untrans;
1355 free_newinfo_untrans:
1356 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1358 xt_free_table_info(newinfo);
1362 /* We're lazy, and add to the first CPU; overflow works its fey magic
1363 * and everything is OK. */
1365 add_counter_to_entry(struct ip6t_entry *e,
1366 const struct xt_counters addme[],
1369 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1376 do_add_counters(struct net *net, void __user *user, unsigned int len,
1379 unsigned int i, curcpu;
1380 struct xt_counters_info tmp;
1381 struct xt_counters *paddc;
1382 unsigned int num_counters;
1387 const struct xt_table_info *private;
1389 const void *loc_cpu_entry;
1390 #ifdef CONFIG_COMPAT
1391 struct compat_xt_counters_info compat_tmp;
1395 size = sizeof(struct compat_xt_counters_info);
1400 size = sizeof(struct xt_counters_info);
1403 if (copy_from_user(ptmp, user, size) != 0)
1406 #ifdef CONFIG_COMPAT
1408 num_counters = compat_tmp.num_counters;
1409 name = compat_tmp.name;
1413 num_counters = tmp.num_counters;
1417 if (len != size + num_counters * sizeof(struct xt_counters))
1420 paddc = vmalloc_node(len - size, numa_node_id());
1424 if (copy_from_user(paddc, user + size, len - size) != 0) {
1429 t = xt_find_table_lock(net, AF_INET6, name);
1430 if (!t || IS_ERR(t)) {
1431 ret = t ? PTR_ERR(t) : -ENOENT;
1437 private = t->private;
1438 if (private->number != num_counters) {
1440 goto unlock_up_free;
1444 /* Choose the copy that is on our node */
1445 curcpu = smp_processor_id();
1446 xt_info_wrlock(curcpu);
1447 loc_cpu_entry = private->entries[curcpu];
1448 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1450 add_counter_to_entry,
1453 xt_info_wrunlock(curcpu);
1465 #ifdef CONFIG_COMPAT
1466 struct compat_ip6t_replace {
1467 char name[IP6T_TABLE_MAXNAMELEN];
1471 u32 hook_entry[NF_INET_NUMHOOKS];
1472 u32 underflow[NF_INET_NUMHOOKS];
1474 compat_uptr_t counters; /* struct ip6t_counters * */
1475 struct compat_ip6t_entry entries[0];
/*
 * Copy one kernel ip6t_entry out to 32-bit userspace as a
 * compat_ip6t_entry: header and per-rule counters first, then each
 * match and the target via their compat converters.  *dstptr / *size
 * track the user buffer; the target/next offsets are re-patched to
 * account for the 64->32 bit size shrink (origsize - *size).
 * NOTE(review): chunk is sampled — error-return lines are elided here.
 */
1479 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1480 unsigned int *size, struct xt_counters *counters,
1483 struct ip6t_entry_target *t;
1484 struct compat_ip6t_entry __user *ce;
1485 u_int16_t target_offset, next_offset;
1486 compat_uint_t origsize;
/* Raw header copy; offset fields get fixed up below. */
1491 ce = (struct compat_ip6t_entry __user *)*dstptr;
1492 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1495 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1498 *dstptr += sizeof(struct compat_ip6t_entry);
1499 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Matches and target shrink by their per-extension compat offsets. */
1501 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1502 target_offset = e->target_offset - (origsize - *size);
1505 t = ip6t_get_target(e);
1506 ret = xt_compat_target_to_user(t, dstptr, size);
1510 next_offset = e->next_offset - (origsize - *size);
/* Patch the offsets in the already-copied user header. */
1511 if (put_user(target_offset, &ce->target_offset))
1513 if (put_user(next_offset, &ce->next_offset))
/*
 * Look up (and, if needed, request_module-load) the match extension
 * named in a compat entry, take a reference on it, and accumulate the
 * 32->64 bit size delta for this match into *size.
 */
1523 compat_find_calc_match(struct ip6t_entry_match *m,
1525 const struct ip6t_ip6 *ipv6,
1526 unsigned int hookmask,
1527 int *size, unsigned int *i)
1529 struct xt_match *match;
1531 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1532 m->u.user.revision),
1533 "ip6t_%s", m->u.user.name);
1534 if (IS_ERR(match) || !match) {
1535 duprintf("compat_check_calc_match: `%s' not found\n",
1537 return match ? PTR_ERR(match) : -ENOENT;
/* Success: remember the kernel match and widen the size estimate. */
1539 m->u.kernel.match = match;
1540 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken by compat_find_calc_match.  With a
 * non-NULL counter, stop (by returning non-zero from the iterator)
 * once *i matches have been released — used for partial unwinding.
 */
1547 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1549 if (i && (*i)-- == 0)
1552 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry (all of its
 * matches plus its target).  The optional *i counter limits how many
 * entries are released, for unwinding a partially-processed table.
 */
1557 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1559 struct ip6t_entry_target *t;
1561 if (i && (*i)-- == 0)
1564 /* Cleanup all matches */
1565 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1566 t = compat_ip6t_get_target(e);
1567 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one 32-bit rule entry: alignment and bounds
 * against [base, limit), minimum size, then match/target lookup (taking
 * module refs) while computing the compat->native size delta, which is
 * recorded via xt_compat_add_offset.  Also records hook entry/underflow
 * positions that coincide with this entry's offset.
 * On failure the already-acquired match/target refs are dropped via the
 * release_matches/out labels (partially elided in this sampled chunk).
 */
1572 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1573 struct xt_table_info *newinfo,
1575 unsigned char *base,
1576 unsigned char *limit,
1577 unsigned int *hook_entries,
1578 unsigned int *underflows,
1582 struct ip6t_entry_target *t;
1583 struct xt_target *target;
1584 unsigned int entry_offset;
1588 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be aligned and leave room for at least one full entry. */
1589 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1590 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1591 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must cover the entry header plus a minimal target. */
1595 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1596 sizeof(struct compat_xt_entry_target)) {
1597 duprintf("checking: element %p size %u\n",
1602 /* For purposes of check_entry casting the compat entry is fine */
1603 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much bigger the native entry will be. */
1607 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1608 entry_offset = (void *)e - (void *)base;
1610 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1611 &e->ipv6, e->comefrom, &j);
1613 goto release_matches;
1615 t = compat_ip6t_get_target(e);
1616 target = try_then_request_module(xt_find_target(AF_INET6,
1618 t->u.user.revision),
1619 "ip6t_%s", t->u.user.name);
1620 if (IS_ERR(target) || !target) {
1621 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1623 ret = target ? PTR_ERR(target) : -ENOENT;
1624 goto release_matches;
1626 t->u.kernel.target = target;
1628 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for offset translation later. */
1630 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1634 /* Check hooks & underflows */
1635 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1636 if ((unsigned char *)e - base == hook_entries[h])
1637 newinfo->hook_entry[h] = hook_entries[h];
1638 if ((unsigned char *)e - base == underflows[h])
1639 newinfo->underflow[h] = underflows[h];
1642 /* Clear counters and comefrom */
1643 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwinding: drop target ref, then the j match refs taken above. */
1650 module_put(t->u.kernel.target->me);
1652 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second pass: expand one validated compat entry into a native
 * ip6t_entry at *dstptr, converting each match and the target with
 * their *_from_user helpers and re-patching target/next offsets.
 * Hook entry/underflow offsets recorded in pass one that lie beyond
 * this entry are shifted by the growth (origsize - *size is negative
 * growth since *size grows here).
 */
1657 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1658 unsigned int *size, const char *name,
1659 struct xt_table_info *newinfo, unsigned char *base)
1661 struct ip6t_entry_target *t;
1662 struct xt_target *target;
1663 struct ip6t_entry *de;
1664 unsigned int origsize;
/* Copy the fixed header, then the (already zeroed) counters. */
1669 de = (struct ip6t_entry *)*dstptr;
1670 memcpy(de, e, sizeof(struct ip6t_entry));
1671 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1673 *dstptr += sizeof(struct ip6t_entry);
1674 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1676 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1680 de->target_offset = e->target_offset - (origsize - *size);
1681 t = compat_ip6t_get_target(e);
1682 target = t->u.kernel.target;
1683 xt_compat_target_from_user(t, dstptr, size);
1685 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook offsets that point past this (now larger) entry. */
1686 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1687 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1688 newinfo->hook_entry[h] -= origsize - *size;
1689 if ((unsigned char *)de - base < newinfo->underflow[h])
1690 newinfo->underflow[h] -= origsize - *size;
/*
 * Final per-entry check after translation to native layout: run each
 * match's checkentry hook (via check_match with mtpar describing this
 * rule) and then the target's.  On failure, unwind the j matches that
 * already passed their checks.
 */
1695 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1700 struct xt_mtchk_param mtpar;
1704 mtpar.entryinfo = &e->ipv6;
1705 mtpar.hook_mask = e->comefrom;
1706 mtpar.family = NFPROTO_IPV6;
1707 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1709 goto cleanup_matches;
1711 ret = check_target(e, name);
1713 goto cleanup_matches;
/* cleanup_matches: undo the matches checked so far. */
1719 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Convert a whole 32-bit ruleset into native layout.  Two passes under
 * xt_compat_lock: (1) check_compat_entry_size_and_hooks validates each
 * compat entry and records size offsets; (2) compat_copy_entry_from_user
 * expands the entries into a freshly allocated xt_table_info.  Then
 * chains are marked, per-entry checks run, and the translated ruleset
 * is replicated to every possible CPU.  Error paths (partially elided
 * in this sampled chunk) release entry refs and free both table infos.
 */
1724 translate_compat_table(const char *name,
1725 unsigned int valid_hooks,
1726 struct xt_table_info **pinfo,
1728 unsigned int total_size,
1729 unsigned int number,
1730 unsigned int *hook_entries,
1731 unsigned int *underflows)
1734 struct xt_table_info *newinfo, *info;
1735 void *pos, *entry0, *entry1;
1742 info->number = number;
1744 /* Init all hooks to impossible value. */
1745 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1746 info->hook_entry[i] = 0xFFFFFFFF;
1747 info->underflow[i] = 0xFFFFFFFF;
1750 duprintf("translate_compat_table: size %u\n", info->size);
1752 xt_compat_lock(AF_INET6);
1753 /* Walk through entries, checking offsets. */
1754 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1755 check_compat_entry_size_and_hooks,
1756 info, &size, entry0,
1757 entry0 + total_size,
1758 hook_entries, underflows, &j, name);
/* The iterator must have seen exactly `number` entries. */
1764 duprintf("translate_compat_table: %u not %u entries\n",
1769 /* Check hooks all assigned */
1770 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1771 /* Only hooks which are valid */
1772 if (!(valid_hooks & (1 << i)))
1774 if (info->hook_entry[i] == 0xFFFFFFFF) {
1775 duprintf("Invalid hook entry %u %u\n",
1776 i, hook_entries[i]);
1779 if (info->underflow[i] == 0xFFFFFFFF) {
1780 duprintf("Invalid underflow %u %u\n",
/* Pass two: allocate the native-size table and expand entries into it. */
1787 newinfo = xt_alloc_table_info(size);
1791 newinfo->number = number;
1792 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1793 newinfo->hook_entry[i] = info->hook_entry[i];
1794 newinfo->underflow[i] = info->underflow[i];
1796 entry1 = newinfo->entries[raw_smp_processor_id()];
1799 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1800 compat_copy_entry_from_user,
1801 &pos, &size, name, newinfo, entry1);
1802 xt_compat_flush_offsets(AF_INET6);
1803 xt_compat_unlock(AF_INET6);
1808 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1812 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* On a mid-check failure, release the rest of the compat entries and
 * clean up the i entries that already passed. */
1816 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1817 compat_release_entry, &j);
1818 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1819 xt_free_table_info(newinfo);
1823 /* And one copy for every other CPU */
1824 for_each_possible_cpu(i)
1825 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1826 memcpy(newinfo->entries[i], entry1, newinfo->size);
1830 xt_free_table_info(info);
/* Error exits below: free newinfo / release compat refs / drop lock. */
1834 xt_free_table_info(newinfo);
1836 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1839 xt_compat_flush_offsets(AF_INET6);
1840 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header and blob, translate the ruleset to native layout via
 * translate_compat_table, then swap it in with __do_replace.
 */
1845 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1848 struct compat_ip6t_replace tmp;
1849 struct xt_table_info *newinfo;
1850 void *loc_cpu_entry;
1852 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1855 /* overflow check */
1856 if (tmp.size >= INT_MAX / num_possible_cpus())
1858 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1861 newinfo = xt_alloc_table_info(tmp.size)
1865 /* choose the copy that is on our node/cpu */
1866 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1867 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1873 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1874 &newinfo, &loc_cpu_entry, tmp.size,
1875 tmp.num_entries, tmp.hook_entry,
1880 duprintf("compat_do_replace: Translated table\n");
1882 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1883 tmp.num_counters, compat_ptr(tmp.counters));
1885 goto free_newinfo_untrans;
/* Failure after translation: tear the entries back down before free. */
1888 free_newinfo_untrans:
1889 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1891 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: CAP_NET_ADMIN required; routes
 * REPLACE to the compat path and ADD_COUNTERS to do_add_counters
 * with compat=1.
 */
1896 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1901 if (!capable(CAP_NET_ADMIN))
1905 case IP6T_SO_SET_REPLACE:
1906 ret = compat_do_replace(sock_net(sk), user, len);
1909 case IP6T_SO_SET_ADD_COUNTERS:
1910 ret = do_add_counters(sock_net(sk), user, len, 1);
1914 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of the GET_ENTRIES request/reply header. */
1921 struct compat_ip6t_get_entries {
1922 char name[IP6T_TABLE_MAXNAMELEN];
1924 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a table's rules to a 32-bit caller: snapshot counters, then
 * iterate the local CPU's entry copy through compat_copy_entry_to_user.
 */
1928 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1929 void __user *userptr)
1931 struct xt_counters *counters;
1932 const struct xt_table_info *private = table->private;
1936 const void *loc_cpu_entry;
1939 counters = alloc_counters(table);
1940 if (IS_ERR(counters))
1941 return PTR_ERR(counters);
1943 /* choose the copy that is on our node/cpu, ...
1944 * This choice is lazy (because current thread is
1945 * allowed to migrate to another cpu)
1947 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1950 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1951 compat_copy_entry_to_user,
1952 &pos, &size, counters, &i);
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * length against the compat header plus the caller-announced size,
 * look the table up under xt_compat_lock, confirm the compat-computed
 * size matches, and dump the entries.
 */
1959 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1963 struct compat_ip6t_get_entries get;
1966 if (*len < sizeof(get)) {
1967 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1971 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1974 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1975 duprintf("compat_get_entries: %u != %zu\n",
1976 *len, sizeof(get) + get.size);
1980 xt_compat_lock(AF_INET6);
1981 t = xt_find_table_lock(net, AF_INET6, get.name);
1982 if (t && !IS_ERR(t)) {
1983 const struct xt_table_info *private = t->private;
1984 struct xt_table_info info;
1985 duprintf("t->private->number = %u\n", private->number);
/* Only dump if the caller's size matches the compat-layout size. */
1986 ret = compat_table_info(private, &info);
1987 if (!ret && get.size == info.size) {
1988 ret = compat_copy_entries_to_user(private->size,
1989 t, uptr->entrytable);
1991 duprintf("compat_get_entries: I've got %u not %u!\n",
1992 private->size, get.size);
1995 xt_compat_flush_offsets(AF_INET6);
1999 ret = t ? PTR_ERR(t) : -ENOENT;
2001 xt_compat_unlock(AF_INET6);
2005 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: INFO and ENTRIES take the compat
 * paths; everything else falls through to the native handler.
 */
2008 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2012 if (!capable(CAP_NET_ADMIN))
2016 case IP6T_SO_GET_INFO:
2017 ret = get_info(sock_net(sk), user, len, 1);
2019 case IP6T_SO_GET_ENTRIES:
2020 ret = compat_get_entries(sock_net(sk), user, len);
/* Revision queries etc. are layout-identical; reuse the native path. */
2023 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE swaps
 * in a new ruleset, ADD_COUNTERS updates counters (compat=0).
 */
2030 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2034 if (!capable(CAP_NET_ADMIN))
2038 case IP6T_SO_SET_REPLACE:
2039 ret = do_replace(sock_net(sk), user, len);
2042 case IP6T_SO_SET_ADD_COUNTERS:
2043 ret = do_add_counters(sock_net(sk), user, len, 0);
2047 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, entry dump, and
 * match/target revision queries (which may module-load the extension
 * via try_then_request_module).
 */
2055 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2059 if (!capable(CAP_NET_ADMIN))
2063 case IP6T_SO_GET_INFO:
2064 ret = get_info(sock_net(sk), user, len, 0);
2067 case IP6T_SO_GET_ENTRIES:
2068 ret = get_entries(sock_net(sk), user, len);
2071 case IP6T_SO_GET_REVISION_MATCH:
2072 case IP6T_SO_GET_REVISION_TARGET: {
2073 struct ip6t_get_revision rev;
2076 if (*len != sizeof(rev)) {
2080 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2085 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2090 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2093 "ip6t_%s", rev.name);
2098 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Allocate a per-CPU table info for the template ruleset in *repl,
 * translate/validate it, and register it with x_tables.  Returns the
 * registered xt_table or an ERR_PTR; newinfo is freed on any failure.
 */
2105 struct xt_table *ip6t_register_table(struct net *net,
2106 const struct xt_table *table,
2107 const struct ip6t_replace *repl)
2110 struct xt_table_info *newinfo;
2111 struct xt_table_info bootstrap
2112 = { 0, 0, 0, { 0 }, { 0 }, { } };
2113 void *loc_cpu_entry;
2114 struct xt_table *new_table;
2116 newinfo = xt_alloc_table_info(repl->size);
2122 /* choose the copy on our node/cpu, but dont care about preemption */
2123 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2124 memcpy(loc_cpu_entry, repl->entries, repl->size);
2126 ret = translate_table(net, table->name, table->valid_hooks,
2127 newinfo, loc_cpu_entry, repl->size,
2134 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2135 if (IS_ERR(new_table)) {
2136 ret = PTR_ERR(new_table);
/* Failure path: free the table info and propagate the error. */
2142 xt_free_table_info(newinfo);
2144 return ERR_PTR(ret);
/*
 * Unregister a table and free its resources: run cleanup_entry over
 * every rule (dropping extension module refs), release the table
 * owner's module ref if user rules were loaded, and free the info.
 */
2147 void ip6t_unregister_table(struct xt_table *table)
2149 struct xt_table_info *private;
2150 void *loc_cpu_entry;
/* Save the owner before the table structure goes away. */
2151 struct module *table_owner = table->me;
2153 private = xt_unregister_table(table);
2155 /* Decrease module usage counts and free resources */
2156 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2157 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* More entries than the built-ins means userland added rules. */
2158 if (private->number > private->initial_entries)
2159 module_put(table_owner);
2160 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	/* A packet matches when its type equals the rule's type and its
	 * code lies in [min_code, max_code]; invert flips the verdict. */
	bool inside = (type == test_type) &&
		      (code >= min_code) &&
		      (code <= max_code);

	return invert ? !inside : inside;
}
/*
 * xt_match ->match hook for the built-in icmp6 match: refuses
 * non-first fragments, pulls the ICMPv6 header (hot-dropping truncated
 * packets), and delegates the type/code comparison to
 * icmp6_type_code_match with the rule's inversion flag.
 */
2174 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2176 const struct icmp6hdr *ic;
2177 struct icmp6hdr _icmph;
2178 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2180 /* Must not be a fragment. */
2181 if (par->fragoff != 0)
2184 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2186 /* We've been asked to examine this packet, and we
2187 * can't. Hence, no choice but to drop.
2189 duprintf("Dropping evil ICMP tinygram.\n");
2190 *par->hotdrop = true;
2194 return icmp6_type_code_match(icmpinfo->type,
2197 ic->icmp6_type, ic->icmp6_code,
2198 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2201 /* Called when user tries to insert an entry of this type. */
2202 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2204 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2206 /* Must specify no unknown invflags */
2207 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2210 /* The built-in targets: standard (NULL) and error. */
/* Standard target: its targetsize is a plain verdict int; the compat
 * hooks translate that int between 32- and 64-bit userlands. */
2211 static struct xt_target ip6t_standard_target __read_mostly = {
2212 .name = IP6T_STANDARD_TARGET,
2213 .targetsize = sizeof(int),
2214 .family = NFPROTO_IPV6,
2215 #ifdef CONFIG_COMPAT
2216 .compatsize = sizeof(compat_int_t),
2217 .compat_from_user = compat_standard_from_user,
2218 .compat_to_user = compat_standard_to_user,
/* Error target: placed at chain ends; its handler (ip6t_error) fires
 * only on a broken ruleset. */
2222 static struct xt_target ip6t_error_target __read_mostly = {
2223 .name = IP6T_ERROR_TARGET,
2224 .target = ip6t_error,
2225 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2226 .family = NFPROTO_IPV6,
/* get/setsockopt registration: native handlers plus, under
 * CONFIG_COMPAT, the 32-bit entry points. */
2229 static struct nf_sockopt_ops ip6t_sockopts = {
2231 .set_optmin = IP6T_BASE_CTL,
2232 .set_optmax = IP6T_SO_SET_MAX+1,
2233 .set = do_ip6t_set_ctl,
2234 #ifdef CONFIG_COMPAT
2235 .compat_set = compat_do_ip6t_set_ctl,
2237 .get_optmin = IP6T_BASE_CTL,
2238 .get_optmax = IP6T_SO_GET_MAX+1,
2239 .get = do_ip6t_get_ctl,
2240 #ifdef CONFIG_COMPAT
2241 .compat_get = compat_do_ip6t_get_ctl,
2243 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration; proto restricts it to ICMPv6
 * packets. */
2246 static struct xt_match icmp6_matchstruct __read_mostly = {
2248 .match = icmp6_match,
2249 .matchsize = sizeof(struct ip6t_icmp),
2250 .checkentry = icmp6_checkentry,
2251 .proto = IPPROTO_ICMPV6,
2252 .family = NFPROTO_IPV6,
/* Per-netns init: set up the IPv6 x_tables state (proc entries etc.). */
2255 static int __net_init ip6_tables_net_init(struct net *net)
2257 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: undo ip6_tables_net_init. */
2260 static void __net_exit ip6_tables_net_exit(struct net *net)
2262 xt_proto_fini(net, NFPROTO_IPV6);
/* Per-network-namespace lifecycle hooks. */
2265 static struct pernet_operations ip6_tables_net_ops = {
2266 .init = ip6_tables_net_init,
2267 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the two built-in targets, the
 * icmp6 match, and finally the sockopt interface.  The trailing lines
 * (elided labels in this sampled chunk) unwind each registration in
 * reverse order on failure.
 */
2270 static int __init ip6_tables_init(void)
2274 ret = register_pernet_subsys(&ip6_tables_net_ops);
2278 /* Noone else will be downing sem now, so we won't sleep */
2279 ret = xt_register_target(&ip6t_standard_target);
2282 ret = xt_register_target(&ip6t_error_target);
2285 ret = xt_register_match(&icmp6_matchstruct);
2289 /* Register setsockopt */
2290 ret = nf_register_sockopt(&ip6t_sockopts);
2294 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwinding, in reverse registration order: */
2298 xt_unregister_match(&icmp6_matchstruct);
2300 xt_unregister_target(&ip6t_error_target);
2302 xt_unregister_target(&ip6t_standard_target);
2304 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse of init order. */
2309 static void __exit ip6_tables_fini(void)
2311 nf_unregister_sockopt(&ip6t_sockopts);
2313 xt_unregister_match(&icmp6_matchstruct);
2314 xt_unregister_target(&ip6t_error_target);
2315 xt_unregister_target(&ip6t_standard_target);
2317 unregister_pernet_subsys(&ip6_tables_net_ops);
2321 * find the offset to specified header or the protocol number of last header
2322 * if target < 0. "last header" is transport protocol header, ESP, or
2325 * If target header is found, its offset is set in *offset and return protocol
2326 * number. Otherwise, return -1.
2328 * If the first fragment doesn't contain the final protocol header or
2329 * NEXTHDR_NONE it is considered invalid.
2331 * Note that non-1st fragment is special case that "the protocol number
2332 * of last header" is "next header" field in Fragment header. In this case,
2333 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* Walk the IPv6 extension-header chain starting just past the fixed
 * header.  NOTE(review): the loop tail (start += hdrlen, final return)
 * is elided in this sampled chunk. */
2337 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2338 int target, unsigned short *fragoff)
2340 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2341 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2342 unsigned int len = skb->len - start;
2347 while (nexthdr != target) {
2348 struct ipv6_opt_hdr _hdr, *hp;
2349 unsigned int hdrlen;
/* Non-extension header (or NONE) means the chain ended. */
2351 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2357 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2360 if (nexthdr == NEXTHDR_FRAGMENT) {
2361 unsigned short _frag_off;
2363 fp = skb_header_pointer(skb,
2364 start+offsetof(struct frag_hdr,
/* Mask the low 3 flag bits to get the fragment byte offset. */
2371 _frag_off = ntohs(*fp) & ~0x7;
2374 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2375 hp->nexthdr == NEXTHDR_NONE)) {
2377 *fragoff = _frag_off;
/* AUTH header length is counted in 4-byte units, unlike the others. */
2383 } else if (nexthdr == NEXTHDR_AUTH)
2384 hdrlen = (hp->hdrlen + 2) << 2;
2386 hdrlen = ipv6_optlen(hp);
2388 nexthdr = hp->nexthdr;
2397 EXPORT_SYMBOL(ip6t_register_table);
2398 EXPORT_SYMBOL(ip6t_unregister_table);
2399 EXPORT_SYMBOL(ip6t_do_table);
2400 EXPORT_SYMBOL(ip6t_ext_hdr);
2401 EXPORT_SYMBOL(ipv6_find_hdr);
2403 module_init(ip6_tables_init);
2404 module_exit(ip6_tables_fini);