/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
45 #define dprintf(format, args...)
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
51 #define duprintf(format, args...)
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
203 pr_info("error: `%s'\n", (const char *)par->targinfo);
/* Return the rule entry at byte @offset from the table blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
214 /* All zeroes == unconditional rule. */
215 /* Mildly perf critical (only if packet tracing is on) */
216 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
218 static const struct ip6t_ip6 uncond;
220 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
229 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
230 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
231 /* This cries for unification! */
232 static const char *const hooknames[] = {
233 [NF_INET_PRE_ROUTING] = "PREROUTING",
234 [NF_INET_LOCAL_IN] = "INPUT",
235 [NF_INET_FORWARD] = "FORWARD",
236 [NF_INET_LOCAL_OUT] = "OUTPUT",
237 [NF_INET_POST_ROUTING] = "POSTROUTING",
240 enum nf_ip_trace_comments {
241 NF_IP6_TRACE_COMMENT_RULE,
242 NF_IP6_TRACE_COMMENT_RETURN,
243 NF_IP6_TRACE_COMMENT_POLICY,
246 static const char *const comments[] = {
247 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
248 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
249 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
252 static struct nf_loginfo trace_loginfo = {
253 .type = NF_LOG_TYPE_LOG,
257 .logflags = NF_LOG_MASK,
262 /* Mildly perf critical (only if packet tracing is on) */
264 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
265 const char *hookname, const char **chainname,
266 const char **comment, unsigned int *rulenum)
268 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
270 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
271 /* Head of user chain: ERROR target with chainname */
272 *chainname = t->target.data;
277 if (s->target_offset == sizeof(struct ip6t_entry) &&
278 strcmp(t->target.u.kernel.target->name,
279 IP6T_STANDARD_TARGET) == 0 &&
281 unconditional(&s->ipv6)) {
282 /* Tail of chains: STANDARD target (return/policy) */
283 *comment = *chainname == hookname
284 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
285 : comments[NF_IP6_TRACE_COMMENT_RETURN];
294 static void trace_packet(const struct sk_buff *skb,
296 const struct net_device *in,
297 const struct net_device *out,
298 const char *tablename,
299 const struct xt_table_info *private,
300 const struct ip6t_entry *e)
302 const void *table_base;
303 const struct ip6t_entry *root;
304 const char *hookname, *chainname, *comment;
305 const struct ip6t_entry *iter;
306 unsigned int rulenum = 0;
308 table_base = private->entries[smp_processor_id()];
309 root = get_entry(table_base, private->hook_entry[hook]);
311 hookname = chainname = hooknames[hook];
312 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
314 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
315 if (get_chainname_rulenum(iter, e, hookname,
316 &chainname, &comment, &rulenum) != 0)
319 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
320 "TRACE: %s:%s:%s:%u ",
321 tablename, chainname, comment, rulenum);
325 static inline __pure struct ip6t_entry *
326 ip6t_next_entry(const struct ip6t_entry *entry)
328 return (void *)entry + entry->next_offset;
331 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
333 ip6t_do_table(struct sk_buff *skb,
335 const struct net_device *in,
336 const struct net_device *out,
337 struct xt_table *table)
339 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
340 bool hotdrop = false;
341 /* Initializing verdict to NF_DROP keeps gcc happy. */
342 unsigned int verdict = NF_DROP;
343 const char *indev, *outdev;
344 const void *table_base;
345 struct ip6t_entry *e, **jumpstack;
346 unsigned int *stackptr, origptr, cpu;
347 const struct xt_table_info *private;
348 struct xt_action_param acpar;
351 indev = in ? in->name : nulldevname;
352 outdev = out ? out->name : nulldevname;
353 /* We handle fragments by dealing with the first fragment as
354 * if it was a normal packet. All other fragments are treated
355 * normally, except that they will NEVER match rules that ask
356 * things we don't know, ie. tcp syn flag or ports). If the
357 * rule is also a fragment-specific rule, non-fragments won't
359 acpar.hotdrop = &hotdrop;
362 acpar.family = NFPROTO_IPV6;
363 acpar.hooknum = hook;
365 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
368 private = table->private;
369 cpu = smp_processor_id();
370 table_base = private->entries[cpu];
371 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
372 stackptr = &private->stackptr[cpu];
375 e = get_entry(table_base, private->hook_entry[hook]);
378 const struct ip6t_entry_target *t;
379 const struct xt_entry_match *ematch;
382 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
383 &acpar.thoff, &acpar.fragoff, &hotdrop)) {
385 e = ip6t_next_entry(e);
389 xt_ematch_foreach(ematch, e) {
390 acpar.match = ematch->u.kernel.match;
391 acpar.matchinfo = ematch->data;
392 if (!acpar.match->match(skb, &acpar))
396 ADD_COUNTER(e->counters,
397 ntohs(ipv6_hdr(skb)->payload_len) +
398 sizeof(struct ipv6hdr), 1);
400 t = ip6t_get_target_c(e);
401 IP_NF_ASSERT(t->u.kernel.target);
403 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
404 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
405 /* The packet is traced: log it */
406 if (unlikely(skb->nf_trace))
407 trace_packet(skb, hook, in, out,
408 table->name, private, e);
410 /* Standard target? */
411 if (!t->u.kernel.target->target) {
414 v = ((struct ip6t_standard_target *)t)->verdict;
416 /* Pop from stack? */
417 if (v != IP6T_RETURN) {
418 verdict = (unsigned)(-v) - 1;
422 e = get_entry(table_base,
423 private->underflow[hook]);
425 e = ip6t_next_entry(jumpstack[--*stackptr]);
428 if (table_base + v != ip6t_next_entry(e) &&
429 !(e->ipv6.flags & IP6T_F_GOTO)) {
430 if (*stackptr >= private->stacksize) {
434 jumpstack[(*stackptr)++] = e;
437 e = get_entry(table_base, v);
441 acpar.target = t->u.kernel.target;
442 acpar.targinfo = t->data;
444 verdict = t->u.kernel.target->target(skb, &acpar);
445 if (verdict == IP6T_CONTINUE)
446 e = ip6t_next_entry(e);
452 xt_info_rdunlock_bh();
455 #ifdef DEBUG_ALLOW_ALL
464 /* Figures out from what hook each rule can be called: returns 0 if
465 there are loops. Puts hook bitmask in comefrom. */
467 mark_source_chains(const struct xt_table_info *newinfo,
468 unsigned int valid_hooks, void *entry0)
472 /* No recursion; use packet counter to save back ptrs (reset
473 to 0 as we leave), and comefrom to save source hook bitmask */
474 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
475 unsigned int pos = newinfo->hook_entry[hook];
476 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
478 if (!(valid_hooks & (1 << hook)))
481 /* Set initial back pointer. */
482 e->counters.pcnt = pos;
485 const struct ip6t_standard_target *t
486 = (void *)ip6t_get_target_c(e);
487 int visited = e->comefrom & (1 << hook);
489 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
490 printk("iptables: loop hook %u pos %u %08X.\n",
491 hook, pos, e->comefrom);
494 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
496 /* Unconditional return/END. */
497 if ((e->target_offset == sizeof(struct ip6t_entry) &&
498 (strcmp(t->target.u.user.name,
499 IP6T_STANDARD_TARGET) == 0) &&
501 unconditional(&e->ipv6)) || visited) {
502 unsigned int oldpos, size;
504 if ((strcmp(t->target.u.user.name,
505 IP6T_STANDARD_TARGET) == 0) &&
506 t->verdict < -NF_MAX_VERDICT - 1) {
507 duprintf("mark_source_chains: bad "
508 "negative verdict (%i)\n",
513 /* Return: backtrack through the last
516 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
517 #ifdef DEBUG_IP_FIREWALL_USER
519 & (1 << NF_INET_NUMHOOKS)) {
520 duprintf("Back unset "
527 pos = e->counters.pcnt;
528 e->counters.pcnt = 0;
530 /* We're at the start. */
534 e = (struct ip6t_entry *)
536 } while (oldpos == pos + e->next_offset);
539 size = e->next_offset;
540 e = (struct ip6t_entry *)
541 (entry0 + pos + size);
542 e->counters.pcnt = pos;
545 int newpos = t->verdict;
547 if (strcmp(t->target.u.user.name,
548 IP6T_STANDARD_TARGET) == 0 &&
550 if (newpos > newinfo->size -
551 sizeof(struct ip6t_entry)) {
552 duprintf("mark_source_chains: "
553 "bad verdict (%i)\n",
557 /* This a jump; chase it. */
558 duprintf("Jump rule %u -> %u\n",
561 /* ... this is a fallthru */
562 newpos = pos + e->next_offset;
564 e = (struct ip6t_entry *)
566 e->counters.pcnt = pos;
571 duprintf("Finished chain %u\n", hook);
576 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
578 struct xt_mtdtor_param par;
581 par.match = m->u.kernel.match;
582 par.matchinfo = m->data;
583 par.family = NFPROTO_IPV6;
584 if (par.match->destroy != NULL)
585 par.match->destroy(&par);
586 module_put(par.match->me);
590 check_entry(const struct ip6t_entry *e, const char *name)
592 const struct ip6t_entry_target *t;
594 if (!ip6_checkentry(&e->ipv6)) {
595 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
599 if (e->target_offset + sizeof(struct ip6t_entry_target) >
603 t = ip6t_get_target_c(e);
604 if (e->target_offset + t->u.target_size > e->next_offset)
610 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
612 const struct ip6t_ip6 *ipv6 = par->entryinfo;
615 par->match = m->u.kernel.match;
616 par->matchinfo = m->data;
618 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
619 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
621 duprintf("ip_tables: check failed for `%s'.\n",
629 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
631 struct xt_match *match;
634 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
637 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
638 return PTR_ERR(match);
640 m->u.kernel.match = match;
642 ret = check_match(m, par);
648 module_put(m->u.kernel.match->me);
652 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
654 struct ip6t_entry_target *t = ip6t_get_target(e);
655 struct xt_tgchk_param par = {
659 .target = t->u.kernel.target,
661 .hook_mask = e->comefrom,
662 .family = NFPROTO_IPV6,
666 t = ip6t_get_target(e);
667 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
668 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
670 duprintf("ip_tables: check failed for `%s'.\n",
671 t->u.kernel.target->name);
678 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
681 struct ip6t_entry_target *t;
682 struct xt_target *target;
685 struct xt_mtchk_param mtpar;
686 struct xt_entry_match *ematch;
688 ret = check_entry(e, name);
695 mtpar.entryinfo = &e->ipv6;
696 mtpar.hook_mask = e->comefrom;
697 mtpar.family = NFPROTO_IPV6;
698 xt_ematch_foreach(ematch, e) {
699 ret = find_check_match(ematch, &mtpar);
701 goto cleanup_matches;
705 t = ip6t_get_target(e);
706 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
708 if (IS_ERR(target)) {
709 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
710 ret = PTR_ERR(target);
711 goto cleanup_matches;
713 t->u.kernel.target = target;
715 ret = check_target(e, net, name);
720 module_put(t->u.kernel.target->me);
722 xt_ematch_foreach(ematch, e) {
725 cleanup_match(ematch, net);
730 static bool check_underflow(const struct ip6t_entry *e)
732 const struct ip6t_entry_target *t;
733 unsigned int verdict;
735 if (!unconditional(&e->ipv6))
737 t = ip6t_get_target_c(e);
738 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
740 verdict = ((struct ip6t_standard_target *)t)->verdict;
741 verdict = -verdict - 1;
742 return verdict == NF_DROP || verdict == NF_ACCEPT;
746 check_entry_size_and_hooks(struct ip6t_entry *e,
747 struct xt_table_info *newinfo,
748 const unsigned char *base,
749 const unsigned char *limit,
750 const unsigned int *hook_entries,
751 const unsigned int *underflows,
752 unsigned int valid_hooks)
756 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
757 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
758 duprintf("Bad offset %p\n", e);
763 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
764 duprintf("checking: element %p size %u\n",
769 /* Check hooks & underflows */
770 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
771 if (!(valid_hooks & (1 << h)))
773 if ((unsigned char *)e - base == hook_entries[h])
774 newinfo->hook_entry[h] = hook_entries[h];
775 if ((unsigned char *)e - base == underflows[h]) {
776 if (!check_underflow(e)) {
777 pr_err("Underflows must be unconditional and "
778 "use the STANDARD target with "
782 newinfo->underflow[h] = underflows[h];
786 /* Clear counters and comefrom */
787 e->counters = ((struct xt_counters) { 0, 0 });
792 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
794 struct xt_tgdtor_param par;
795 struct ip6t_entry_target *t;
796 struct xt_entry_match *ematch;
798 /* Cleanup all matches */
799 xt_ematch_foreach(ematch, e)
800 cleanup_match(ematch, net);
801 t = ip6t_get_target(e);
804 par.target = t->u.kernel.target;
805 par.targinfo = t->data;
806 par.family = NFPROTO_IPV6;
807 if (par.target->destroy != NULL)
808 par.target->destroy(&par);
809 module_put(par.target->me);
812 /* Checks and translates the user-supplied table segment (held in
815 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
816 const struct ip6t_replace *repl)
818 struct ip6t_entry *iter;
822 newinfo->size = repl->size;
823 newinfo->number = repl->num_entries;
825 /* Init all hooks to impossible value. */
826 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
827 newinfo->hook_entry[i] = 0xFFFFFFFF;
828 newinfo->underflow[i] = 0xFFFFFFFF;
831 duprintf("translate_table: size %u\n", newinfo->size);
833 /* Walk through entries, checking offsets. */
834 xt_entry_foreach(iter, entry0, newinfo->size) {
835 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
843 if (strcmp(ip6t_get_target(iter)->u.user.name,
844 XT_ERROR_TARGET) == 0)
845 ++newinfo->stacksize;
848 if (i != repl->num_entries) {
849 duprintf("translate_table: %u not %u entries\n",
850 i, repl->num_entries);
854 /* Check hooks all assigned */
855 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
856 /* Only hooks which are valid */
857 if (!(repl->valid_hooks & (1 << i)))
859 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
860 duprintf("Invalid hook entry %u %u\n",
861 i, repl->hook_entry[i]);
864 if (newinfo->underflow[i] == 0xFFFFFFFF) {
865 duprintf("Invalid underflow %u %u\n",
866 i, repl->underflow[i]);
871 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
874 /* Finally, each sanity check must pass */
876 xt_entry_foreach(iter, entry0, newinfo->size) {
877 ret = find_check_entry(iter, net, repl->name, repl->size);
884 xt_entry_foreach(iter, entry0, newinfo->size) {
887 cleanup_entry(iter, net);
892 /* And one copy for every other CPU */
893 for_each_possible_cpu(i) {
894 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
895 memcpy(newinfo->entries[i], entry0, newinfo->size);
902 get_counters(const struct xt_table_info *t,
903 struct xt_counters counters[])
905 struct ip6t_entry *iter;
910 /* Instead of clearing (by a previous call to memset())
911 * the counters and using adds, we set the counters
912 * with data used by 'current' CPU
914 * Bottom half has to be disabled to prevent deadlock
915 * if new softirq were to run and call ipt_do_table
918 curcpu = smp_processor_id();
921 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
922 SET_COUNTER(counters[i], iter->counters.bcnt,
923 iter->counters.pcnt);
927 for_each_possible_cpu(cpu) {
932 xt_entry_foreach(iter, t->entries[cpu], t->size) {
933 ADD_COUNTER(counters[i], iter->counters.bcnt,
934 iter->counters.pcnt);
937 xt_info_wrunlock(cpu);
942 static struct xt_counters *alloc_counters(const struct xt_table *table)
944 unsigned int countersize;
945 struct xt_counters *counters;
946 const struct xt_table_info *private = table->private;
948 /* We need atomic snapshot of counters: rest doesn't change
949 (other than comefrom, which userspace doesn't care
951 countersize = sizeof(struct xt_counters) * private->number;
952 counters = vmalloc_node(countersize, numa_node_id());
954 if (counters == NULL)
955 return ERR_PTR(-ENOMEM);
957 get_counters(private, counters);
963 copy_entries_to_user(unsigned int total_size,
964 const struct xt_table *table,
965 void __user *userptr)
967 unsigned int off, num;
968 const struct ip6t_entry *e;
969 struct xt_counters *counters;
970 const struct xt_table_info *private = table->private;
972 const void *loc_cpu_entry;
974 counters = alloc_counters(table);
975 if (IS_ERR(counters))
976 return PTR_ERR(counters);
978 /* choose the copy that is on our node/cpu, ...
979 * This choice is lazy (because current thread is
980 * allowed to migrate to another cpu)
982 loc_cpu_entry = private->entries[raw_smp_processor_id()];
983 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
988 /* FIXME: use iterator macros --RR */
989 /* ... then go back and fix counters and names */
990 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
992 const struct ip6t_entry_match *m;
993 const struct ip6t_entry_target *t;
995 e = (struct ip6t_entry *)(loc_cpu_entry + off);
996 if (copy_to_user(userptr + off
997 + offsetof(struct ip6t_entry, counters),
999 sizeof(counters[num])) != 0) {
1004 for (i = sizeof(struct ip6t_entry);
1005 i < e->target_offset;
1006 i += m->u.match_size) {
1009 if (copy_to_user(userptr + off + i
1010 + offsetof(struct ip6t_entry_match,
1012 m->u.kernel.match->name,
1013 strlen(m->u.kernel.match->name)+1)
1020 t = ip6t_get_target_c(e);
1021 if (copy_to_user(userptr + off + e->target_offset
1022 + offsetof(struct ip6t_entry_target,
1024 t->u.kernel.target->name,
1025 strlen(t->u.kernel.target->name)+1) != 0) {
1036 #ifdef CONFIG_COMPAT
1037 static void compat_standard_from_user(void *dst, const void *src)
1039 int v = *(compat_int_t *)src;
1042 v += xt_compat_calc_jump(AF_INET6, v);
1043 memcpy(dst, &v, sizeof(v));
1046 static int compat_standard_to_user(void __user *dst, const void *src)
1048 compat_int_t cv = *(int *)src;
1051 cv -= xt_compat_calc_jump(AF_INET6, cv);
1052 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1055 static int compat_calc_entry(const struct ip6t_entry *e,
1056 const struct xt_table_info *info,
1057 const void *base, struct xt_table_info *newinfo)
1059 const struct xt_entry_match *ematch;
1060 const struct ip6t_entry_target *t;
1061 unsigned int entry_offset;
1064 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1065 entry_offset = (void *)e - base;
1066 xt_ematch_foreach(ematch, e)
1067 off += xt_compat_match_offset(ematch->u.kernel.match);
1068 t = ip6t_get_target_c(e);
1069 off += xt_compat_target_offset(t->u.kernel.target);
1070 newinfo->size -= off;
1071 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1075 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1076 if (info->hook_entry[i] &&
1077 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1078 newinfo->hook_entry[i] -= off;
1079 if (info->underflow[i] &&
1080 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1081 newinfo->underflow[i] -= off;
1086 static int compat_table_info(const struct xt_table_info *info,
1087 struct xt_table_info *newinfo)
1089 struct ip6t_entry *iter;
1090 void *loc_cpu_entry;
1093 if (!newinfo || !info)
1096 /* we dont care about newinfo->entries[] */
1097 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1098 newinfo->initial_entries = 0;
1099 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1100 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1101 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1109 static int get_info(struct net *net, void __user *user,
1110 const int *len, int compat)
1112 char name[IP6T_TABLE_MAXNAMELEN];
1116 if (*len != sizeof(struct ip6t_getinfo)) {
1117 duprintf("length %u != %zu\n", *len,
1118 sizeof(struct ip6t_getinfo));
1122 if (copy_from_user(name, user, sizeof(name)) != 0)
1125 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1126 #ifdef CONFIG_COMPAT
1128 xt_compat_lock(AF_INET6);
1130 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1131 "ip6table_%s", name);
1132 if (t && !IS_ERR(t)) {
1133 struct ip6t_getinfo info;
1134 const struct xt_table_info *private = t->private;
1135 #ifdef CONFIG_COMPAT
1136 struct xt_table_info tmp;
1139 ret = compat_table_info(private, &tmp);
1140 xt_compat_flush_offsets(AF_INET6);
1144 info.valid_hooks = t->valid_hooks;
1145 memcpy(info.hook_entry, private->hook_entry,
1146 sizeof(info.hook_entry));
1147 memcpy(info.underflow, private->underflow,
1148 sizeof(info.underflow));
1149 info.num_entries = private->number;
1150 info.size = private->size;
1151 strcpy(info.name, name);
1153 if (copy_to_user(user, &info, *len) != 0)
1161 ret = t ? PTR_ERR(t) : -ENOENT;
1162 #ifdef CONFIG_COMPAT
1164 xt_compat_unlock(AF_INET6);
1170 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1174 struct ip6t_get_entries get;
1177 if (*len < sizeof(get)) {
1178 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1181 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1183 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1184 duprintf("get_entries: %u != %zu\n",
1185 *len, sizeof(get) + get.size);
1189 t = xt_find_table_lock(net, AF_INET6, get.name);
1190 if (t && !IS_ERR(t)) {
1191 struct xt_table_info *private = t->private;
1192 duprintf("t->private->number = %u\n", private->number);
1193 if (get.size == private->size)
1194 ret = copy_entries_to_user(private->size,
1195 t, uptr->entrytable);
1197 duprintf("get_entries: I've got %u not %u!\n",
1198 private->size, get.size);
1204 ret = t ? PTR_ERR(t) : -ENOENT;
1210 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1211 struct xt_table_info *newinfo, unsigned int num_counters,
1212 void __user *counters_ptr)
1216 struct xt_table_info *oldinfo;
1217 struct xt_counters *counters;
1218 const void *loc_cpu_old_entry;
1219 struct ip6t_entry *iter;
1222 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1229 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1230 "ip6table_%s", name);
1231 if (!t || IS_ERR(t)) {
1232 ret = t ? PTR_ERR(t) : -ENOENT;
1233 goto free_newinfo_counters_untrans;
1237 if (valid_hooks != t->valid_hooks) {
1238 duprintf("Valid hook crap: %08X vs %08X\n",
1239 valid_hooks, t->valid_hooks);
1244 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1248 /* Update module usage count based on number of rules */
1249 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1250 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1251 if ((oldinfo->number > oldinfo->initial_entries) ||
1252 (newinfo->number <= oldinfo->initial_entries))
1254 if ((oldinfo->number > oldinfo->initial_entries) &&
1255 (newinfo->number <= oldinfo->initial_entries))
1258 /* Get the old counters, and synchronize with replace */
1259 get_counters(oldinfo, counters);
1261 /* Decrease module usage counts and free resource */
1262 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1263 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1264 cleanup_entry(iter, net);
1266 xt_free_table_info(oldinfo);
1267 if (copy_to_user(counters_ptr, counters,
1268 sizeof(struct xt_counters) * num_counters) != 0)
1277 free_newinfo_counters_untrans:
1284 do_replace(struct net *net, const void __user *user, unsigned int len)
1287 struct ip6t_replace tmp;
1288 struct xt_table_info *newinfo;
1289 void *loc_cpu_entry;
1290 struct ip6t_entry *iter;
1292 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1295 /* overflow check */
1296 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1299 newinfo = xt_alloc_table_info(tmp.size);
1303 /* choose the copy that is on our node/cpu */
1304 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1305 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1311 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1315 duprintf("ip_tables: Translated table\n");
1317 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1318 tmp.num_counters, tmp.counters);
1320 goto free_newinfo_untrans;
1323 free_newinfo_untrans:
1324 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1325 cleanup_entry(iter, net);
1327 xt_free_table_info(newinfo);
1332 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1335 unsigned int i, curcpu;
1336 struct xt_counters_info tmp;
1337 struct xt_counters *paddc;
1338 unsigned int num_counters;
1343 const struct xt_table_info *private;
1345 const void *loc_cpu_entry;
1346 struct ip6t_entry *iter;
1347 #ifdef CONFIG_COMPAT
1348 struct compat_xt_counters_info compat_tmp;
1352 size = sizeof(struct compat_xt_counters_info);
1357 size = sizeof(struct xt_counters_info);
1360 if (copy_from_user(ptmp, user, size) != 0)
1363 #ifdef CONFIG_COMPAT
1365 num_counters = compat_tmp.num_counters;
1366 name = compat_tmp.name;
1370 num_counters = tmp.num_counters;
1374 if (len != size + num_counters * sizeof(struct xt_counters))
1377 paddc = vmalloc_node(len - size, numa_node_id());
1381 if (copy_from_user(paddc, user + size, len - size) != 0) {
1386 t = xt_find_table_lock(net, AF_INET6, name);
1387 if (!t || IS_ERR(t)) {
1388 ret = t ? PTR_ERR(t) : -ENOENT;
1394 private = t->private;
1395 if (private->number != num_counters) {
1397 goto unlock_up_free;
1401 /* Choose the copy that is on our node */
1402 curcpu = smp_processor_id();
1403 xt_info_wrlock(curcpu);
1404 loc_cpu_entry = private->entries[curcpu];
1405 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1406 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1409 xt_info_wrunlock(curcpu);
1421 #ifdef CONFIG_COMPAT
1422 struct compat_ip6t_replace {
1423 char name[IP6T_TABLE_MAXNAMELEN];
1427 u32 hook_entry[NF_INET_NUMHOOKS];
1428 u32 underflow[NF_INET_NUMHOOKS];
1430 compat_uptr_t counters; /* struct ip6t_counters * */
1431 struct compat_ip6t_entry entries[0];
/*
 * Copy one kernel-format rule entry out to a compat (32-bit) user buffer.
 * Writes the entry header plus counters[i] at *dstptr, then converts each
 * match and the target via the xt_compat_*_to_user() helpers, which also
 * advance *dstptr and shrink *size.  Finally the target_offset/next_offset
 * fields in the user copy are adjusted by the accumulated size delta
 * (origsize - *size) so they describe the compat layout, not the native one.
 * NOTE(review): error-handling lines are elided in this view; presumably
 * each failed copy returns -EFAULT / propagates ret — confirm in full source.
 */
1435 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1436 unsigned int *size, struct xt_counters *counters,
1439 struct ip6t_entry_target *t;
1440 struct compat_ip6t_entry __user *ce;
1441 u_int16_t target_offset, next_offset;
1442 compat_uint_t origsize;
1443 const struct xt_entry_match *ematch;
/* Raw header + per-rule counter go out first, still with native offsets. */
1447 ce = (struct compat_ip6t_entry __user *)*dstptr;
1448 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1449 copy_to_user(&ce->counters, &counters[i],
1450 sizeof(counters[i])) != 0)
/* Account for the smaller compat header size in the running cursor. */
1453 *dstptr += sizeof(struct compat_ip6t_entry);
1454 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Convert every match payload to its compat representation. */
1456 xt_ematch_foreach(ematch, e) {
1457 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the compat conversion saved so far. */
1461 target_offset = e->target_offset - (origsize - *size);
1462 t = ip6t_get_target(e);
1463 ret = xt_compat_target_to_user(t, dstptr, size);
1466 next_offset = e->next_offset - (origsize - *size);
/* Patch the already-copied user header with the corrected offsets. */
1467 if (put_user(target_offset, &ce->target_offset) != 0 ||
1468 put_user(next_offset, &ce->next_offset) != 0)
/*
 * Resolve a match by user-supplied name/revision (loading its module if
 * needed), cache the kernel match in m->u.kernel.match, and add the
 * native-vs-compat size difference for this match into *size so the
 * caller can compute the kernel-format entry size.
 */
1474 compat_find_calc_match(struct ip6t_entry_match *m,
1476 const struct ip6t_ip6 *ipv6,
1477 unsigned int hookmask,
1480 struct xt_match *match;
1482 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1483 m->u.user.revision);
1484 if (IS_ERR(match)) {
1485 duprintf("compat_check_calc_match: `%s' not found\n",
1487 return PTR_ERR(match);
1489 m->u.kernel.match = match;
/* Grow the running size delta by this match's compat offset. */
1490 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken while validating a compat entry:
 * one per match plus one for the target.  Used on error unwind when an
 * entry was resolved but never fully checked in.
 */
1494 static void compat_release_entry(struct compat_ip6t_entry *e)
1496 struct ip6t_entry_target *t;
1497 struct xt_entry_match *ematch;
1499 /* Cleanup all matches */
1500 xt_ematch_foreach(ematch, e)
1501 module_put(ematch->u.kernel.match->me);
1502 t = compat_ip6t_get_target(e);
1503 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry inside the user blob:
 * checks alignment and bounds, minimum entry size, resolves all matches
 * and the target (taking module refs), records the native/compat size
 * delta with xt_compat_add_offset(), and notes hook entry/underflow
 * positions that land on this entry.  On failure the already-taken
 * module references are released via the goto unwind at the bottom.
 */
1507 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1508 struct xt_table_info *newinfo,
1510 const unsigned char *base,
1511 const unsigned char *limit,
1512 const unsigned int *hook_entries,
1513 const unsigned int *underflows,
1516 struct xt_entry_match *ematch;
1517 struct ip6t_entry_target *t;
1518 struct xt_target *target;
1519 unsigned int entry_offset;
1523 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* The entry must be properly aligned and lie fully inside the blob. */
1524 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1525 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1526 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Reject entries too small to hold a header plus a target. */
1530 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1531 sizeof(struct compat_xt_entry_target)) {
1532 duprintf("checking: element %p size %u\n",
1537 /* For purposes of check_entry casting the compat entry is fine */
1538 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native entry will be. */
1542 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1543 entry_offset = (void *)e - (void *)base;
1545 xt_ematch_foreach(ematch, e) {
1546 ret = compat_find_calc_match(ematch, name,
1547 &e->ipv6, e->comefrom, &off);
1549 goto release_matches;
1553 t = compat_ip6t_get_target(e);
1554 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1555 t->u.user.revision);
1556 if (IS_ERR(target)) {
1557 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1559 ret = PTR_ERR(target);
1560 goto release_matches;
1562 t->u.kernel.target = target;
1564 off += xt_compat_target_offset(target);
/* Remember the per-entry size delta for the second translation pass. */
1566 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1570 /* Check hooks & underflows */
1571 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1572 if ((unsigned char *)e - base == hook_entries[h])
1573 newinfo->hook_entry[h] = hook_entries[h];
1574 if ((unsigned char *)e - base == underflows[h])
1575 newinfo->underflow[h] = underflows[h];
1578 /* Clear counters and comefrom */
1579 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then each match ref taken above. */
1584 module_put(t->u.kernel.target->me);
1586 xt_ematch_foreach(ematch, e) {
1589 module_put(ematch->u.kernel.match->me);
/*
 * Second translation pass: expand one already-validated compat entry
 * into native kernel format at *dstptr.  Matches and the target are
 * converted in place via the xt_compat_*_from_user() helpers; the
 * native entry's target_offset/next_offset grow by the accumulated
 * size delta, and any hook entry/underflow positions that lie past
 * this entry are shifted by the same amount.
 */
1595 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1596 unsigned int *size, const char *name,
1597 struct xt_table_info *newinfo, unsigned char *base)
1599 struct ip6t_entry_target *t;
1600 struct xt_target *target;
1601 struct ip6t_entry *de;
1602 unsigned int origsize;
1604 struct xt_entry_match *ematch;
/* Copy the fixed header and counters, then advance past the native size. */
1608 de = (struct ip6t_entry *)*dstptr;
1609 memcpy(de, e, sizeof(struct ip6t_entry));
1610 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1612 *dstptr += sizeof(struct ip6t_entry);
1613 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1615 xt_ematch_foreach(ematch, e) {
1616 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Native offsets grow by however much the expansion added so far. */
1620 de->target_offset = e->target_offset - (origsize - *size);
1621 t = compat_ip6t_get_target(e);
1622 target = t->u.kernel.target;
1623 xt_compat_target_from_user(t, dstptr, size);
1625 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook/underflow offsets that point beyond this entry. */
1626 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1627 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1628 newinfo->hook_entry[h] -= origsize - *size;
1629 if ((unsigned char *)de - base < newinfo->underflow[h])
1630 newinfo->underflow[h] -= origsize - *size;
/*
 * Run the full ->checkentry validation on a translated (now native)
 * entry: every match is checked via check_match(), then the target via
 * check_target().  On failure, matches that already passed their check
 * are cleaned up before returning the error.
 */
1635 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1640 struct xt_mtchk_param mtpar;
1641 struct xt_entry_match *ematch;
1646 mtpar.entryinfo = &e->ipv6;
1647 mtpar.hook_mask = e->comefrom;
1648 mtpar.family = NFPROTO_IPV6;
1649 xt_ematch_foreach(ematch, e) {
1650 ret = check_match(ematch, &mtpar);
1652 goto cleanup_matches;
1656 ret = check_target(e, net, name);
1658 goto cleanup_matches;
/* Unwind: destroy only the matches whose check already succeeded. */
1662 xt_ematch_foreach(ematch, e) {
1665 cleanup_match(ematch, net);
/*
 * Translate an entire compat-format table blob into native format.
 * Pass 1 (under xt_compat_lock): validate each compat entry, resolve its
 * extensions, and record per-entry size deltas; then verify the entry
 * count and that every valid hook has an entry point and underflow.
 * Pass 2: allocate the native table, expand every entry into it, verify
 * chain reachability with mark_source_chains(), and run the real
 * ->checkentry pass.  On success the native blob is duplicated to every
 * possible CPU and *pinfo/*pentry0 are switched to the new table.  The
 * error unwind at the bottom distinguishes entries that were fully
 * checked (cleanup_entry) from ones that only hold module refs
 * (compat_release_entry) — see the comment kept at internal line 1777.
 */
1671 translate_compat_table(struct net *net,
1673 unsigned int valid_hooks,
1674 struct xt_table_info **pinfo,
1676 unsigned int total_size,
1677 unsigned int number,
1678 unsigned int *hook_entries,
1679 unsigned int *underflows)
1682 struct xt_table_info *newinfo, *info;
1683 void *pos, *entry0, *entry1;
1684 struct compat_ip6t_entry *iter0;
1685 struct ip6t_entry *iter1;
1692 info->number = number;
1694 /* Init all hooks to impossible value. */
1695 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1696 info->hook_entry[i] = 0xFFFFFFFF;
1697 info->underflow[i] = 0xFFFFFFFF;
1700 duprintf("translate_compat_table: size %u\n", info->size);
1702 xt_compat_lock(AF_INET6);
1703 /* Walk through entries, checking offsets. */
1704 xt_entry_foreach(iter0, entry0, total_size) {
1705 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1707 entry0 + total_size,
1718 duprintf("translate_compat_table: %u not %u entries\n",
1723 /* Check hooks all assigned */
1724 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1725 /* Only hooks which are valid */
1726 if (!(valid_hooks & (1 << i)))
1728 if (info->hook_entry[i] == 0xFFFFFFFF) {
1729 duprintf("Invalid hook entry %u %u\n",
1730 i, hook_entries[i]);
1733 if (info->underflow[i] == 0xFFFFFFFF) {
1734 duprintf("Invalid underflow %u %u\n",
/* Pass 2: build the native-format table. */
1741 newinfo = xt_alloc_table_info(size);
1745 newinfo->number = number;
1746 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1747 newinfo->hook_entry[i] = info->hook_entry[i];
1748 newinfo->underflow[i] = info->underflow[i];
1750 entry1 = newinfo->entries[raw_smp_processor_id()];
1753 xt_entry_foreach(iter0, entry0, total_size) {
1754 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1755 name, newinfo, entry1);
/* Offsets recorded in pass 1 are no longer needed. */
1759 xt_compat_flush_offsets(AF_INET6);
1760 xt_compat_unlock(AF_INET6);
1765 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1769 xt_entry_foreach(iter1, entry1, newinfo->size) {
1770 ret = compat_check_entry(iter1, net, name);
1777 * The first i matches need cleanup_entry (calls ->destroy)
1778 * because they had called ->check already. The other j-i
1779 * entries need only release.
1783 xt_entry_foreach(iter0, entry0, newinfo->size) {
1788 compat_release_entry(iter0);
1790 xt_entry_foreach(iter1, entry1, newinfo->size) {
1793 cleanup_entry(iter1, net);
1795 xt_free_table_info(newinfo);
1799 /* And one copy for every other CPU */
1800 for_each_possible_cpu(i)
1801 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1802 memcpy(newinfo->entries[i], entry1, newinfo->size);
1806 xt_free_table_info(info);
/* Error paths: free the new table and release all compat entries. */
1810 xt_free_table_info(newinfo);
1812 xt_entry_foreach(iter0, entry0, total_size) {
1815 compat_release_entry(iter0);
1819 xt_compat_flush_offsets(AF_INET6);
1820 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers.  Copies in the compat
 * replace header, sanity-checks the sizes against integer overflow,
 * copies the rule blob into a freshly allocated table, translates it to
 * native format, and installs it with __do_replace() (which also returns
 * the old counters to the user pointer).  On a failed install the
 * translated entries are cleaned up before the table is freed.
 */
1825 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1828 struct compat_ip6t_replace tmp;
1829 struct xt_table_info *newinfo;
1830 void *loc_cpu_entry;
1831 struct ip6t_entry *iter;
1833 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1836 /* overflow check */
1837 if (tmp.size >= INT_MAX / num_possible_cpus())
1839 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1842 newinfo = xt_alloc_table_info(tmp.size)
1846 /* choose the copy that is on our node/cpu */
1847 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1848 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1854 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1855 &newinfo, &loc_cpu_entry, tmp.size,
1856 tmp.num_entries, tmp.hook_entry,
1861 duprintf("compat_do_replace: Translated table\n");
1863 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1864 tmp.num_counters, compat_ptr(tmp.counters));
1866 goto free_newinfo_untrans;
1869 free_newinfo_untrans:
1870 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1871 cleanup_entry(iter, net);
1873 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to the compat path and ADD_COUNTERS to do_add_counters()
 * with compat=1 so the counter array is decoded in 32-bit layout.
 */
1878 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1883 if (!capable(CAP_NET_ADMIN))
1887 case IP6T_SO_SET_REPLACE:
1888 ret = compat_do_replace(sock_net(sk), user, len);
1891 case IP6T_SO_SET_ADD_COUNTERS:
1892 ret = do_add_counters(sock_net(sk), user, len, 1);
1896 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userland layout of the GET_ENTRIES request/reply: table name
 * in, compat-format rule blob out.
 */
1903 struct compat_ip6t_get_entries {
1904 char name[IP6T_TABLE_MAXNAMELEN];
1906 struct compat_ip6t_entry entrytable[0]; /* variable-length reply blob */
/*
 * Dump a whole table to a compat user buffer: snapshot the counters,
 * then convert each entry with compat_copy_entry_to_user(), pairing
 * entry i with counters[i].
 */
1910 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1911 void __user *userptr)
1913 struct xt_counters *counters;
1914 const struct xt_table_info *private = table->private;
1918 const void *loc_cpu_entry;
1920 struct ip6t_entry *iter;
1922 counters = alloc_counters(table);
1923 if (IS_ERR(counters))
1924 return PTR_ERR(counters);
1926 /* choose the copy that is on our node/cpu, ...
1927 * This choice is lazy (because current thread is
1928 * allowed to migrate to another cpu)
1930 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1933 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1934 ret = compat_copy_entry_to_user(iter, &pos,
1935 &size, counters, i++);
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers.  Validates the
 * user-supplied length against the compat table size (computed by
 * compat_table_info()), then dumps the table under xt_compat_lock so
 * the pass-1 offset bookkeeping is serialized.
 */
1945 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1949 struct compat_ip6t_get_entries get;
1952 if (*len < sizeof(get)) {
1953 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1957 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller's buffer must exactly fit header + advertised blob size. */
1960 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1961 duprintf("compat_get_entries: %u != %zu\n",
1962 *len, sizeof(get) + get.size);
1966 xt_compat_lock(AF_INET6);
1967 t = xt_find_table_lock(net, AF_INET6, get.name);
1968 if (t && !IS_ERR(t)) {
1969 const struct xt_table_info *private = t->private;
1970 struct xt_table_info info;
1971 duprintf("t->private->number = %u\n", private->number);
1972 ret = compat_table_info(private, &info);
1973 if (!ret && get.size == info.size) {
1974 ret = compat_copy_entries_to_user(private->size,
1975 t, uptr->entrytable);
1977 duprintf("compat_get_entries: I've got %u not %u!\n",
1978 private->size, get.size);
1981 xt_compat_flush_offsets(AF_INET6);
1985 ret = t ? PTR_ERR(t) : -ENOENT;
1987 xt_compat_unlock(AF_INET6);
/* Forward declaration: the default case below falls back to the native getter. */
1991 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: GET_INFO with compat=1 and the compat
 * GET_ENTRIES path; everything else is handled by the native
 * do_ip6t_get_ctl() (e.g. revision queries, which are layout-identical).
 */
1994 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1998 if (!capable(CAP_NET_ADMIN))
2002 case IP6T_SO_GET_INFO:
2003 ret = get_info(sock_net(sk), user, len, 1);
2005 case IP6T_SO_GET_ENTRIES:
2006 ret = compat_get_entries(sock_net(sk), user, len);
2009 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to do_replace() and ADD_COUNTERS to do_add_counters() with
 * compat=0 (native counter layout).
 */
2016 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2020 if (!capable(CAP_NET_ADMIN))
2024 case IP6T_SO_SET_REPLACE:
2025 ret = do_replace(sock_net(sk), user, len);
2028 case IP6T_SO_SET_ADD_COUNTERS:
2029 ret = do_add_counters(sock_net(sk), user, len, 0);
2033 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: GET_INFO / GET_ENTRIES query table
 * state; GET_REVISION_MATCH/TARGET look up an extension's revision,
 * auto-loading the "ip6t_<name>" module via try_then_request_module()
 * if it is not yet registered.
 */
2041 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2045 if (!capable(CAP_NET_ADMIN))
2049 case IP6T_SO_GET_INFO:
2050 ret = get_info(sock_net(sk), user, len, 0);
2053 case IP6T_SO_GET_ENTRIES:
2054 ret = get_entries(sock_net(sk), user, len);
2057 case IP6T_SO_GET_REVISION_MATCH:
2058 case IP6T_SO_GET_REVISION_TARGET: {
2059 struct ip6t_get_revision rev;
2062 if (*len != sizeof(rev)) {
2066 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2071 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2076 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2079 "ip6t_%s", rev.name);
2084 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table for this netns from a template
 * ruleset: allocate a table_info sized for repl->size, copy the initial
 * rules into the local CPU's slot, validate/translate them, then hand
 * the table to x_tables.  Returns the registered table or ERR_PTR();
 * on failure the allocated table_info is freed.
 */
2091 struct xt_table *ip6t_register_table(struct net *net,
2092 const struct xt_table *table,
2093 const struct ip6t_replace *repl)
2096 struct xt_table_info *newinfo;
2097 struct xt_table_info bootstrap = {0};
2098 void *loc_cpu_entry;
2099 struct xt_table *new_table;
2101 newinfo = xt_alloc_table_info(repl->size);
2107 /* choose the copy on our node/cpu, but dont care about preemption */
2108 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2109 memcpy(loc_cpu_entry, repl->entries, repl->size);
2111 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2115 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2116 if (IS_ERR(new_table)) {
2117 ret = PTR_ERR(new_table);
2123 xt_free_table_info(newinfo);
2125 return ERR_PTR(ret);
/*
 * Tear down a registered table: detach it from x_tables, destroy every
 * entry (releasing match/target modules), drop the table owner's module
 * reference if user rules were loaded beyond the built-in initial set,
 * and free the table_info.
 */
2128 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2130 struct xt_table_info *private;
2131 void *loc_cpu_entry;
2132 struct module *table_owner = table->me;
2133 struct ip6t_entry *iter;
2135 private = xt_unregister_table(table);
2137 /* Decrease module usage counts and free resources */
2138 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2139 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2140 cleanup_entry(iter, net);
/* The owner ref was only taken when entries beyond the initial set exist. */
2141 if (private->number > private->initial_entries)
2142 module_put(table_owner);
2143 xt_free_table_info(private);
2146 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2148 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2149 u_int8_t type, u_int8_t code,
/* Exact type match plus inclusive [min_code, max_code] code range. */
2152 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Built-in "icmp6" match: pulls the ICMPv6 header at the transport
 * offset and compares type/code against the rule's range.  Non-first
 * fragments never match; a packet too short to contain the header is
 * hot-dropped as malformed.  The IP6T_ICMP_INV flag inverts the result.
 */
2157 icmp6_match(const struct sk_buff *skb, const struct xt_action_param *par)
2159 const struct icmp6hdr *ic;
2160 struct icmp6hdr _icmph;
2161 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2163 /* Must not be a fragment. */
2164 if (par->fragoff != 0)
2167 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2169 /* We've been asked to examine this packet, and we
2170 * can't. Hence, no choice but to drop.
2172 duprintf("Dropping evil ICMP tinygram.\n");
2173 *par->hotdrop = true;
2177 return icmp6_type_code_match(icmpinfo->type,
2180 ic->icmp6_type, ic->icmp6_code,
2181 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2184 /* Called when user tries to insert an entry of this type. */
2185 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2187 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2189 /* Must specify no unknown invflags */
2190 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2193 /* The built-in targets: standard (NULL) and error. */
/*
 * Standard target: its targinfo is just a verdict int, so compat
 * conversion is a plain int width fixup via the generic helpers.
 */
2194 static struct xt_target ip6t_standard_target __read_mostly = {
2195 .name = IP6T_STANDARD_TARGET,
2196 .targetsize = sizeof(int),
2197 .family = NFPROTO_IPV6,
2198 #ifdef CONFIG_COMPAT
2199 .compatsize = sizeof(compat_int_t),
2200 .compat_from_user = compat_standard_from_user,
2201 .compat_to_user = compat_standard_to_user,
/*
 * ERROR target: placed at the end of built-in chains; its targinfo
 * carries an error/chain name of IP6T_FUNCTION_MAXNAMELEN bytes.
 */
2205 static struct xt_target ip6t_error_target __read_mostly = {
2206 .name = IP6T_ERROR_TARGET,
2207 .target = ip6t_error,
2208 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2209 .family = NFPROTO_IPV6,
/*
 * Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges to the native handlers, with compat handlers for 32-bit
 * callers when CONFIG_COMPAT is enabled.
 */
2212 static struct nf_sockopt_ops ip6t_sockopts = {
2214 .set_optmin = IP6T_BASE_CTL,
2215 .set_optmax = IP6T_SO_SET_MAX+1,
2216 .set = do_ip6t_set_ctl,
2217 #ifdef CONFIG_COMPAT
2218 .compat_set = compat_do_ip6t_set_ctl,
2220 .get_optmin = IP6T_BASE_CTL,
2221 .get_optmax = IP6T_SO_GET_MAX+1,
2222 .get = do_ip6t_get_ctl,
2223 #ifdef CONFIG_COMPAT
2224 .compat_get = compat_do_ip6t_get_ctl,
2226 .owner = THIS_MODULE,
/* Built-in match for ICMPv6 type/code, restricted to proto IPPROTO_ICMPV6. */
2229 static struct xt_match icmp6_matchstruct __read_mostly = {
2231 .match = icmp6_match,
2232 .matchsize = sizeof(struct ip6t_icmp),
2233 .checkentry = icmp6_checkentry,
2234 .proto = IPPROTO_ICMPV6,
2235 .family = NFPROTO_IPV6,
/* Per-netns init: set up the IPv6 x_tables state (proc entries etc.). */
2238 static int __net_init ip6_tables_net_init(struct net *net)
2240 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: release the IPv6 x_tables state. */
2243 static void __net_exit ip6_tables_net_exit(struct net *net)
2245 xt_proto_fini(net, NFPROTO_IPV6);
/* Registered with register_pernet_subsys() so every netns gets its own state. */
2248 static struct pernet_operations ip6_tables_net_ops = {
2249 .init = ip6_tables_net_init,
2250 .exit = ip6_tables_net_exit,
/*
 * Module init: register, in order, the per-netns subsystem, the two
 * built-in targets, the icmp6 match, and finally the sockopt interface.
 * Each failure unwinds the previously registered pieces (the unwind
 * labels are elided in this view but mirror the registrations in
 * reverse).
 */
2253 static int __init ip6_tables_init(void)
2257 ret = register_pernet_subsys(&ip6_tables_net_ops);
2261 /* Noone else will be downing sem now, so we won't sleep */
2262 ret = xt_register_target(&ip6t_standard_target);
2265 ret = xt_register_target(&ip6t_error_target);
2268 ret = xt_register_match(&icmp6_matchstruct);
2272 /* Register setsockopt */
2273 ret = nf_register_sockopt(&ip6t_sockopts);
2277 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind: undo registrations in reverse order. */
2281 xt_unregister_match(&icmp6_matchstruct);
2283 xt_unregister_target(&ip6t_error_target);
2285 xt_unregister_target(&ip6t_standard_target);
2287 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of ip6_tables_init(). */
2292 static void __exit ip6_tables_fini(void)
2294 nf_unregister_sockopt(&ip6t_sockopts);
2296 xt_unregister_match(&icmp6_matchstruct);
2297 xt_unregister_target(&ip6t_error_target);
2298 xt_unregister_target(&ip6t_standard_target);
2300 unregister_pernet_subsys(&ip6_tables_net_ops);
2304 * Find the offset of the specified header, or the protocol number of the
2305 * last header if target < 0.  "Last header" means the transport protocol
2308 * If the target header is found, its offset is stored in *offset and its
2309 * protocol number is returned.  Otherwise, -1 is returned.
2311 * If the first fragment doesn't contain the final protocol header or
2312 * NEXTHDR_NONE it is considered invalid.
2314 * Note that a non-first fragment is a special case: "the protocol number
2315 * of the last header" is taken from the "next header" field of the Fragment
2316 * header.  In this case *offset is meaningless, and the fragment offset is
2320 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2321 int target, unsigned short *fragoff)
/* Start walking right after the fixed IPv6 header. */
2323 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2324 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2325 unsigned int len = skb->len - start;
2330 while (nexthdr != target) {
2331 struct ipv6_opt_hdr _hdr, *hp;
2332 unsigned int hdrlen;
/* A non-extension header (or NEXTHDR_NONE) ends the chain. */
2334 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2340 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2343 if (nexthdr == NEXTHDR_FRAGMENT) {
2344 unsigned short _frag_off;
2346 fp = skb_header_pointer(skb,
2347 start+offsetof(struct frag_hdr,
/* Mask off the low 3 flag bits to get the fragment offset. */
2354 _frag_off = ntohs(*fp) & ~0x7;
2357 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2358 hp->nexthdr == NEXTHDR_NONE)) {
2360 *fragoff = _frag_off;
/* AH length is counted in 32-bit words, other ext headers in 64-bit units. */
2366 } else if (nexthdr == NEXTHDR_AUTH)
2367 hdrlen = (hp->hdrlen + 2) << 2;
2369 hdrlen = ipv6_optlen(hp);
2371 nexthdr = hp->nexthdr;
/* Public API exported to other netfilter modules (ip6table_filter etc.). */
2380 EXPORT_SYMBOL(ip6t_register_table);
2381 EXPORT_SYMBOL(ip6t_unregister_table);
2382 EXPORT_SYMBOL(ip6t_do_table);
2383 EXPORT_SYMBOL(ip6t_ext_hdr);
2384 EXPORT_SYMBOL(ipv6_find_hdr);
2386 module_init(ip6_tables_init);
2387 module_exit(ip6_tables_fini);