/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug printf helpers: compiled out entirely unless the corresponding
 * DEBUG_* macro is defined above.  (This extract had lost the #else and
 * #endif lines; restored here.) */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
/* Runtime assertion: logs location on failure, never aborts. */
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
86 /* Check for an extension */
/* Check for an extension: returns non-zero when @nexthdr is one of the
 * IPv6 extension-header protocol numbers (hop-by-hop, routing, fragment,
 * ESP, AH, no-next-header, destination options). */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return (nexthdr == IPPROTO_HOPOPTS) ||
	       (nexthdr == IPPROTO_ROUTING) ||
	       (nexthdr == IPPROTO_FRAGMENT) ||
	       (nexthdr == IPPROTO_ESP) ||
	       (nexthdr == IPPROTO_AH) ||
	       (nexthdr == IPPROTO_NONE) ||
	       (nexthdr == IPPROTO_DSTOPTS);
}
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
209 /* Performance critical - called for every packet */
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset within a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
234 static const struct ip6t_ip6 uncond;
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper: fetch an entry's target without writing it.
 * The cast is safe because ip6t_get_target() does not modify *e. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
273 .logflags = NF_LOG_MASK,
278 /* Mildly perf critical (only if packet tracing is on) */
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
310 static void trace_packet(const struct sk_buff *skb,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
322 unsigned int rulenum = 0;
324 table_base = private->entries[smp_processor_id()];
325 root = get_entry(table_base, private->hook_entry[hook]);
327 hookname = chainname = hooknames[hook];
328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
331 if (get_chainname_rulenum(iter, e, hookname,
332 &chainname, &comment, &rulenum) != 0)
335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
336 "TRACE: %s:%s:%s:%u ",
337 tablename, chainname, comment, rulenum);
341 static inline __pure struct ip6t_entry *
342 ip6t_next_entry(const struct ip6t_entry *entry)
344 return (void *)entry + entry->next_offset;
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core traversal: walk the per-CPU copy of @table for this hook, matching
 * each rule against @skb and applying its target until a final verdict
 * (ACCEPT/DROP/...) is produced.  Standard targets are interpreted inline
 * (verdict, RETURN via the back pointer, jump/goto); extension targets are
 * invoked through their ->target() hook.
 *
 * NOTE(review): this extract appears to have lost lines (the return type
 * and "hook" parameter, opening brace, the read-lock calls, the main loop
 * header and several closing braces/returns).  Code tokens below are
 * preserved exactly as found — restore the missing lines from the upstream
 * file before building.
 */
ip6t_do_table(struct sk_buff *skb,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
/* Debug alias for the comefrom field of the entry at table_base. */
#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, *back;
	const struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* NULL devices compare as an all-zero name. */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * be able to match it either. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV6;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	private = table->private;
	/* Per-CPU copy of the ruleset. */
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

		const struct ip6t_entry_target *t;
		const struct xt_entry_match *ematch;

		/* Rule header mismatch (addresses/ifaces/proto): next rule. */
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
			e = ip6t_next_entry(e);

		xt_ematch_foreach(ematch, e)
			if (do_match(ematch, skb, &mtpar) != 0)

		/* Matched: account payload+fixed header bytes, one packet. */
		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {

			v = ((struct ip6t_standard_target *)t)->verdict;

			/* Pop from stack? */
			if (v != IP6T_RETURN) {
				/* Negative verdicts encode NF_* - 1. */
				verdict = (unsigned)(-v) - 1;

				back = get_entry(table_base, back->comefrom);

			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ip6t_entry *next = ip6t_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */

			e = get_entry(table_base, v);

		/* Targets which reenter must return abs. verdicts. */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);

		tb_comefrom = 0x57acc001;
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);

#ifdef CONFIG_NETFILTER_DEBUG
	tb_comefrom = NETFILTER_LINK_POISON;
	xt_info_rdunlock_bh();
#ifdef DEBUG_ALLOW_ALL
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/*
 * NOTE(review): this extract appears to have lost lines (opening brace,
 * the "unsigned int hook" declaration, the inner do/for loop headers,
 * several returns and closing braces, and the "} else {" of the
 * jump/fallthru branch).  Code tokens below are preserved exactly as
 * found — restore from the upstream file before building.
 */
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

			const struct ip6t_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* A set NUMHOOKS bit means we are re-visiting an
			 * entry on the current walk: a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     IP6T_STANDARD_TARGET) == 0) &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",

				/* Return: backtrack through the last
				   big jump. */
				e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
				    & (1 << NF_INET_NUMHOOKS)) {
					duprintf("Back unset "

					/* Follow saved back pointers. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					e = (struct ip6t_entry *)
				} while (oldpos == pos + e->next_offset);

				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;

				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0 &&
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				e = (struct ip6t_entry *)
				e->counters.pcnt = pos;
		duprintf("Finished chain %u\n", hook);
606 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
608 struct xt_mtdtor_param par;
611 par.match = m->u.kernel.match;
612 par.matchinfo = m->data;
613 par.family = NFPROTO_IPV6;
614 if (par.match->destroy != NULL)
615 par.match->destroy(&par);
616 module_put(par.match->me);
620 check_entry(const struct ip6t_entry *e, const char *name)
622 const struct ip6t_entry_target *t;
624 if (!ip6_checkentry(&e->ipv6)) {
625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
629 if (e->target_offset + sizeof(struct ip6t_entry_target) >
633 t = ip6t_get_target_c(e);
634 if (e->target_offset + t->u.target_size > e->next_offset)
640 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
645 par->match = m->u.kernel.match;
646 par->matchinfo = m->data;
648 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
649 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
651 duprintf("ip_tables: check failed for `%s'.\n",
659 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
661 struct xt_match *match;
664 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
666 "ip6t_%s", m->u.user.name);
667 if (IS_ERR(match) || !match) {
668 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
669 return match ? PTR_ERR(match) : -ENOENT;
671 m->u.kernel.match = match;
673 ret = check_match(m, par);
679 module_put(m->u.kernel.match->me);
683 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
685 struct ip6t_entry_target *t = ip6t_get_target(e);
686 struct xt_tgchk_param par = {
690 .target = t->u.kernel.target,
692 .hook_mask = e->comefrom,
693 .family = NFPROTO_IPV6,
697 t = ip6t_get_target(e);
698 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
699 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
701 duprintf("ip_tables: check failed for `%s'.\n",
702 t->u.kernel.target->name);
709 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
712 struct ip6t_entry_target *t;
713 struct xt_target *target;
716 struct xt_mtchk_param mtpar;
717 struct xt_entry_match *ematch;
719 ret = check_entry(e, name);
726 mtpar.entryinfo = &e->ipv6;
727 mtpar.hook_mask = e->comefrom;
728 mtpar.family = NFPROTO_IPV6;
729 xt_ematch_foreach(ematch, e) {
730 ret = find_check_match(ematch, &mtpar);
732 goto cleanup_matches;
736 t = ip6t_get_target(e);
737 target = try_then_request_module(xt_find_target(AF_INET6,
740 "ip6t_%s", t->u.user.name);
741 if (IS_ERR(target) || !target) {
742 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
743 ret = target ? PTR_ERR(target) : -ENOENT;
744 goto cleanup_matches;
746 t->u.kernel.target = target;
748 ret = check_target(e, net, name);
753 module_put(t->u.kernel.target->me);
755 xt_ematch_foreach(ematch, e) {
758 cleanup_match(ematch, net);
763 static bool check_underflow(const struct ip6t_entry *e)
765 const struct ip6t_entry_target *t;
766 unsigned int verdict;
768 if (!unconditional(&e->ipv6))
770 t = ip6t_get_target_c(e);
771 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
773 verdict = ((struct ip6t_standard_target *)t)->verdict;
774 verdict = -verdict - 1;
775 return verdict == NF_DROP || verdict == NF_ACCEPT;
779 check_entry_size_and_hooks(struct ip6t_entry *e,
780 struct xt_table_info *newinfo,
781 const unsigned char *base,
782 const unsigned char *limit,
783 const unsigned int *hook_entries,
784 const unsigned int *underflows,
785 unsigned int valid_hooks)
789 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
790 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
791 duprintf("Bad offset %p\n", e);
796 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
797 duprintf("checking: element %p size %u\n",
802 /* Check hooks & underflows */
803 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
804 if (!(valid_hooks & (1 << h)))
806 if ((unsigned char *)e - base == hook_entries[h])
807 newinfo->hook_entry[h] = hook_entries[h];
808 if ((unsigned char *)e - base == underflows[h]) {
809 if (!check_underflow(e)) {
810 pr_err("Underflows must be unconditional and "
811 "use the STANDARD target with "
815 newinfo->underflow[h] = underflows[h];
819 /* Clear counters and comefrom */
820 e->counters = ((struct xt_counters) { 0, 0 });
825 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
827 struct xt_tgdtor_param par;
828 struct ip6t_entry_target *t;
829 struct xt_entry_match *ematch;
831 /* Cleanup all matches */
832 xt_ematch_foreach(ematch, e)
833 cleanup_match(ematch, net);
834 t = ip6t_get_target(e);
837 par.target = t->u.kernel.target;
838 par.targinfo = t->data;
839 par.family = NFPROTO_IPV6;
840 if (par.target->destroy != NULL)
841 par.target->destroy(&par);
842 module_put(par.target->me);
/* Checks and translates the user-supplied table segment (held in
   newinfo). */
/*
 * NOTE(review): this extract appears to have lost lines (opening brace,
 * "int ret;"/"unsigned int i;" declarations, the error returns inside the
 * validation loops, the per-entry error-unwind path and the final
 * "return ret;").  Code tokens below are preserved exactly as found —
 * restore the missing lines from the upstream file before building.
 */
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
	struct ip6t_entry *iter;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;

	duprintf("translate_table: size %u\n", newinfo->size);

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
			entry0 + repl->size, repl->hook_entry, repl->underflow,

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);

	/* Loop detection / comefrom marking; zero return means a loop. */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))

	/* Finally, each sanity check must pass */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);

	/* Error unwind: tear down the entries already set up. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		cleanup_entry(iter, net);

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
/*
 * Snapshot per-rule byte/packet counters from every CPU's copy of the
 * table into @counters: SET from the current CPU first, then ADD every
 * other CPU's copy under its write lock.
 *
 * NOTE(review): this extract appears to have lost lines (return type and
 * opening brace, declarations of cpu/curcpu/i, local_bh_disable/enable,
 * the i = 0 resets, xt_info_wrlock(cpu) and loop-closing braces).  Code
 * tokens below are preserved exactly as found — restore from upstream.
 */
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
	struct ip6t_entry *iter;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	curcpu = smp_processor_id();

	/* First pass: seed counters[] from the current CPU's copy. */
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);

	/* Second pass: accumulate every other CPU's copy. */
	for_each_possible_cpu(cpu) {

		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);

		xt_info_wrunlock(cpu);
970 static struct xt_counters *alloc_counters(const struct xt_table *table)
972 unsigned int countersize;
973 struct xt_counters *counters;
974 const struct xt_table_info *private = table->private;
976 /* We need atomic snapshot of counters: rest doesn't change
977 (other than comefrom, which userspace doesn't care
979 countersize = sizeof(struct xt_counters) * private->number;
980 counters = vmalloc_node(countersize, numa_node_id());
982 if (counters == NULL)
983 return ERR_PTR(-ENOMEM);
985 get_counters(private, counters);
/*
 * Copy the whole ruleset blob to userspace, then patch each entry's
 * counters (from a fresh snapshot) and rewrite kernel match/target
 * pointers back into their user-visible names.
 *
 * NOTE(review): this extract appears to have lost lines (return type,
 * braces, "int ret = 0;", the -EFAULT error assignments/gotos, the
 * source argument of the counters copy_to_user, and the trailing
 * vfree/return).  Code tokens below are preserved exactly as found —
 * restore the missing lines from the upstream file before building.
 */
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu) */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the counters field with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 sizeof(counters[num])) != 0) {

		/* Walk this entry's matches and fix each name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)

		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
1064 #ifdef CONFIG_COMPAT
1065 static void compat_standard_from_user(void *dst, const void *src)
1067 int v = *(compat_int_t *)src;
1070 v += xt_compat_calc_jump(AF_INET6, v);
1071 memcpy(dst, &v, sizeof(v));
1074 static int compat_standard_to_user(void __user *dst, const void *src)
1076 compat_int_t cv = *(int *)src;
1079 cv -= xt_compat_calc_jump(AF_INET6, cv);
1080 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1083 static int compat_calc_entry(const struct ip6t_entry *e,
1084 const struct xt_table_info *info,
1085 const void *base, struct xt_table_info *newinfo)
1087 const struct xt_entry_match *ematch;
1088 const struct ip6t_entry_target *t;
1089 unsigned int entry_offset;
1092 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1093 entry_offset = (void *)e - base;
1094 xt_ematch_foreach(ematch, e)
1095 off += xt_compat_match_offset(ematch->u.kernel.match);
1096 t = ip6t_get_target_c(e);
1097 off += xt_compat_target_offset(t->u.kernel.target);
1098 newinfo->size -= off;
1099 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1103 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1104 if (info->hook_entry[i] &&
1105 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1106 newinfo->hook_entry[i] -= off;
1107 if (info->underflow[i] &&
1108 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1109 newinfo->underflow[i] -= off;
1114 static int compat_table_info(const struct xt_table_info *info,
1115 struct xt_table_info *newinfo)
1117 struct ip6t_entry *iter;
1118 void *loc_cpu_entry;
1121 if (!newinfo || !info)
1124 /* we dont care about newinfo->entries[] */
1125 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1126 newinfo->initial_entries = 0;
1127 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1128 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1129 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/*
 * IP6T_SO_GET_INFO handler: copy table metadata (hooks, underflows,
 * entry count, size) to userspace; in @compat mode the sizes/offsets are
 * first recomputed for the 32-bit layout.
 *
 * NOTE(review): this extract appears to have lost lines (opening brace,
 * "struct xt_table *t;"/"int ret;" declarations, several error returns,
 * the "if (compat)" guards around the compat lock/size calls, the
 * ret = 0 / -EFAULT assignments and module_put/xt_table_unlock on the
 * success path).  Code tokens below are preserved exactly as found.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
	char name[IP6T_TABLE_MAXNAMELEN];

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));

	if (copy_from_user(name, user, sizeof(name)) != 0)

	/* Force NUL termination of the user-supplied table name. */
	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
		xt_compat_lock(AF_INET6);
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

			/* Compat caller: report 32-bit sizes/offsets. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);

		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)

		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
		xt_compat_unlock(AF_INET6);
1198 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1202 struct ip6t_get_entries get;
1205 if (*len < sizeof(get)) {
1206 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1209 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1211 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1212 duprintf("get_entries: %u != %zu\n",
1213 *len, sizeof(get) + get.size);
1217 t = xt_find_table_lock(net, AF_INET6, get.name);
1218 if (t && !IS_ERR(t)) {
1219 struct xt_table_info *private = t->private;
1220 duprintf("t->private->number = %u\n", private->number);
1221 if (get.size == private->size)
1222 ret = copy_entries_to_user(private->size,
1223 t, uptr->entrytable);
1225 duprintf("get_entries: I've got %u not %u!\n",
1226 private->size, get.size);
1232 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * Swap @newinfo in as the ruleset of table @name: snapshot the old
 * counters, tear down the old entries, adjust module refcounts by rule
 * count, and copy the final counters back to userspace.
 *
 * NOTE(review): this extract appears to have lost lines (return type,
 * opening brace, "int ret;"/"struct xt_table *t;" declarations, the
 * vmalloc second argument + NULL check, the module_put/try_module_get
 * refcount statements, error labels and final returns).  Code tokens
 * below are preserved exactly as found — restore from upstream.
 */
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;

	/* The replacement must cover exactly the same hooks. */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)

 free_newinfo_counters_untrans:
1312 do_replace(struct net *net, const void __user *user, unsigned int len)
1315 struct ip6t_replace tmp;
1316 struct xt_table_info *newinfo;
1317 void *loc_cpu_entry;
1318 struct ip6t_entry *iter;
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1323 /* overflow check */
1324 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1327 newinfo = xt_alloc_table_info(tmp.size);
1331 /* choose the copy that is on our node/cpu */
1332 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1333 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1339 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1343 duprintf("ip_tables: Translated table\n");
1345 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1346 tmp.num_counters, tmp.counters);
1348 goto free_newinfo_untrans;
1351 free_newinfo_untrans:
1352 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1353 cleanup_entry(iter, net);
1355 xt_free_table_info(newinfo);
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: read a counters blob from userspace
 * (native or compat layout, selected by @compat) and add each pair onto
 * the current CPU's copy of the matching table's rule counters.
 *
 * NOTE(review): this extract appears to have lost lines (return type,
 * opening brace, declarations of ret/name/size/ptmp/t/paddc checks,
 * the if/else selecting compat vs native header, error returns/labels,
 * the i = 0 reset and ++i in the add loop, and the unlock/vfree tail).
 * Code tokens below are preserved exactly as found — restore from
 * upstream before building.
 */
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const struct xt_table_info *private;
	const void *loc_cpu_entry;
	struct ip6t_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

		/* Compat caller: smaller header layout. */
		size = sizeof(struct compat_xt_counters_info);

		size = sizeof(struct xt_counters_info);

	if (copy_from_user(ptmp, user, size) != 0)

#ifdef CONFIG_COMPAT
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;

		num_counters = tmp.num_counters;

	/* The payload must be exactly num_counters entries. */
	if (len != size + num_counters * sizeof(struct xt_counters))

	paddc = vmalloc_node(len - size, numa_node_id());

	if (copy_from_user(paddc, user + size, len - size) != 0) {

	t = xt_find_table_lock(net, AF_INET6, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;

	private = t->private;
	if (private->number != num_counters) {
		goto unlock_up_free;

	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	xt_info_wrlock(curcpu);
	loc_cpu_entry = private->entries[curcpu];
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);

	xt_info_wrunlock(curcpu);
1449 #ifdef CONFIG_COMPAT
1450 struct compat_ip6t_replace {
1451 char name[IP6T_TABLE_MAXNAMELEN];
1455 u32 hook_entry[NF_INET_NUMHOOKS];
1456 u32 underflow[NF_INET_NUMHOOKS];
1458 compat_uptr_t counters; /* struct ip6t_counters * */
1459 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - convert one kernel ip6t_entry (plus its
 * matches and target) to the 32-bit compat layout and copy it to the
 * userspace cursor *dstptr, advancing *dstptr and shrinking *size by the
 * native-vs-compat size delta.  counters[i] supplies the counter value
 * written into the user-visible entry.
 */
1463 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1464 unsigned int *size, struct xt_counters *counters,
1467 struct ip6t_entry_target *t;
1468 struct compat_ip6t_entry __user *ce;
1469 u_int16_t target_offset, next_offset;
1470 compat_uint_t origsize;
1471 const struct xt_entry_match *ematch;
1475 ce = (struct compat_ip6t_entry __user *)*dstptr;
1476 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1477 copy_to_user(&ce->counters, &counters[i],
1478 sizeof(counters[i])) != 0)
1481 *dstptr += sizeof(struct compat_ip6t_entry);
1482 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match, then the target, is converted by the x_tables helpers. */
1484 xt_ematch_foreach(ematch, e) {
1485 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* In-entry offsets shrink by however many bytes were saved so far. */
1489 target_offset = e->target_offset - (origsize - *size);
1490 t = ip6t_get_target(e);
1491 ret = xt_compat_target_to_user(t, dstptr, size);
1494 next_offset = e->next_offset - (origsize - *size);
/* Patch the compat entry's offsets after the converted sizes are known. */
1495 if (put_user(target_offset, &ce->target_offset) != 0 ||
1496 put_user(next_offset, &ce->next_offset) != 0)
/*
 * compat_find_calc_match - look up (loading the module on demand) the
 * xt_match named in @m, pin it in m->u.kernel.match, and accumulate its
 * native/compat size offset into *size (final parameter; its declaration
 * is elided from this view -- TODO confirm).
 */
1502 compat_find_calc_match(struct ip6t_entry_match *m,
1504 const struct ip6t_ip6 *ipv6,
1505 unsigned int hookmask,
1508 struct xt_match *match;
/* Autoload "ip6t_<name>" if the match isn't registered yet. */
1510 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1511 m->u.user.revision),
1512 "ip6t_%s", m->u.user.name);
1513 if (IS_ERR(match) || !match) {
1514 duprintf("compat_check_calc_match: `%s' not found\n",
1516 return match ? PTR_ERR(match) : -ENOENT;
1518 m->u.kernel.match = match;
1519 *size += xt_compat_match_offset(match);
/*
 * compat_release_entry - drop the module references taken for every match
 * and for the target of a compat entry.  Counterpart of the lookups done
 * in check_compat_entry_size_and_hooks; does NOT run ->destroy hooks.
 */
1523 static void compat_release_entry(struct compat_ip6t_entry *e)
1525 struct ip6t_entry_target *t;
1526 struct xt_entry_match *ematch;
1528 /* Cleanup all matches */
1529 xt_ematch_foreach(ematch, e)
1530 module_put(ematch->u.kernel.match->me);
1531 t = compat_ip6t_get_target(e);
1532 module_put(t->u.kernel.target->me);
/*
 * check_compat_entry_size_and_hooks - validate one compat entry's
 * alignment, bounds and minimum size, resolve (and pin) its matches and
 * target, record the compat->native size offset for this entry, and note
 * which hook entry points / underflows it corresponds to in @newinfo.
 * On failure, module references taken so far are released.
 */
1536 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1537 struct xt_table_info *newinfo,
1539 const unsigned char *base,
1540 const unsigned char *limit,
1541 const unsigned int *hook_entries,
1542 const unsigned int *underflows,
1545 struct xt_entry_match *ematch;
1546 struct ip6t_entry_target *t;
1547 struct xt_target *target;
1548 unsigned int entry_offset;
1552 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be aligned and must fit entirely inside [base, limit). */
1553 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1554 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1555 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must leave room for at least an entry plus a target. */
1559 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1560 sizeof(struct compat_xt_entry_target)) {
1561 duprintf("checking: element %p size %u\n",
1566 /* For purposes of check_entry casting the compat entry is fine */
1567 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native entry will be. */
1571 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1572 entry_offset = (void *)e - (void *)base;
1574 xt_ematch_foreach(ematch, e) {
1575 ret = compat_find_calc_match(ematch, name,
1576 &e->ipv6, e->comefrom, &off);
1578 goto release_matches;
1582 t = compat_ip6t_get_target(e);
/* Autoload "ip6t_<name>" if the target isn't registered yet. */
1583 target = try_then_request_module(xt_find_target(AF_INET6,
1585 t->u.user.revision),
1586 "ip6t_%s", t->u.user.name);
1587 if (IS_ERR(target) || !target) {
1588 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1590 ret = target ? PTR_ERR(target) : -ENOENT;
1591 goto release_matches;
1593 t->u.kernel.target = target;
1595 off += xt_compat_target_offset(target);
/* Remember this entry's offset delta for later offset translation. */
1597 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1601 /* Check hooks & underflows */
1602 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1603 if ((unsigned char *)e - base == hook_entries[h])
1604 newinfo->hook_entry[h] = hook_entries[h];
1605 if ((unsigned char *)e - base == underflows[h])
1606 newinfo->underflow[h] = underflows[h];
1609 /* Clear counters and comefrom */
1610 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then every match ref taken above. */
1615 module_put(t->u.kernel.target->me);
1617 xt_ematch_foreach(ematch, e) {
1620 module_put(ematch->u.kernel.match->me);
/*
 * compat_copy_entry_from_user - expand one compat entry into the native
 * ip6t_entry layout at the kernel cursor *dstptr, converting each match
 * and the target, fixing up the entry's internal offsets, and shifting
 * any hook entry points / underflows that lie beyond this entry by the
 * number of bytes the entry grew.
 */
1626 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1627 unsigned int *size, const char *name,
1628 struct xt_table_info *newinfo, unsigned char *base)
1630 struct ip6t_entry_target *t;
1631 struct xt_target *target;
1632 struct ip6t_entry *de;
1633 unsigned int origsize;
1635 struct xt_entry_match *ematch;
1639 de = (struct ip6t_entry *)*dstptr;
1640 memcpy(de, e, sizeof(struct ip6t_entry));
1641 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1643 *dstptr += sizeof(struct ip6t_entry);
/* Here *size grows: native entries are larger than compat ones. */
1644 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1646 xt_ematch_foreach(ematch, e) {
1647 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Offsets move by the bytes gained so far (origsize - *size < 0). */
1651 de->target_offset = e->target_offset - (origsize - *size);
1652 t = compat_ip6t_get_target(e);
1653 target = t->u.kernel.target;
1654 xt_compat_target_from_user(t, dstptr, size);
1656 de->next_offset = e->next_offset - (origsize - *size);
/* Slide hook entry points/underflows located past this entry. */
1657 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1658 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1659 newinfo->hook_entry[h] -= origsize - *size;
1660 if ((unsigned char *)de - base < newinfo->underflow[h])
1661 newinfo->underflow[h] -= origsize - *size;
/*
 * compat_check_entry - second-pass validation of a translated (now
 * native-layout) entry: run every match's ->checkentry and then the
 * target check.  On failure, already-checked matches are cleaned up.
 */
1666 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1671 struct xt_mtchk_param mtpar;
1672 struct xt_entry_match *ematch;
1677 mtpar.entryinfo = &e->ipv6;
1678 mtpar.hook_mask = e->comefrom;
1679 mtpar.family = NFPROTO_IPV6;
1680 xt_ematch_foreach(ematch, e) {
1681 ret = check_match(ematch, &mtpar);
1683 goto cleanup_matches;
1687 ret = check_target(e, net, name);
1689 goto cleanup_matches;
/* Error unwind: destroy only the matches whose check succeeded. */
1693 xt_ematch_foreach(ematch, e) {
1696 cleanup_match(ematch, net);
/*
 * translate_compat_table - convert a whole compat ruleset blob into the
 * native table representation.
 *
 * Pass 1 (under xt_compat_lock): bounds/size-check every compat entry and
 * record per-entry size offsets.  Pass 2: copy each entry into a freshly
 * allocated native xt_table_info, fixing up hook entry points.  Then the
 * chains are verified (mark_source_chains) and every entry gets its full
 * ->checkentry pass; partial failures are unwound precisely.
 */
1702 translate_compat_table(struct net *net,
1704 unsigned int valid_hooks,
1705 struct xt_table_info **pinfo,
1707 unsigned int total_size,
1708 unsigned int number,
1709 unsigned int *hook_entries,
1710 unsigned int *underflows)
1713 struct xt_table_info *newinfo, *info;
1714 void *pos, *entry0, *entry1;
1715 struct compat_ip6t_entry *iter0;
1716 struct ip6t_entry *iter1;
1723 info->number = number;
1725 /* Init all hooks to impossible value. */
1726 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1727 info->hook_entry[i] = 0xFFFFFFFF;
1728 info->underflow[i] = 0xFFFFFFFF;
1731 duprintf("translate_compat_table: size %u\n", info->size);
1733 xt_compat_lock(AF_INET6);
1734 /* Walk through entries, checking offsets. */
1735 xt_entry_foreach(iter0, entry0, total_size) {
1736 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1737 entry0, entry0 + total_size, hook_entries, underflows,
1746 duprintf("translate_compat_table: %u not %u entries\n",
1751 /* Check hooks all assigned */
1752 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1753 /* Only hooks which are valid */
1754 if (!(valid_hooks & (1 << i)))
1756 if (info->hook_entry[i] == 0xFFFFFFFF) {
1757 duprintf("Invalid hook entry %u %u\n",
1758 i, hook_entries[i]);
1761 if (info->underflow[i] == 0xFFFFFFFF) {
1762 duprintf("Invalid underflow %u %u\n",
/* Pass 2: build the native table at the translated (larger) size. */
1769 newinfo = xt_alloc_table_info(size);
1773 newinfo->number = number;
1774 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1775 newinfo->hook_entry[i] = info->hook_entry[i];
1776 newinfo->underflow[i] = info->underflow[i];
1778 entry1 = newinfo->entries[raw_smp_processor_id()];
1781 xt_entry_foreach(iter0, entry0, total_size) {
1782 ret = compat_copy_entry_from_user(iter0, &pos,
1783 &size, name, newinfo, entry1);
/* Compat offset records are only needed during translation. */
1787 xt_compat_flush_offsets(AF_INET6);
1788 xt_compat_unlock(AF_INET6);
1793 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1797 xt_entry_foreach(iter1, entry1, newinfo->size) {
1798 ret = compat_check_entry(iter1, net, name);
1805 * The first i matches need cleanup_entry (calls ->destroy)
1806 * because they had called ->check already. The other j-i
1807 * entries need only release.
1811 xt_entry_foreach(iter0, entry0, newinfo->size) {
1816 compat_release_entry(iter0);
1818 xt_entry_foreach(iter1, entry1, newinfo->size) {
1821 cleanup_entry(iter1, net);
1823 xt_free_table_info(newinfo);
1827 /* And one copy for every other CPU */
1828 for_each_possible_cpu(i)
1829 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1830 memcpy(newinfo->entries[i], entry1, newinfo->size);
1834 xt_free_table_info(info);
/* Error paths: free the new table and release every pinned module ref. */
1838 xt_free_table_info(newinfo);
1840 xt_entry_foreach(iter0, entry0, total_size) {
1843 compat_release_entry(iter0);
1847 xt_compat_flush_offsets(AF_INET6);
1848 xt_compat_unlock(AF_INET6);
/*
 * compat_do_replace - 32-bit variant of the table-replace setsockopt:
 * copy in the compat header and blob, translate it to the native layout,
 * then hand off to __do_replace.  On any post-translation failure the
 * translated entries are cleaned up and the table info freed.
 */
1853 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1856 struct compat_ip6t_replace tmp;
1857 struct xt_table_info *newinfo;
1858 void *loc_cpu_entry;
1859 struct ip6t_entry *iter;
1861 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1864 /* overflow check */
1865 if (tmp.size >= INT_MAX / num_possible_cpus())
1867 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1870 newinfo = xt_alloc_table_info(tmp.size)
1874 /* choose the copy that is on our node/cpu */
1875 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1876 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1882 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1883 &newinfo, &loc_cpu_entry, tmp.size,
1884 tmp.num_entries, tmp.hook_entry,
1889 duprintf("compat_do_replace: Translated table\n");
/* Counters pointer arrives as a compat_uptr_t and must be widened. */
1891 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1892 tmp.num_counters, compat_ptr(tmp.counters));
1894 goto free_newinfo_untrans;
1897 free_newinfo_untrans:
1898 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1899 cleanup_entry(iter, net);
1901 xt_free_table_info(newinfo);
/*
 * compat_do_ip6t_set_ctl - setsockopt dispatcher for 32-bit callers.
 * Requires CAP_NET_ADMIN; routes REPLACE to compat_do_replace and
 * ADD_COUNTERS to do_add_counters with the compat flag set.
 */
1906 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1911 if (!capable(CAP_NET_ADMIN))
1915 case IP6T_SO_SET_REPLACE:
1916 ret = compat_do_replace(sock_net(sk), user, len);
1919 case IP6T_SO_SET_ADD_COUNTERS:
1920 ret = do_add_counters(sock_net(sk), user, len, 1);
1924 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userspace layout of the IP6T_SO_GET_ENTRIES request/reply:
 * table name plus a flexible array of compat-layout entries.
 */
1931 struct compat_ip6t_get_entries {
1932 char name[IP6T_TABLE_MAXNAMELEN];
1934 struct compat_ip6t_entry entrytable[0];
/*
 * compat_copy_entries_to_user - dump a whole table to a 32-bit caller:
 * snapshot the counters, then convert and copy each entry in turn via
 * compat_copy_entry_to_user.
 */
1938 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1939 void __user *userptr)
1941 struct xt_counters *counters;
1942 const struct xt_table_info *private = table->private;
1946 const void *loc_cpu_entry;
1948 struct ip6t_entry *iter;
1950 counters = alloc_counters(table);
1951 if (IS_ERR(counters))
1952 return PTR_ERR(counters);
1954 /* choose the copy that is on our node/cpu, ...
1955 * This choice is lazy (because current thread is
1956 * allowed to migrate to another cpu)
1958 loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* i indexes the matching counter snapshot entry for each rule. */
1961 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1962 ret = compat_copy_entry_to_user(iter, &pos,
1963 &size, counters, i++);
/*
 * compat_get_entries - IP6T_SO_GET_ENTRIES handler for 32-bit callers:
 * validate the request length against the table's compat-converted size
 * and, if they agree, dump the entries in compat layout.
 */
1973 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1977 struct compat_ip6t_get_entries get;
1980 if (*len < sizeof(get)) {
1981 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1985 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1988 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1989 duprintf("compat_get_entries: %u != %zu\n",
1990 *len, sizeof(get) + get.size);
/* compat offset bookkeeping is shared state; serialize against it. */
1994 xt_compat_lock(AF_INET6);
1995 t = xt_find_table_lock(net, AF_INET6, get.name);
1996 if (t && !IS_ERR(t)) {
1997 const struct xt_table_info *private = t->private;
1998 struct xt_table_info info;
1999 duprintf("t->private->number = %u\n", private->number);
2000 ret = compat_table_info(private, &info);
/* The caller's buffer must match the table's compat size exactly. */
2001 if (!ret && get.size == info.size) {
2002 ret = compat_copy_entries_to_user(private->size,
2003 t, uptr->entrytable);
2005 duprintf("compat_get_entries: I've got %u not %u!\n",
2006 private->size, get.size);
2009 xt_compat_flush_offsets(AF_INET6);
2013 ret = t ? PTR_ERR(t) : -ENOENT;
2015 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat getter falls back to the native one. */
2019 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ip6t_get_ctl - getsockopt dispatcher for 32-bit callers.
 * Requires CAP_NET_ADMIN; INFO and ENTRIES get compat handling, anything
 * else is delegated to the native do_ip6t_get_ctl.
 */
2022 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2026 if (!capable(CAP_NET_ADMIN))
2030 case IP6T_SO_GET_INFO:
2031 ret = get_info(sock_net(sk), user, len, 1);
2033 case IP6T_SO_GET_ENTRIES:
2034 ret = compat_get_entries(sock_net(sk), user, len);
2037 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * do_ip6t_set_ctl - native setsockopt dispatcher.  Requires
 * CAP_NET_ADMIN; routes REPLACE to do_replace and ADD_COUNTERS to
 * do_add_counters with the compat flag clear.
 */
2044 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2048 if (!capable(CAP_NET_ADMIN))
2052 case IP6T_SO_SET_REPLACE:
2053 ret = do_replace(sock_net(sk), user, len);
2056 case IP6T_SO_SET_ADD_COUNTERS:
2057 ret = do_add_counters(sock_net(sk), user, len, 0);
2061 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * do_ip6t_get_ctl - native getsockopt dispatcher.  Requires
 * CAP_NET_ADMIN.  Handles table info, entry dumps, and match/target
 * revision queries (which may autoload "ip6t_<name>" modules).
 */
2069 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2073 if (!capable(CAP_NET_ADMIN))
2077 case IP6T_SO_GET_INFO:
2078 ret = get_info(sock_net(sk), user, len, 0);
2081 case IP6T_SO_GET_ENTRIES:
2082 ret = get_entries(sock_net(sk), user, len);
2085 case IP6T_SO_GET_REVISION_MATCH:
2086 case IP6T_SO_GET_REVISION_TARGET: {
2087 struct ip6t_get_revision rev;
2090 if (*len != sizeof(rev)) {
2094 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2099 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2104 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2107 "ip6t_%s", rev.name);
2112 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * ip6t_register_table - register an ip6tables table with its initial
 * ruleset @repl.  Allocates the per-CPU table info, translates the
 * replacement blob, and hands it to x_tables.  Returns the registered
 * xt_table, or an ERR_PTR on failure (the table info is freed then).
 */
2119 struct xt_table *ip6t_register_table(struct net *net,
2120 const struct xt_table *table,
2121 const struct ip6t_replace *repl)
2124 struct xt_table_info *newinfo;
/* Empty placeholder info that xt_register_table swaps for newinfo. */
2125 struct xt_table_info bootstrap
2126 = { 0, 0, 0, { 0 }, { 0 }, { } };
2127 void *loc_cpu_entry;
2128 struct xt_table *new_table;
2130 newinfo = xt_alloc_table_info(repl->size);
2136 /* choose the copy on our node/cpu, but dont care about preemption */
2137 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2138 memcpy(loc_cpu_entry, repl->entries, repl->size);
2140 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2144 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2145 if (IS_ERR(new_table)) {
2146 ret = PTR_ERR(new_table);
2152 xt_free_table_info(newinfo);
2154 return ERR_PTR(ret);
/*
 * ip6t_unregister_table - tear down a registered table: unhook it from
 * x_tables, run cleanup_entry on every rule (dropping match/target
 * module refs), release the table-owner module ref taken for non-initial
 * rules, and free the per-CPU table info.
 */
2157 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2159 struct xt_table_info *private;
2160 void *loc_cpu_entry;
2161 struct module *table_owner = table->me;
2162 struct ip6t_entry *iter;
2164 private = xt_unregister_table(table);
2166 /* Decrease module usage counts and free resources */
2167 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2168 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2169 cleanup_entry(iter, net);
/* The owner ref is only held while user-added rules exist. */
2170 if (private->number > private->initial_entries)
2171 module_put(table_owner);
2172 xt_free_table_info(private);
2175 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* (an invert flag parameter follows; its line is elided from this view) */
2177 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2178 u_int8_t type, u_int8_t code,
2181 return (type == test_type && code >= min_code && code <= max_code)
/*
 * icmp6_match - xt_match handler for the built-in icmp6 match: compare
 * the packet's ICMPv6 type/code against the rule's configured range.
 * Non-first fragments never match; a truncated ICMPv6 header hotdrops
 * the packet.
 */
2186 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2188 const struct icmp6hdr *ic;
2189 struct icmp6hdr _icmph;
2190 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2192 /* Must not be a fragment. */
2193 if (par->fragoff != 0)
/* Linearize-safe header read into the on-stack _icmph copy. */
2196 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2198 /* We've been asked to examine this packet, and we
2199 * can't. Hence, no choice but to drop.
2201 duprintf("Dropping evil ICMP tinygram.\n");
2202 *par->hotdrop = true;
2206 return icmp6_type_code_match(icmpinfo->type,
2209 ic->icmp6_type, ic->icmp6_code,
2210 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2213 /* Called when user tries to insert an entry of this type. */
/* Rejects rules that set invflag bits other than IP6T_ICMP_INV. */
2214 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2216 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2218 /* Must specify no unknown invflags */
2219 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2222 /* The built-in targets: standard (NULL) and error. */
/* Standard target: verdict is an int stored inline (no ->target fn). */
2223 static struct xt_target ip6t_standard_target __read_mostly = {
2224 .name = IP6T_STANDARD_TARGET,
2225 .targetsize = sizeof(int),
2226 .family = NFPROTO_IPV6,
2227 #ifdef CONFIG_COMPAT
/* Verdict values need width translation for 32-bit userspace. */
2228 .compatsize = sizeof(compat_int_t),
2229 .compat_from_user = compat_standard_from_user,
2230 .compat_to_user = compat_standard_to_user,
/* Error target: placed at chain ends that should never be reached. */
2234 static struct xt_target ip6t_error_target __read_mostly = {
2235 .name = IP6T_ERROR_TARGET,
2236 .target = ip6t_error,
2237 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2238 .family = NFPROTO_IPV6,
/*
 * setsockopt/getsockopt registration: wires the IP6T_SO_* control range
 * to the native dispatchers and, under CONFIG_COMPAT, to the 32-bit ones.
 */
2241 static struct nf_sockopt_ops ip6t_sockopts = {
2243 .set_optmin = IP6T_BASE_CTL,
2244 .set_optmax = IP6T_SO_SET_MAX+1,
2245 .set = do_ip6t_set_ctl,
2246 #ifdef CONFIG_COMPAT
2247 .compat_set = compat_do_ip6t_set_ctl,
2249 .get_optmin = IP6T_BASE_CTL,
2250 .get_optmax = IP6T_SO_GET_MAX+1,
2251 .get = do_ip6t_get_ctl,
2252 #ifdef CONFIG_COMPAT
2253 .compat_get = compat_do_ip6t_get_ctl,
2255 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration (handler: icmp6_match above). */
2258 static struct xt_match icmp6_matchstruct __read_mostly = {
2260 .match = icmp6_match,
2261 .matchsize = sizeof(struct ip6t_icmp),
2262 .checkentry = icmp6_checkentry,
2263 .proto = IPPROTO_ICMPV6,
2264 .family = NFPROTO_IPV6,
/* Per-namespace init: set up the IPv6 x_tables state for @net. */
2267 static int __net_init ip6_tables_net_init(struct net *net)
2269 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-namespace teardown: release the IPv6 x_tables state for @net. */
2272 static void __net_exit ip6_tables_net_exit(struct net *net)
2274 xt_proto_fini(net, NFPROTO_IPV6);
/* Hook the per-namespace init/exit pair into the pernet machinery. */
2277 static struct pernet_operations ip6_tables_net_ops = {
2278 .init = ip6_tables_net_init,
2279 .exit = ip6_tables_net_exit,
/*
 * ip6_tables_init - module init: register pernet ops, the two built-in
 * targets, the icmp6 match, and finally the sockopt interface.  Each
 * failure path unwinds everything registered before it, in reverse order.
 */
2282 static int __init ip6_tables_init(void)
2286 ret = register_pernet_subsys(&ip6_tables_net_ops);
2290 /* No one else will be downing sem now, so we won't sleep */
2291 ret = xt_register_target(&ip6t_standard_target);
2294 ret = xt_register_target(&ip6t_error_target);
2297 ret = xt_register_match(&icmp6_matchstruct);
2301 /* Register setsockopt */
2302 ret = nf_register_sockopt(&ip6t_sockopts);
2306 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, reverse of registration order. */
2310 xt_unregister_match(&icmp6_matchstruct);
2312 xt_unregister_target(&ip6t_error_target);
2314 xt_unregister_target(&ip6t_standard_target);
2316 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything ip6_tables_init set up, in reverse. */
2321 static void __exit ip6_tables_fini(void)
2323 nf_unregister_sockopt(&ip6t_sockopts);
2325 xt_unregister_match(&icmp6_matchstruct);
2326 xt_unregister_target(&ip6t_error_target);
2327 xt_unregister_target(&ip6t_standard_target);
2329 unregister_pernet_subsys(&ip6_tables_net_ops);
2333 * Find the offset to the specified header, or the protocol number of the last
2334 * header if target < 0. "Last header" is the transport protocol header, ESP, or
2337 * If the target header is found, its offset is set in *offset and its protocol
2338 * number is returned. Otherwise, -1 is returned.
2340 * If the first fragment doesn't contain the final protocol header or
2341 * NEXTHDR_NONE it is considered invalid.
2343 * Note that a non-first fragment is a special case in which "the protocol number
2344 * of the last header" is the "next header" field of the Fragment header. In that
2345 * case, *offset is meaningless and the fragment offset is stored in *fragoff if fragoff
/*
 * ipv6_find_hdr - walk the IPv6 extension-header chain of @skb looking
 * for header type @target (see the block comment above for the full
 * contract, including the target < 0 and fragment special cases).
 */
2349 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2350 int target, unsigned short *fragoff)
/* Start just past the fixed IPv6 header. */
2352 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2353 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2354 unsigned int len = skb->len - start;
2359 while (nexthdr != target) {
2360 struct ipv6_opt_hdr _hdr, *hp;
2361 unsigned int hdrlen;
/* Hitting a non-extension header means the chain ended without @target. */
2363 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2369 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2372 if (nexthdr == NEXTHDR_FRAGMENT) {
2373 unsigned short _frag_off;
2375 fp = skb_header_pointer(skb,
2376 start+offsetof(struct frag_hdr,
/* Mask off the low 3 flag bits; the rest is the fragment offset. */
2383 _frag_off = ntohs(*fp) & ~0x7;
2386 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2387 hp->nexthdr == NEXTHDR_NONE)) {
2389 *fragoff = _frag_off;
/* AUTH header length is in 32-bit words, unlike other ext headers. */
2395 } else if (nexthdr == NEXTHDR_AUTH)
2396 hdrlen = (hp->hdrlen + 2) << 2;
2398 hdrlen = ipv6_optlen(hp);
2400 nexthdr = hp->nexthdr;
/* Public API exported to other netfilter modules (ip6table_filter etc.). */
2409 EXPORT_SYMBOL(ip6t_register_table);
2410 EXPORT_SYMBOL(ip6t_unregister_table);
2411 EXPORT_SYMBOL(ip6t_do_table);
2412 EXPORT_SYMBOL(ip6t_ext_hdr);
2413 EXPORT_SYMBOL(ipv6_find_hdr);
2415 module_init(ip6_tables_init);
2416 module_exit(ip6_tables_fini);