/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf(): packet-path debug output, compiled out unless
 * DEBUG_IP_FIREWALL is defined above. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf(): user/config-path debug output, gated on
 * DEBUG_IP_FIREWALL_USER. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT(): non-fatal sanity check — logs, does not panic. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
209 /* Performance critical - called for every packet */
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
/* Return the rule entry at byte @offset from the table @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
234 static const struct ip6t_ip6 uncond;
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct accessor for an entry's target record. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
273 .logflags = NF_LOG_MASK,
278 /* Mildly perf critical (only if packet tracing is on) */
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
310 static void trace_packet(const struct sk_buff *skb,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
322 unsigned int rulenum = 0;
324 table_base = private->entries[smp_processor_id()];
325 root = get_entry(table_base, private->hook_entry[hook]);
327 hookname = chainname = hooknames[hook];
328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
331 if (get_chainname_rulenum(iter, e, hookname,
332 &chainname, &comment, &rulenum) != 0)
335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
336 "TRACE: %s:%s:%s:%u ",
337 tablename, chainname, comment, rulenum);
341 static inline __pure struct ip6t_entry *
342 ip6t_next_entry(const struct ip6t_entry *entry)
344 return (void *)entry + entry->next_offset;
347 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
349 ip6t_do_table(struct sk_buff *skb,
351 const struct net_device *in,
352 const struct net_device *out,
353 struct xt_table *table)
355 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 bool hotdrop = false;
359 /* Initializing verdict to NF_DROP keeps gcc happy. */
360 unsigned int verdict = NF_DROP;
361 const char *indev, *outdev;
362 const void *table_base;
363 struct ip6t_entry *e, *back;
364 const struct xt_table_info *private;
365 struct xt_match_param mtpar;
366 struct xt_target_param tgpar;
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
377 mtpar.hotdrop = &hotdrop;
378 mtpar.in = tgpar.in = in;
379 mtpar.out = tgpar.out = out;
380 mtpar.family = tgpar.family = NFPROTO_IPV6;
381 mtpar.hooknum = tgpar.hooknum = hook;
383 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
386 private = table->private;
387 table_base = private->entries[smp_processor_id()];
389 e = get_entry(table_base, private->hook_entry[hook]);
391 /* For return from builtin chain */
392 back = get_entry(table_base, private->underflow[hook]);
395 const struct ip6t_entry_target *t;
396 const struct xt_entry_match *ematch;
400 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
401 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
403 e = ip6t_next_entry(e);
407 xt_ematch_foreach(ematch, e)
408 if (do_match(ematch, skb, &mtpar) != 0)
411 ADD_COUNTER(e->counters,
412 ntohs(ipv6_hdr(skb)->payload_len) +
413 sizeof(struct ipv6hdr), 1);
415 t = ip6t_get_target_c(e);
416 IP_NF_ASSERT(t->u.kernel.target);
418 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
419 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
420 /* The packet is traced: log it */
421 if (unlikely(skb->nf_trace))
422 trace_packet(skb, hook, in, out,
423 table->name, private, e);
425 /* Standard target? */
426 if (!t->u.kernel.target->target) {
429 v = ((struct ip6t_standard_target *)t)->verdict;
431 /* Pop from stack? */
432 if (v != IP6T_RETURN) {
433 verdict = (unsigned)(-v) - 1;
437 back = get_entry(table_base, back->comefrom);
440 if (table_base + v != ip6t_next_entry(e) &&
441 !(e->ipv6.flags & IP6T_F_GOTO)) {
442 /* Save old back ptr in next entry */
443 struct ip6t_entry *next = ip6t_next_entry(e);
444 next->comefrom = (void *)back - table_base;
445 /* set back pointer to next entry */
449 e = get_entry(table_base, v);
453 /* Targets which reenter must return
455 tgpar.target = t->u.kernel.target;
456 tgpar.targinfo = t->data;
458 #ifdef CONFIG_NETFILTER_DEBUG
459 tb_comefrom = 0xeeeeeeec;
461 verdict = t->u.kernel.target->target(skb, &tgpar);
463 #ifdef CONFIG_NETFILTER_DEBUG
464 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
465 printk("Target %s reentered!\n",
466 t->u.kernel.target->name);
469 tb_comefrom = 0x57acc001;
471 if (verdict == IP6T_CONTINUE)
472 e = ip6t_next_entry(e);
478 #ifdef CONFIG_NETFILTER_DEBUG
479 tb_comefrom = NETFILTER_LINK_POISON;
481 xt_info_rdunlock_bh();
483 #ifdef DEBUG_ALLOW_ALL
494 /* Figures out from what hook each rule can be called: returns 0 if
495 there are loops. Puts hook bitmask in comefrom. */
497 mark_source_chains(const struct xt_table_info *newinfo,
498 unsigned int valid_hooks, void *entry0)
502 /* No recursion; use packet counter to save back ptrs (reset
503 to 0 as we leave), and comefrom to save source hook bitmask */
504 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
505 unsigned int pos = newinfo->hook_entry[hook];
506 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
508 if (!(valid_hooks & (1 << hook)))
511 /* Set initial back pointer. */
512 e->counters.pcnt = pos;
515 const struct ip6t_standard_target *t
516 = (void *)ip6t_get_target_c(e);
517 int visited = e->comefrom & (1 << hook);
519 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
520 printk("iptables: loop hook %u pos %u %08X.\n",
521 hook, pos, e->comefrom);
524 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
526 /* Unconditional return/END. */
527 if ((e->target_offset == sizeof(struct ip6t_entry) &&
528 (strcmp(t->target.u.user.name,
529 IP6T_STANDARD_TARGET) == 0) &&
531 unconditional(&e->ipv6)) || visited) {
532 unsigned int oldpos, size;
534 if ((strcmp(t->target.u.user.name,
535 IP6T_STANDARD_TARGET) == 0) &&
536 t->verdict < -NF_MAX_VERDICT - 1) {
537 duprintf("mark_source_chains: bad "
538 "negative verdict (%i)\n",
543 /* Return: backtrack through the last
546 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
547 #ifdef DEBUG_IP_FIREWALL_USER
549 & (1 << NF_INET_NUMHOOKS)) {
550 duprintf("Back unset "
557 pos = e->counters.pcnt;
558 e->counters.pcnt = 0;
560 /* We're at the start. */
564 e = (struct ip6t_entry *)
566 } while (oldpos == pos + e->next_offset);
569 size = e->next_offset;
570 e = (struct ip6t_entry *)
571 (entry0 + pos + size);
572 e->counters.pcnt = pos;
575 int newpos = t->verdict;
577 if (strcmp(t->target.u.user.name,
578 IP6T_STANDARD_TARGET) == 0 &&
580 if (newpos > newinfo->size -
581 sizeof(struct ip6t_entry)) {
582 duprintf("mark_source_chains: "
583 "bad verdict (%i)\n",
587 /* This a jump; chase it. */
588 duprintf("Jump rule %u -> %u\n",
591 /* ... this is a fallthru */
592 newpos = pos + e->next_offset;
594 e = (struct ip6t_entry *)
596 e->counters.pcnt = pos;
601 duprintf("Finished chain %u\n", hook);
606 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
608 struct xt_mtdtor_param par;
611 par.match = m->u.kernel.match;
612 par.matchinfo = m->data;
613 par.family = NFPROTO_IPV6;
614 if (par.match->destroy != NULL)
615 par.match->destroy(&par);
616 module_put(par.match->me);
620 check_entry(const struct ip6t_entry *e, const char *name)
622 const struct ip6t_entry_target *t;
624 if (!ip6_checkentry(&e->ipv6)) {
625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
629 if (e->target_offset + sizeof(struct ip6t_entry_target) >
633 t = ip6t_get_target_c(e);
634 if (e->target_offset + t->u.target_size > e->next_offset)
640 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
645 par->match = m->u.kernel.match;
646 par->matchinfo = m->data;
648 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
649 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
651 duprintf("ip_tables: check failed for `%s'.\n",
659 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
661 struct xt_match *match;
664 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
666 "ip6t_%s", m->u.user.name);
667 if (IS_ERR(match) || !match) {
668 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
669 return match ? PTR_ERR(match) : -ENOENT;
671 m->u.kernel.match = match;
673 ret = check_match(m, par);
679 module_put(m->u.kernel.match->me);
683 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
685 struct ip6t_entry_target *t = ip6t_get_target(e);
686 struct xt_tgchk_param par = {
690 .target = t->u.kernel.target,
692 .hook_mask = e->comefrom,
693 .family = NFPROTO_IPV6,
697 t = ip6t_get_target(e);
698 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
699 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
701 duprintf("ip_tables: check failed for `%s'.\n",
702 t->u.kernel.target->name);
709 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
712 struct ip6t_entry_target *t;
713 struct xt_target *target;
716 struct xt_mtchk_param mtpar;
717 struct xt_entry_match *ematch;
719 ret = check_entry(e, name);
726 mtpar.entryinfo = &e->ipv6;
727 mtpar.hook_mask = e->comefrom;
728 mtpar.family = NFPROTO_IPV6;
729 xt_ematch_foreach(ematch, e) {
730 ret = find_check_match(ematch, &mtpar);
732 goto cleanup_matches;
736 t = ip6t_get_target(e);
737 target = try_then_request_module(xt_find_target(AF_INET6,
740 "ip6t_%s", t->u.user.name);
741 if (IS_ERR(target) || !target) {
742 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
743 ret = target ? PTR_ERR(target) : -ENOENT;
744 goto cleanup_matches;
746 t->u.kernel.target = target;
748 ret = check_target(e, net, name);
753 module_put(t->u.kernel.target->me);
755 xt_ematch_foreach(ematch, e) {
758 cleanup_match(ematch, net);
763 static bool check_underflow(const struct ip6t_entry *e)
765 const struct ip6t_entry_target *t;
766 unsigned int verdict;
768 if (!unconditional(&e->ipv6))
770 t = ip6t_get_target_c(e);
771 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
773 verdict = ((struct ip6t_standard_target *)t)->verdict;
774 verdict = -verdict - 1;
775 return verdict == NF_DROP || verdict == NF_ACCEPT;
779 check_entry_size_and_hooks(struct ip6t_entry *e,
780 struct xt_table_info *newinfo,
781 const unsigned char *base,
782 const unsigned char *limit,
783 const unsigned int *hook_entries,
784 const unsigned int *underflows,
785 unsigned int valid_hooks)
789 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
790 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
791 duprintf("Bad offset %p\n", e);
796 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
797 duprintf("checking: element %p size %u\n",
802 /* Check hooks & underflows */
803 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
804 if (!(valid_hooks & (1 << h)))
806 if ((unsigned char *)e - base == hook_entries[h])
807 newinfo->hook_entry[h] = hook_entries[h];
808 if ((unsigned char *)e - base == underflows[h]) {
809 if (!check_underflow(e)) {
810 pr_err("Underflows must be unconditional and "
811 "use the STANDARD target with "
815 newinfo->underflow[h] = underflows[h];
819 /* Clear counters and comefrom */
820 e->counters = ((struct xt_counters) { 0, 0 });
825 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
827 struct xt_tgdtor_param par;
828 struct ip6t_entry_target *t;
829 struct xt_entry_match *ematch;
831 /* Cleanup all matches */
832 xt_ematch_foreach(ematch, e)
833 cleanup_match(ematch, net);
834 t = ip6t_get_target(e);
837 par.target = t->u.kernel.target;
838 par.targinfo = t->data;
839 par.family = NFPROTO_IPV6;
840 if (par.target->destroy != NULL)
841 par.target->destroy(&par);
842 module_put(par.target->me);
845 /* Checks and translates the user-supplied table segment (held in
848 translate_table(struct net *net,
850 unsigned int valid_hooks,
851 struct xt_table_info *newinfo,
855 const unsigned int *hook_entries,
856 const unsigned int *underflows)
858 struct ip6t_entry *iter;
862 newinfo->size = size;
863 newinfo->number = number;
865 /* Init all hooks to impossible value. */
866 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
867 newinfo->hook_entry[i] = 0xFFFFFFFF;
868 newinfo->underflow[i] = 0xFFFFFFFF;
871 duprintf("translate_table: size %u\n", newinfo->size);
873 /* Walk through entries, checking offsets. */
874 xt_entry_foreach(iter, entry0, newinfo->size) {
875 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
876 entry0 + size, hook_entries, underflows, valid_hooks);
883 duprintf("translate_table: %u not %u entries\n",
888 /* Check hooks all assigned */
889 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
890 /* Only hooks which are valid */
891 if (!(valid_hooks & (1 << i)))
893 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
894 duprintf("Invalid hook entry %u %u\n",
898 if (newinfo->underflow[i] == 0xFFFFFFFF) {
899 duprintf("Invalid underflow %u %u\n",
905 if (!mark_source_chains(newinfo, valid_hooks, entry0))
908 /* Finally, each sanity check must pass */
910 xt_entry_foreach(iter, entry0, newinfo->size) {
911 ret = find_check_entry(iter, net, name, size);
918 xt_entry_foreach(iter, entry0, newinfo->size) {
921 cleanup_entry(iter, net);
926 /* And one copy for every other CPU */
927 for_each_possible_cpu(i) {
928 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
929 memcpy(newinfo->entries[i], entry0, newinfo->size);
936 get_counters(const struct xt_table_info *t,
937 struct xt_counters counters[])
939 struct ip6t_entry *iter;
944 /* Instead of clearing (by a previous call to memset())
945 * the counters and using adds, we set the counters
946 * with data used by 'current' CPU
948 * Bottom half has to be disabled to prevent deadlock
949 * if new softirq were to run and call ipt_do_table
952 curcpu = smp_processor_id();
955 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
956 SET_COUNTER(counters[i], iter->counters.bcnt,
957 iter->counters.pcnt);
961 for_each_possible_cpu(cpu) {
966 xt_entry_foreach(iter, t->entries[cpu], t->size) {
967 ADD_COUNTER(counters[i], iter->counters.bcnt,
968 iter->counters.pcnt);
971 xt_info_wrunlock(cpu);
976 static struct xt_counters *alloc_counters(const struct xt_table *table)
978 unsigned int countersize;
979 struct xt_counters *counters;
980 const struct xt_table_info *private = table->private;
982 /* We need atomic snapshot of counters: rest doesn't change
983 (other than comefrom, which userspace doesn't care
985 countersize = sizeof(struct xt_counters) * private->number;
986 counters = vmalloc_node(countersize, numa_node_id());
988 if (counters == NULL)
989 return ERR_PTR(-ENOMEM);
991 get_counters(private, counters);
997 copy_entries_to_user(unsigned int total_size,
998 const struct xt_table *table,
999 void __user *userptr)
1001 unsigned int off, num;
1002 const struct ip6t_entry *e;
1003 struct xt_counters *counters;
1004 const struct xt_table_info *private = table->private;
1006 const void *loc_cpu_entry;
1008 counters = alloc_counters(table);
1009 if (IS_ERR(counters))
1010 return PTR_ERR(counters);
1012 /* choose the copy that is on our node/cpu, ...
1013 * This choice is lazy (because current thread is
1014 * allowed to migrate to another cpu)
1016 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1017 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1022 /* FIXME: use iterator macros --RR */
1023 /* ... then go back and fix counters and names */
1024 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1026 const struct ip6t_entry_match *m;
1027 const struct ip6t_entry_target *t;
1029 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1030 if (copy_to_user(userptr + off
1031 + offsetof(struct ip6t_entry, counters),
1033 sizeof(counters[num])) != 0) {
1038 for (i = sizeof(struct ip6t_entry);
1039 i < e->target_offset;
1040 i += m->u.match_size) {
1043 if (copy_to_user(userptr + off + i
1044 + offsetof(struct ip6t_entry_match,
1046 m->u.kernel.match->name,
1047 strlen(m->u.kernel.match->name)+1)
1054 t = ip6t_get_target_c(e);
1055 if (copy_to_user(userptr + off + e->target_offset
1056 + offsetof(struct ip6t_entry_target,
1058 t->u.kernel.target->name,
1059 strlen(t->u.kernel.target->name)+1) != 0) {
1070 #ifdef CONFIG_COMPAT
1071 static void compat_standard_from_user(void *dst, const void *src)
1073 int v = *(compat_int_t *)src;
1076 v += xt_compat_calc_jump(AF_INET6, v);
1077 memcpy(dst, &v, sizeof(v));
1080 static int compat_standard_to_user(void __user *dst, const void *src)
1082 compat_int_t cv = *(int *)src;
1085 cv -= xt_compat_calc_jump(AF_INET6, cv);
1086 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1089 static int compat_calc_entry(const struct ip6t_entry *e,
1090 const struct xt_table_info *info,
1091 const void *base, struct xt_table_info *newinfo)
1093 const struct xt_entry_match *ematch;
1094 const struct ip6t_entry_target *t;
1095 unsigned int entry_offset;
1098 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1099 entry_offset = (void *)e - base;
1100 xt_ematch_foreach(ematch, e)
1101 off += xt_compat_match_offset(ematch->u.kernel.match);
1102 t = ip6t_get_target_c(e);
1103 off += xt_compat_target_offset(t->u.kernel.target);
1104 newinfo->size -= off;
1105 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1109 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1110 if (info->hook_entry[i] &&
1111 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1112 newinfo->hook_entry[i] -= off;
1113 if (info->underflow[i] &&
1114 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1115 newinfo->underflow[i] -= off;
1120 static int compat_table_info(const struct xt_table_info *info,
1121 struct xt_table_info *newinfo)
1123 struct ip6t_entry *iter;
1124 void *loc_cpu_entry;
1127 if (!newinfo || !info)
1130 /* we dont care about newinfo->entries[] */
1131 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1132 newinfo->initial_entries = 0;
1133 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1134 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1135 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1143 static int get_info(struct net *net, void __user *user,
1144 const int *len, int compat)
1146 char name[IP6T_TABLE_MAXNAMELEN];
1150 if (*len != sizeof(struct ip6t_getinfo)) {
1151 duprintf("length %u != %zu\n", *len,
1152 sizeof(struct ip6t_getinfo));
1156 if (copy_from_user(name, user, sizeof(name)) != 0)
1159 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1160 #ifdef CONFIG_COMPAT
1162 xt_compat_lock(AF_INET6);
1164 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1165 "ip6table_%s", name);
1166 if (t && !IS_ERR(t)) {
1167 struct ip6t_getinfo info;
1168 const struct xt_table_info *private = t->private;
1169 #ifdef CONFIG_COMPAT
1170 struct xt_table_info tmp;
1173 ret = compat_table_info(private, &tmp);
1174 xt_compat_flush_offsets(AF_INET6);
1178 info.valid_hooks = t->valid_hooks;
1179 memcpy(info.hook_entry, private->hook_entry,
1180 sizeof(info.hook_entry));
1181 memcpy(info.underflow, private->underflow,
1182 sizeof(info.underflow));
1183 info.num_entries = private->number;
1184 info.size = private->size;
1185 strcpy(info.name, name);
1187 if (copy_to_user(user, &info, *len) != 0)
1195 ret = t ? PTR_ERR(t) : -ENOENT;
1196 #ifdef CONFIG_COMPAT
1198 xt_compat_unlock(AF_INET6);
1204 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1208 struct ip6t_get_entries get;
1211 if (*len < sizeof(get)) {
1212 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1215 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1217 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1218 duprintf("get_entries: %u != %zu\n",
1219 *len, sizeof(get) + get.size);
1223 t = xt_find_table_lock(net, AF_INET6, get.name);
1224 if (t && !IS_ERR(t)) {
1225 struct xt_table_info *private = t->private;
1226 duprintf("t->private->number = %u\n", private->number);
1227 if (get.size == private->size)
1228 ret = copy_entries_to_user(private->size,
1229 t, uptr->entrytable);
1231 duprintf("get_entries: I've got %u not %u!\n",
1232 private->size, get.size);
1238 ret = t ? PTR_ERR(t) : -ENOENT;
1244 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1245 struct xt_table_info *newinfo, unsigned int num_counters,
1246 void __user *counters_ptr)
1250 struct xt_table_info *oldinfo;
1251 struct xt_counters *counters;
1252 const void *loc_cpu_old_entry;
1253 struct ip6t_entry *iter;
1256 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1263 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1264 "ip6table_%s", name);
1265 if (!t || IS_ERR(t)) {
1266 ret = t ? PTR_ERR(t) : -ENOENT;
1267 goto free_newinfo_counters_untrans;
1271 if (valid_hooks != t->valid_hooks) {
1272 duprintf("Valid hook crap: %08X vs %08X\n",
1273 valid_hooks, t->valid_hooks);
1278 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1282 /* Update module usage count based on number of rules */
1283 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1284 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1285 if ((oldinfo->number > oldinfo->initial_entries) ||
1286 (newinfo->number <= oldinfo->initial_entries))
1288 if ((oldinfo->number > oldinfo->initial_entries) &&
1289 (newinfo->number <= oldinfo->initial_entries))
1292 /* Get the old counters, and synchronize with replace */
1293 get_counters(oldinfo, counters);
1295 /* Decrease module usage counts and free resource */
1296 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1297 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1298 cleanup_entry(iter, net);
1300 xt_free_table_info(oldinfo);
1301 if (copy_to_user(counters_ptr, counters,
1302 sizeof(struct xt_counters) * num_counters) != 0)
1311 free_newinfo_counters_untrans:
1318 do_replace(struct net *net, const void __user *user, unsigned int len)
1321 struct ip6t_replace tmp;
1322 struct xt_table_info *newinfo;
1323 void *loc_cpu_entry;
1324 struct ip6t_entry *iter;
1326 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1329 /* overflow check */
1330 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1333 newinfo = xt_alloc_table_info(tmp.size);
1337 /* choose the copy that is on our node/cpu */
1338 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1339 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1345 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1346 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1347 tmp.hook_entry, tmp.underflow);
1351 duprintf("ip_tables: Translated table\n");
1353 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1354 tmp.num_counters, tmp.counters);
1356 goto free_newinfo_untrans;
1359 free_newinfo_untrans:
1360 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1361 cleanup_entry(iter, net);
1363 xt_free_table_info(newinfo);
1368 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1371 unsigned int i, curcpu;
1372 struct xt_counters_info tmp;
1373 struct xt_counters *paddc;
1374 unsigned int num_counters;
1379 const struct xt_table_info *private;
1381 const void *loc_cpu_entry;
1382 struct ip6t_entry *iter;
1383 #ifdef CONFIG_COMPAT
1384 struct compat_xt_counters_info compat_tmp;
1388 size = sizeof(struct compat_xt_counters_info);
1393 size = sizeof(struct xt_counters_info);
1396 if (copy_from_user(ptmp, user, size) != 0)
1399 #ifdef CONFIG_COMPAT
1401 num_counters = compat_tmp.num_counters;
1402 name = compat_tmp.name;
1406 num_counters = tmp.num_counters;
1410 if (len != size + num_counters * sizeof(struct xt_counters))
1413 paddc = vmalloc_node(len - size, numa_node_id());
1417 if (copy_from_user(paddc, user + size, len - size) != 0) {
1422 t = xt_find_table_lock(net, AF_INET6, name);
1423 if (!t || IS_ERR(t)) {
1424 ret = t ? PTR_ERR(t) : -ENOENT;
1430 private = t->private;
1431 if (private->number != num_counters) {
1433 goto unlock_up_free;
1437 /* Choose the copy that is on our node */
1438 curcpu = smp_processor_id();
1439 xt_info_wrlock(curcpu);
1440 loc_cpu_entry = private->entries[curcpu];
1441 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1442 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1445 xt_info_wrunlock(curcpu);
1457 #ifdef CONFIG_COMPAT
1458 struct compat_ip6t_replace {
1459 char name[IP6T_TABLE_MAXNAMELEN];
1463 u32 hook_entry[NF_INET_NUMHOOKS];
1464 u32 underflow[NF_INET_NUMHOOKS];
1466 compat_uptr_t counters; /* struct ip6t_counters * */
1467 struct compat_ip6t_entry entries[0];
/*
 * Copy one kernel ip6t_entry out to a 32-bit userspace buffer in
 * compat layout: the fixed header and its counters first, then each
 * match and finally the target via the xt_compat_*_to_user helpers,
 * rewriting target_offset/next_offset to account for the size shrink
 * (origsize - *size) between native and compat representations.
 * NOTE(review): excerpt elides some lines (ret/i parameters, error
 * returns, closing braces); visible lines kept verbatim.
 */
1471 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1472 unsigned int *size, struct xt_counters *counters,
1475 struct ip6t_entry_target *t;
1476 struct compat_ip6t_entry __user *ce;
1477 u_int16_t target_offset, next_offset;
1478 compat_uint_t origsize;
1479 const struct xt_entry_match *ematch;
1483 ce = (struct compat_ip6t_entry __user *)*dstptr;
1484 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1485 copy_to_user(&ce->counters, &counters[i],
1486 sizeof(counters[i])) != 0)
1489 *dstptr += sizeof(struct compat_ip6t_entry);
1490 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1492 xt_ematch_foreach(ematch, e) {
1493 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the compat conversion saved so far. */
1497 target_offset = e->target_offset - (origsize - *size);
1498 t = ip6t_get_target(e);
1499 ret = xt_compat_target_to_user(t, dstptr, size);
1502 next_offset = e->next_offset - (origsize - *size);
1503 if (put_user(target_offset, &ce->target_offset) != 0 ||
1504 put_user(next_offset, &ce->next_offset) != 0)
/*
 * Look up (auto-loading the "ip6t_<name>" module if needed) the xt_match
 * named by a compat entry's match, store it in m->u.kernel.match, and
 * accumulate the native-vs-compat size delta into *size.
 * NOTE(review): excerpt elides some lines (name parameter, braces,
 * final return); visible lines kept verbatim.
 */
1510 compat_find_calc_match(struct ip6t_entry_match *m,
1512 const struct ip6t_ip6 *ipv6,
1513 unsigned int hookmask,
1516 struct xt_match *match;
1518 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1519 m->u.user.revision),
1520 "ip6t_%s", m->u.user.name);
1521 if (IS_ERR(match) || !match) {
1522 duprintf("compat_check_calc_match: `%s' not found\n",
1524 return match ? PTR_ERR(match) : -ENOENT;
1526 m->u.kernel.match = match;
1527 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken on every match and on the target of
 * a compat entry (the counterpart of the lookups done during checking;
 * unlike cleanup_entry() this does not call ->destroy).
 */
1531 static void compat_release_entry(struct compat_ip6t_entry *e)
1533 struct ip6t_entry_target *t;
1534 struct xt_entry_match *ematch;
1536 /* Cleanup all matches */
1537 xt_ematch_foreach(ematch, e)
1538 module_put(ematch->u.kernel.match->me);
1539 t = compat_ip6t_get_target(e);
1540 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry: bounds/alignment checks,
 * generic entry checks, match/target lookup (taking module refs),
 * registration of the native-vs-compat offset delta, and recording of
 * hook entry/underflow positions in newinfo. On failure the already
 * acquired match/target module references are released.
 * NOTE(review): excerpt elides some lines (ret/off/h locals, several
 * error returns/labels, closing braces); visible lines kept verbatim.
 */
1544 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1545 struct xt_table_info *newinfo,
1547 const unsigned char *base,
1548 const unsigned char *limit,
1549 const unsigned int *hook_entries,
1550 const unsigned int *underflows,
1553 struct xt_entry_match *ematch;
1554 struct ip6t_entry_target *t;
1555 struct xt_target *target;
1556 unsigned int entry_offset;
1560 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries or ones whose header would run past limit. */
1561 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1562 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1563 duprintf("Bad offset %p, limit = %p\n", e, limit);
1567 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1568 sizeof(struct compat_xt_entry_target)) {
1569 duprintf("checking: element %p size %u\n",
1574 /* For purposes of check_entry casting the compat entry is fine */
1575 ret = check_entry((struct ip6t_entry *)e, name);
1579 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1580 entry_offset = (void *)e - (void *)base;
1582 xt_ematch_foreach(ematch, e) {
1583 ret = compat_find_calc_match(ematch, name,
1584 &e->ipv6, e->comefrom, &off);
1586 goto release_matches;
1590 t = compat_ip6t_get_target(e);
1591 target = try_then_request_module(xt_find_target(AF_INET6,
1593 t->u.user.revision),
1594 "ip6t_%s", t->u.user.name);
1595 if (IS_ERR(target) || !target) {
1596 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1598 ret = target ? PTR_ERR(target) : -ENOENT;
1599 goto release_matches;
1601 t->u.kernel.target = target;
1603 off += xt_compat_target_offset(target);
/* Remember the cumulative compat->native size delta for this entry. */
1605 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1609 /* Check hooks & underflows */
1610 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1611 if ((unsigned char *)e - base == hook_entries[h])
1612 newinfo->hook_entry[h] = hook_entries[h];
1613 if ((unsigned char *)e - base == underflows[h])
1614 newinfo->underflow[h] = underflows[h];
1617 /* Clear counters and comefrom */
1618 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: release target ref, then refs of matches found so far. */
1623 module_put(t->u.kernel.target->me);
1625 xt_ematch_foreach(ematch, e) {
1628 module_put(ematch->u.kernel.match->me);
/*
 * Second-pass conversion of one compat entry into the native kernel
 * layout at *dstptr: copy the fixed header and counters, convert each
 * match and the target via xt_compat_*_from_user, fix up
 * target_offset/next_offset for the size growth, and shift any
 * hook_entry/underflow positions that lie beyond this entry.
 * NOTE(review): excerpt elides some lines (ret/h locals, braces,
 * return); visible lines kept verbatim.
 */
1634 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1635 unsigned int *size, const char *name,
1636 struct xt_table_info *newinfo, unsigned char *base)
1638 struct ip6t_entry_target *t;
1639 struct xt_target *target;
1640 struct ip6t_entry *de;
1641 unsigned int origsize;
1643 struct xt_entry_match *ematch;
1647 de = (struct ip6t_entry *)*dstptr;
1648 memcpy(de, e, sizeof(struct ip6t_entry));
1649 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1651 *dstptr += sizeof(struct ip6t_entry);
1652 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1654 xt_ematch_foreach(ematch, e) {
1655 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Offsets grow by the native-minus-compat delta accumulated so far. */
1659 de->target_offset = e->target_offset - (origsize - *size);
1660 t = compat_ip6t_get_target(e);
1661 target = t->u.kernel.target;
1662 xt_compat_target_from_user(t, dstptr, size);
1664 de->next_offset = e->next_offset - (origsize - *size);
1665 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1666 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1667 newinfo->hook_entry[h] -= origsize - *size;
1668 if ((unsigned char *)de - base < newinfo->underflow[h])
1669 newinfo->underflow[h] -= origsize - *size;
/*
 * Final ->checkentry pass over a translated (now native-layout) entry:
 * run check_match() for every match and check_target() for the target;
 * on failure, clean up the matches validated so far.
 * NOTE(review): excerpt elides some lines (ret/j locals, braces,
 * return paths); visible lines kept verbatim.
 */
1674 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1679 struct xt_mtchk_param mtpar;
1680 struct xt_entry_match *ematch;
1685 mtpar.entryinfo = &e->ipv6;
1686 mtpar.hook_mask = e->comefrom;
1687 mtpar.family = NFPROTO_IPV6;
1688 xt_ematch_foreach(ematch, e) {
1689 ret = check_match(ematch, &mtpar);
1691 goto cleanup_matches;
1695 ret = check_target(e, net, name);
1697 goto cleanup_matches;
/* Unwind: destroy only the matches that already passed check_match(). */
1701 xt_ematch_foreach(ematch, e) {
1704 cleanup_match(ematch, net);
/*
 * Convert a whole 32-bit compat ruleset into the native table layout:
 *   1) walk compat entries checking sizes/hooks and computing the
 *      total native size (under xt_compat_lock),
 *   2) verify every valid hook has an entry point and underflow,
 *   3) allocate the native table and copy/convert each entry,
 *   4) mark source chains and run compat_check_entry() on each rule,
 *   5) replicate the translated copy to every other CPU.
 * On the error paths, entries that passed ->check get cleanup_entry()
 * while the rest only get compat_release_entry() (module ref drop).
 * NOTE(review): excerpt elides many original lines (ret/i/j/size
 * locals, several gotos/labels, braces, returns); visible lines are
 * kept verbatim and must not be read as a complete function body.
 */
1710 translate_compat_table(struct net *net,
1712 unsigned int valid_hooks,
1713 struct xt_table_info **pinfo,
1715 unsigned int total_size,
1716 unsigned int number,
1717 unsigned int *hook_entries,
1718 unsigned int *underflows)
1721 struct xt_table_info *newinfo, *info;
1722 void *pos, *entry0, *entry1;
1723 struct compat_ip6t_entry *iter0;
1724 struct ip6t_entry *iter1;
1731 info->number = number;
1733 /* Init all hooks to impossible value. */
1734 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1735 info->hook_entry[i] = 0xFFFFFFFF;
1736 info->underflow[i] = 0xFFFFFFFF;
1739 duprintf("translate_compat_table: size %u\n", info->size);
1741 xt_compat_lock(AF_INET6);
1742 /* Walk through entries, checking offsets. */
1743 xt_entry_foreach(iter0, entry0, total_size) {
1744 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1745 entry0, entry0 + total_size, hook_entries, underflows,
1754 duprintf("translate_compat_table: %u not %u entries\n",
1759 /* Check hooks all assigned */
1760 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1761 /* Only hooks which are valid */
1762 if (!(valid_hooks & (1 << i)))
1764 if (info->hook_entry[i] == 0xFFFFFFFF) {
1765 duprintf("Invalid hook entry %u %u\n",
1766 i, hook_entries[i]);
1769 if (info->underflow[i] == 0xFFFFFFFF) {
1770 duprintf("Invalid underflow %u %u\n",
1777 newinfo = xt_alloc_table_info(size);
1781 newinfo->number = number;
1782 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1783 newinfo->hook_entry[i] = info->hook_entry[i];
1784 newinfo->underflow[i] = info->underflow[i];
1786 entry1 = newinfo->entries[raw_smp_processor_id()];
1789 xt_entry_foreach(iter0, entry0, total_size) {
1790 ret = compat_copy_entry_from_user(iter0, &pos,
1791 &size, name, newinfo, entry1);
/* Compat offset table no longer needed once conversion is done. */
1795 xt_compat_flush_offsets(AF_INET6);
1796 xt_compat_unlock(AF_INET6);
1801 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1805 xt_entry_foreach(iter1, entry1, newinfo->size) {
1806 ret = compat_check_entry(iter1, net, name);
1813 * The first i matches need cleanup_entry (calls ->destroy)
1814 * because they had called ->check already. The other j-i
1815 * entries need only release.
1819 xt_entry_foreach(iter0, entry0, newinfo->size) {
1824 compat_release_entry(iter0);
1826 xt_entry_foreach(iter1, entry1, newinfo->size) {
1829 cleanup_entry(iter1, net);
1831 xt_free_table_info(newinfo);
1835 /* And one copy for every other CPU */
1836 for_each_possible_cpu(i)
1837 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1838 memcpy(newinfo->entries[i], entry1, newinfo->size);
1842 xt_free_table_info(info);
/* Full-failure unwind: free native table, release every compat entry. */
1846 xt_free_table_info(newinfo);
1848 xt_entry_foreach(iter0, entry0, total_size) {
1851 compat_release_entry(iter0);
1855 xt_compat_flush_offsets(AF_INET6);
1856 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit userspace: copy the compat
 * replace header and rule blob in, translate to native layout via
 * translate_compat_table(), then swap the table with __do_replace().
 * On a failed swap, undo the translation (cleanup_entry per rule) and
 * free the new table.
 * NOTE(review): excerpt elides some lines (ret local, -ENOMEM/-EFAULT
 * paths, free_newinfo label, returns); visible lines kept verbatim.
 */
1861 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1864 struct compat_ip6t_replace tmp;
1865 struct xt_table_info *newinfo;
1866 void *loc_cpu_entry;
1867 struct ip6t_entry *iter;
1869 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1872 /* overflow check */
1873 if (tmp.size >= INT_MAX / num_possible_cpus())
1875 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1878 newinfo = xt_alloc_table_info(tmp.size);
1882 /* choose the copy that is on our node/cpu */
1883 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1884 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1890 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1891 &newinfo, &loc_cpu_entry, tmp.size,
1892 tmp.num_entries, tmp.hook_entry,
1897 duprintf("compat_do_replace: Translated table\n");
1899 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1900 tmp.num_counters, compat_ptr(tmp.counters));
1902 goto free_newinfo_untrans;
1905 free_newinfo_untrans:
1906 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1907 cleanup_entry(iter, net);
1909 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to compat_do_replace() and ADD_COUNTERS to do_add_counters()
 * with compat=1.
 * NOTE(review): excerpt elides some lines (len parameter line, ret
 * local, -EPERM/-EINVAL returns, braces); visible lines kept verbatim.
 */
1914 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1919 if (!capable(CAP_NET_ADMIN))
1923 case IP6T_SO_SET_REPLACE:
1924 ret = compat_do_replace(sock_net(sk), user, len);
1927 case IP6T_SO_SET_ADD_COUNTERS:
1928 ret = do_add_counters(sock_net(sk), user, len, 1);
1932 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userspace layout for IP6T_SO_GET_ENTRIES: table name plus a
 * flexible trailing array of compat entries.
 * NOTE(review): the "size" member line is elided in this excerpt.
 */
1939 struct compat_ip6t_get_entries {
1940 char name[IP6T_TABLE_MAXNAMELEN];
1942 struct compat_ip6t_entry entrytable[0];
/*
 * Copy an entire table's rules to 32-bit userspace: snapshot counters
 * with alloc_counters(), then convert each entry in this CPU's copy
 * via compat_copy_entry_to_user().
 * NOTE(review): excerpt elides some lines (ret/pos/size/i locals,
 * vfree of counters, return); visible lines kept verbatim.
 */
1946 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1947 void __user *userptr)
1949 struct xt_counters *counters;
1950 const struct xt_table_info *private = table->private;
1954 const void *loc_cpu_entry;
1956 struct ip6t_entry *iter;
1958 counters = alloc_counters(table);
1959 if (IS_ERR(counters))
1960 return PTR_ERR(counters);
1962 /* choose the copy that is on our node/cpu, ...
1963 * This choice is lazy (because current thread is
1964 * allowed to migrate to another cpu)
1966 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1969 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1970 ret = compat_copy_entry_to_user(iter, &pos,
1971 &size, counters, i++);
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit userspace: validate the
 * request length against the compat get header plus declared size,
 * look up the table under xt_compat_lock, and dump it with
 * compat_copy_entries_to_user() if the compat size matches.
 * NOTE(review): excerpt elides some lines (ret local, -EINVAL/-EFAULT
 * returns, module_put/xt_table_unlock, braces); lines kept verbatim.
 */
1981 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1985 struct compat_ip6t_get_entries get;
1988 if (*len < sizeof(get)) {
1989 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1993 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1996 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1997 duprintf("compat_get_entries: %u != %zu\n",
1998 *len, sizeof(get) + get.size);
2002 xt_compat_lock(AF_INET6);
2003 t = xt_find_table_lock(net, AF_INET6, get.name);
2004 if (t && !IS_ERR(t)) {
2005 const struct xt_table_info *private = t->private;
2006 struct xt_table_info info;
2007 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() recomputes the table size in compat layout. */
2008 ret = compat_table_info(private, &info);
2009 if (!ret && get.size == info.size) {
2010 ret = compat_copy_entries_to_user(private->size,
2011 t, uptr->entrytable);
2013 duprintf("compat_get_entries: I've got %u not %u!\n",
2014 private->size, get.size);
2017 xt_compat_flush_offsets(AF_INET6);
2021 ret = t ? PTR_ERR(t) : -ENOENT;
2023 xt_compat_unlock(AF_INET6);
2027 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: CAP_NET_ADMIN required; GET_INFO uses
 * the shared get_info() with compat=1, GET_ENTRIES uses the compat
 * dump, and everything else falls through to the native handler.
 * NOTE(review): excerpt elides some lines (ret local, -EPERM return,
 * braces, final return); visible lines kept verbatim.
 */
2030 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2034 if (!capable(CAP_NET_ADMIN))
2038 case IP6T_SO_GET_INFO:
2039 ret = get_info(sock_net(sk), user, len, 1);
2041 case IP6T_SO_GET_ENTRIES:
2042 ret = compat_get_entries(sock_net(sk), user, len);
2045 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE goes
 * to do_replace(), ADD_COUNTERS to do_add_counters() with compat=0.
 * NOTE(review): excerpt elides some lines (ret local, -EPERM/-EINVAL
 * returns, braces); visible lines kept verbatim.
 */
2052 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2056 if (!capable(CAP_NET_ADMIN))
2060 case IP6T_SO_SET_REPLACE:
2061 ret = do_replace(sock_net(sk), user, len);
2064 case IP6T_SO_SET_ADD_COUNTERS:
2065 ret = do_add_counters(sock_net(sk), user, len, 0);
2069 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: GET_INFO / GET_ENTRIES dump table
 * metadata and rules; GET_REVISION_MATCH / GET_REVISION_TARGET query
 * the highest supported revision of a named match or target,
 * auto-loading the "ip6t_<name>" module if necessary.
 * NOTE(review): excerpt elides some lines (ret/target locals, error
 * returns, braces); visible lines kept verbatim.
 */
2077 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2081 if (!capable(CAP_NET_ADMIN))
2085 case IP6T_SO_GET_INFO:
2086 ret = get_info(sock_net(sk), user, len, 0);
2089 case IP6T_SO_GET_ENTRIES:
2090 ret = get_entries(sock_net(sk), user, len);
2093 case IP6T_SO_GET_REVISION_MATCH:
2094 case IP6T_SO_GET_REVISION_TARGET: {
2095 struct ip6t_get_revision rev;
2098 if (*len != sizeof(rev)) {
2102 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2107 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2112 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2115 "ip6t_%s", rev.name);
2120 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table for a network namespace: allocate a
 * table_info sized for the initial ruleset, copy the template rules
 * into this CPU's slot, translate/validate them, then hand the result
 * to xt_register_table(). Returns the new xt_table or ERR_PTR(ret).
 * NOTE(review): excerpt elides some lines (ret local, error gotos,
 * "return new_table", out labels); visible lines kept verbatim.
 */
2127 struct xt_table *ip6t_register_table(struct net *net,
2128 const struct xt_table *table,
2129 const struct ip6t_replace *repl)
2132 struct xt_table_info *newinfo;
2133 struct xt_table_info bootstrap
2134 = { 0, 0, 0, { 0 }, { 0 }, { } };
2135 void *loc_cpu_entry;
2136 struct xt_table *new_table;
2138 newinfo = xt_alloc_table_info(repl->size);
2144 /* choose the copy on our node/cpu, but dont care about preemption */
2145 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2146 memcpy(loc_cpu_entry, repl->entries, repl->size);
2148 ret = translate_table(net, table->name, table->valid_hooks,
2149 newinfo, loc_cpu_entry, repl->size,
2156 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2157 if (IS_ERR(new_table)) {
2158 ret = PTR_ERR(new_table);
2164 xt_free_table_info(newinfo);
2166 return ERR_PTR(ret);
/*
 * Tear down a registered table: detach it from x_tables, run
 * cleanup_entry() on every rule of this CPU's copy (drops match/target
 * module refs and calls ->destroy), release the table-owner module ref
 * taken for user-added rules, and free the table_info.
 */
2169 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2171 struct xt_table_info *private;
2172 void *loc_cpu_entry;
2173 struct module *table_owner = table->me;
2174 struct ip6t_entry *iter;
2176 private = xt_unregister_table(table);
2178 /* Decrease module usage counts and free resources */
2179 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2180 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2181 cleanup_entry(iter, net);
/* Rules beyond the built-in initial set held a ref on the owner module. */
2182 if (private->number > private->initial_entries)
2183 module_put(table_owner);
2184 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
/*
 * NOTE(review): the "invert" parameter line and the XOR with it are
 * elided in this excerpt; visible lines kept verbatim.
 */
2189 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2190 u_int8_t type, u_int8_t code,
2193 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match ->match callback for the built-in "icmp6" match: never
 * matches non-first fragments, hot-drops packets whose ICMPv6 header
 * cannot be read, otherwise compares type/code against the rule's
 * configured range (inverted via IP6T_ICMP_INV).
 * NOTE(review): excerpt elides some lines ("return false" after the
 * fragment check, the NULL test on ic, braces); lines kept verbatim.
 */
2198 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2200 const struct icmp6hdr *ic;
2201 struct icmp6hdr _icmph;
2202 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2204 /* Must not be a fragment. */
2205 if (par->fragoff != 0)
2208 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2210 /* We've been asked to examine this packet, and we
2211 * can't. Hence, no choice but to drop.
2213 duprintf("Dropping evil ICMP tinygram.\n");
2214 *par->hotdrop = true;
2218 return icmp6_type_code_match(icmpinfo->type,
2221 ic->icmp6_type, ic->icmp6_code,
2222 !!(icmpinfo->invflags&IP6T_ICMP_INV));
/* Called when user tries to insert an entry of this type. */
2226 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2228 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2230 /* Must specify no unknown invflags */
2231 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
/* The built-in targets: standard (NULL) and error. */
/*
 * Standard verdict target: its targetsize is just the verdict int;
 * compat hooks translate the verdict between 32/64-bit layouts.
 */
2235 static struct xt_target ip6t_standard_target __read_mostly = {
2236 .name = IP6T_STANDARD_TARGET,
2237 .targetsize = sizeof(int),
2238 .family = NFPROTO_IPV6,
2239 #ifdef CONFIG_COMPAT
2240 .compatsize = sizeof(compat_int_t),
2241 .compat_from_user = compat_standard_from_user,
2242 .compat_to_user = compat_standard_to_user,
/*
 * Error target placed at the end of built-in chains; its handler
 * (ip6t_error) fires if a packet ever reaches it.
 */
2246 static struct xt_target ip6t_error_target __read_mostly = {
2247 .name = IP6T_ERROR_TARGET,
2248 .target = ip6t_error,
2249 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2250 .family = NFPROTO_IPV6,
/*
 * Sockopt registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_* ranges
 * to the native and (when CONFIG_COMPAT) 32-bit dispatchers above.
 */
2253 static struct nf_sockopt_ops ip6t_sockopts = {
2255 .set_optmin = IP6T_BASE_CTL,
2256 .set_optmax = IP6T_SO_SET_MAX+1,
2257 .set = do_ip6t_set_ctl,
2258 #ifdef CONFIG_COMPAT
2259 .compat_set = compat_do_ip6t_set_ctl,
2261 .get_optmin = IP6T_BASE_CTL,
2262 .get_optmax = IP6T_SO_GET_MAX+1,
2263 .get = do_ip6t_get_ctl,
2264 #ifdef CONFIG_COMPAT
2265 .compat_get = compat_do_ip6t_get_ctl,
2267 .owner = THIS_MODULE,
/*
 * Built-in "icmp6" match registration, restricted to IPPROTO_ICMPV6
 * packets; rule data is a struct ip6t_icmp.
 */
2270 static struct xt_match icmp6_matchstruct __read_mostly = {
2272 .match = icmp6_match,
2273 .matchsize = sizeof(struct ip6t_icmp),
2274 .checkentry = icmp6_checkentry,
2275 .proto = IPPROTO_ICMPV6,
2276 .family = NFPROTO_IPV6,
/* Per-netns init: set up the x_tables state for NFPROTO_IPV6. */
2279 static int __net_init ip6_tables_net_init(struct net *net)
2281 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: release the x_tables state for NFPROTO_IPV6. */
2284 static void __net_exit ip6_tables_net_exit(struct net *net)
2286 xt_proto_fini(net, NFPROTO_IPV6);
/* Pernet hooks so each network namespace gets its own table state. */
2289 static struct pernet_operations ip6_tables_net_ops = {
2290 .init = ip6_tables_net_init,
2291 .exit = ip6_tables_net_exit,
/*
 * Module init: register the pernet subsystem, the two built-in
 * targets, the icmp6 match, and finally the sockopt interface — with
 * reverse-order unwinding on any failure.
 * NOTE(review): excerpt elides some lines (error-check ifs, goto
 * labels, "return ret"); visible lines kept verbatim.
 */
2294 static int __init ip6_tables_init(void)
2298 ret = register_pernet_subsys(&ip6_tables_net_ops);
2302 /* No one else will be downing sem now, so we won't sleep */
2303 ret = xt_register_target(&ip6t_standard_target);
2306 ret = xt_register_target(&ip6t_error_target);
2309 ret = xt_register_match(&icmp6_matchstruct);
2313 /* Register setsockopt */
2314 ret = nf_register_sockopt(&ip6t_sockopts);
2318 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind labels: undo registrations in reverse order. */
2322 xt_unregister_match(&icmp6_matchstruct);
2324 xt_unregister_target(&ip6t_error_target);
2326 xt_unregister_target(&ip6t_standard_target);
2328 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of ip6_tables_init. */
2333 static void __exit ip6_tables_fini(void)
2335 nf_unregister_sockopt(&ip6t_sockopts);
2337 xt_unregister_match(&icmp6_matchstruct);
2338 xt_unregister_target(&ip6t_error_target);
2339 xt_unregister_target(&ip6t_standard_target);
2341 unregister_pernet_subsys(&ip6_tables_net_ops);
2345 * find the offset to specified header or the protocol number of last header
2346 * if target < 0. "last header" is transport protocol header, ESP, or
2349 * If target header is found, its offset is set in *offset and return protocol
2350 * number. Otherwise, return -1.
2352 * If the first fragment doesn't contain the final protocol header or
2353 * NEXTHDR_NONE it is considered invalid.
2355 * Note that non-1st fragment is special case that "the protocol number
2356 * of last header" is "next header" field in Fragment header. In this case,
2357 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/*
 * Walk the IPv6 extension-header chain of skb looking for header type
 * "target" (see the comment block above for the full contract): on
 * success stores the header's offset in *offset and returns its
 * protocol number; fragments are handled specially via *fragoff.
 * NOTE(review): excerpt elides some lines (the fp local, -EBADMSG /
 * -ENOENT returns, the final success path setting *offset, braces);
 * visible lines kept verbatim.
 */
2361 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2362 int target, unsigned short *fragoff)
2364 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2365 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2366 unsigned int len = skb->len - start;
2371 while (nexthdr != target) {
2372 struct ipv6_opt_hdr _hdr, *hp;
2373 unsigned int hdrlen;
/* Hit a non-extension header (or NONE) before finding target: stop. */
2375 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2381 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2384 if (nexthdr == NEXTHDR_FRAGMENT) {
2385 unsigned short _frag_off;
2387 fp = skb_header_pointer(skb,
2388 start+offsetof(struct frag_hdr,
/* Mask off the M-flag and reserved bits to get the fragment offset. */
2395 _frag_off = ntohs(*fp) & ~0x7;
2398 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2399 hp->nexthdr == NEXTHDR_NONE)) {
2401 *fragoff = _frag_off;
/* AUTH header length is in 32-bit words; others use ipv6_optlen(). */
2407 } else if (nexthdr == NEXTHDR_AUTH)
2408 hdrlen = (hp->hdrlen + 2) << 2;
2410 hdrlen = ipv6_optlen(hp);
2412 nexthdr = hp->nexthdr;
2421 EXPORT_SYMBOL(ip6t_register_table);
2422 EXPORT_SYMBOL(ip6t_unregister_table);
2423 EXPORT_SYMBOL(ip6t_do_table);
2424 EXPORT_SYMBOL(ip6t_ext_hdr);
2425 EXPORT_SYMBOL(ipv6_find_hdr);
2427 module_init(ip6_tables_init);
2428 module_exit(ip6_tables_fini);