/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf: packet-path debug output, compiled out unless
 * DEBUG_IP_FIREWALL is defined. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf: userspace-interface debug output, compiled out unless
 * DEBUG_IP_FIREWALL_USER is defined. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT: log (but do not halt) when an invariant fails;
 * a no-op unless CONFIG_NETFILTER_DEBUG is set. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset within a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
227 static const struct ip6t_ip6 uncond;
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
232 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234 /* This cries for unification! */
235 static const char *const hooknames[] = {
236 [NF_INET_PRE_ROUTING] = "PREROUTING",
237 [NF_INET_LOCAL_IN] = "INPUT",
238 [NF_INET_FORWARD] = "FORWARD",
239 [NF_INET_LOCAL_OUT] = "OUTPUT",
240 [NF_INET_POST_ROUTING] = "POSTROUTING",
243 enum nf_ip_trace_comments {
244 NF_IP6_TRACE_COMMENT_RULE,
245 NF_IP6_TRACE_COMMENT_RETURN,
246 NF_IP6_TRACE_COMMENT_POLICY,
249 static const char *const comments[] = {
250 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
251 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
252 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
255 static struct nf_loginfo trace_loginfo = {
256 .type = NF_LOG_TYPE_LOG,
260 .logflags = NF_LOG_MASK,
265 /* Mildly perf critical (only if packet tracing is on) */
267 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
268 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum)
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */
275 *chainname = t->target.data;
280 if (s->target_offset == sizeof(struct ip6t_entry)
281 && strcmp(t->target.u.kernel.target->name,
282 IP6T_STANDARD_TARGET) == 0
284 && unconditional(&s->ipv6)) {
285 /* Tail of chains: STANDARD target (return/policy) */
286 *comment = *chainname == hookname
287 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
288 : comments[NF_IP6_TRACE_COMMENT_RETURN];
297 static void trace_packet(struct sk_buff *skb,
299 const struct net_device *in,
300 const struct net_device *out,
301 const char *tablename,
302 struct xt_table_info *private,
303 struct ip6t_entry *e)
306 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment;
308 unsigned int rulenum = 0;
310 table_base = private->entries[smp_processor_id()];
311 root = get_entry(table_base, private->hook_entry[hook]);
313 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
316 IP6T_ENTRY_ITERATE(root,
317 private->size - private->hook_entry[hook],
318 get_chainname_rulenum,
319 e, hookname, &chainname, &comment, &rulenum);
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ",
323 tablename, chainname, comment, rulenum);
327 static inline __pure struct ip6t_entry *
328 ip6t_next_entry(const struct ip6t_entry *entry)
330 return (void *)entry + entry->next_offset;
333 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
335 ip6t_do_table(struct sk_buff *skb,
337 const struct net_device *in,
338 const struct net_device *out,
339 struct xt_table *table)
341 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
343 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
344 bool hotdrop = false;
345 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev;
349 struct ip6t_entry *e, *back;
350 struct xt_table_info *private;
351 struct xt_match_param mtpar;
352 struct xt_target_param tgpar;
355 indev = in ? in->name : nulldevname;
356 outdev = out ? out->name : nulldevname;
357 /* We handle fragments by dealing with the first fragment as
358 * if it was a normal packet. All other fragments are treated
359 * normally, except that they will NEVER match rules that ask
360 * things we don't know, ie. tcp syn flag or ports). If the
361 * rule is also a fragment-specific rule, non-fragments won't
363 mtpar.hotdrop = &hotdrop;
364 mtpar.in = tgpar.in = in;
365 mtpar.out = tgpar.out = out;
366 mtpar.family = tgpar.family = NFPROTO_IPV6;
367 mtpar.hooknum = tgpar.hooknum = hook;
369 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
372 private = table->private;
373 table_base = private->entries[smp_processor_id()];
375 e = get_entry(table_base, private->hook_entry[hook]);
377 /* For return from builtin chain */
378 back = get_entry(table_base, private->underflow[hook]);
381 struct ip6t_entry_target *t;
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
388 e = ip6t_next_entry(e);
392 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1);
396 t = ip6t_get_target(e);
397 IP_NF_ASSERT(t->u.kernel.target);
399 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
400 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
401 /* The packet is traced: log it */
402 if (unlikely(skb->nf_trace))
403 trace_packet(skb, hook, in, out,
404 table->name, private, e);
406 /* Standard target? */
407 if (!t->u.kernel.target->target) {
410 v = ((struct ip6t_standard_target *)t)->verdict;
412 /* Pop from stack? */
413 if (v != IP6T_RETURN) {
414 verdict = (unsigned)(-v) - 1;
418 back = get_entry(table_base, back->comefrom);
421 if (table_base + v != ip6t_next_entry(e)
422 && !(e->ipv6.flags & IP6T_F_GOTO)) {
423 /* Save old back ptr in next entry */
424 struct ip6t_entry *next = ip6t_next_entry(e);
425 next->comefrom = (void *)back - table_base;
426 /* set back pointer to next entry */
430 e = get_entry(table_base, v);
434 /* Targets which reenter must return
436 tgpar.target = t->u.kernel.target;
437 tgpar.targinfo = t->data;
439 #ifdef CONFIG_NETFILTER_DEBUG
440 tb_comefrom = 0xeeeeeeec;
442 verdict = t->u.kernel.target->target(skb, &tgpar);
444 #ifdef CONFIG_NETFILTER_DEBUG
445 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 tb_comefrom = 0x57acc001;
452 if (verdict == IP6T_CONTINUE)
453 e = ip6t_next_entry(e);
459 #ifdef CONFIG_NETFILTER_DEBUG
460 tb_comefrom = NETFILTER_LINK_POISON;
462 xt_info_rdunlock_bh();
464 #ifdef DEBUG_ALLOW_ALL
475 /* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */
478 mark_source_chains(struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0)
483 /* No recursion; use packet counter to save back ptrs (reset
484 to 0 as we leave), and comefrom to save source hook bitmask */
485 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
486 unsigned int pos = newinfo->hook_entry[hook];
487 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
489 if (!(valid_hooks & (1 << hook)))
492 /* Set initial back pointer. */
493 e->counters.pcnt = pos;
496 struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e);
498 int visited = e->comefrom & (1 << hook);
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
501 printk("iptables: loop hook %u pos %u %08X.\n",
502 hook, pos, e->comefrom);
505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
507 /* Unconditional return/END. */
508 if ((e->target_offset == sizeof(struct ip6t_entry)
509 && (strcmp(t->target.u.user.name,
510 IP6T_STANDARD_TARGET) == 0)
512 && unconditional(&e->ipv6)) || visited) {
513 unsigned int oldpos, size;
515 if ((strcmp(t->target.u.user.name,
516 IP6T_STANDARD_TARGET) == 0) &&
517 t->verdict < -NF_MAX_VERDICT - 1) {
518 duprintf("mark_source_chains: bad "
519 "negative verdict (%i)\n",
524 /* Return: backtrack through the last
527 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
528 #ifdef DEBUG_IP_FIREWALL_USER
530 & (1 << NF_INET_NUMHOOKS)) {
531 duprintf("Back unset "
538 pos = e->counters.pcnt;
539 e->counters.pcnt = 0;
541 /* We're at the start. */
545 e = (struct ip6t_entry *)
547 } while (oldpos == pos + e->next_offset);
550 size = e->next_offset;
551 e = (struct ip6t_entry *)
552 (entry0 + pos + size);
553 e->counters.pcnt = pos;
556 int newpos = t->verdict;
558 if (strcmp(t->target.u.user.name,
559 IP6T_STANDARD_TARGET) == 0
561 if (newpos > newinfo->size -
562 sizeof(struct ip6t_entry)) {
563 duprintf("mark_source_chains: "
564 "bad verdict (%i)\n",
568 /* This a jump; chase it. */
569 duprintf("Jump rule %u -> %u\n",
572 /* ... this is a fallthru */
573 newpos = pos + e->next_offset;
575 e = (struct ip6t_entry *)
577 e->counters.pcnt = pos;
582 duprintf("Finished chain %u\n", hook);
588 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
590 struct xt_mtdtor_param par;
592 if (i && (*i)-- == 0)
595 par.match = m->u.kernel.match;
596 par.matchinfo = m->data;
597 par.family = NFPROTO_IPV6;
598 if (par.match->destroy != NULL)
599 par.match->destroy(&par);
600 module_put(par.match->me);
605 check_entry(struct ip6t_entry *e, const char *name)
607 struct ip6t_entry_target *t;
609 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
614 if (e->target_offset + sizeof(struct ip6t_entry_target) >
618 t = ip6t_get_target(e);
619 if (e->target_offset + t->u.target_size > e->next_offset)
625 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
628 const struct ip6t_ip6 *ipv6 = par->entryinfo;
631 par->match = m->u.kernel.match;
632 par->matchinfo = m->data;
634 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
635 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
637 duprintf("ip_tables: check failed for `%s'.\n",
646 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
649 struct xt_match *match;
652 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
654 "ip6t_%s", m->u.user.name);
655 if (IS_ERR(match) || !match) {
656 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
657 return match ? PTR_ERR(match) : -ENOENT;
659 m->u.kernel.match = match;
661 ret = check_match(m, par, i);
667 module_put(m->u.kernel.match->me);
671 static int check_target(struct ip6t_entry *e, const char *name)
673 struct ip6t_entry_target *t = ip6t_get_target(e);
674 struct xt_tgchk_param par = {
677 .target = t->u.kernel.target,
679 .hook_mask = e->comefrom,
680 .family = NFPROTO_IPV6,
684 t = ip6t_get_target(e);
685 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
686 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
688 duprintf("ip_tables: check failed for `%s'.\n",
689 t->u.kernel.target->name);
696 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
699 struct ip6t_entry_target *t;
700 struct xt_target *target;
703 struct xt_mtchk_param mtpar;
705 ret = check_entry(e, name);
711 mtpar.entryinfo = &e->ipv6;
712 mtpar.hook_mask = e->comefrom;
713 mtpar.family = NFPROTO_IPV6;
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
716 goto cleanup_matches;
718 t = ip6t_get_target(e);
719 target = try_then_request_module(xt_find_target(AF_INET6,
722 "ip6t_%s", t->u.user.name);
723 if (IS_ERR(target) || !target) {
724 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
725 ret = target ? PTR_ERR(target) : -ENOENT;
726 goto cleanup_matches;
728 t->u.kernel.target = target;
730 ret = check_target(e, name);
737 module_put(t->u.kernel.target->me);
739 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
744 check_entry_size_and_hooks(struct ip6t_entry *e,
745 struct xt_table_info *newinfo,
747 unsigned char *limit,
748 const unsigned int *hook_entries,
749 const unsigned int *underflows,
754 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
755 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
756 duprintf("Bad offset %p\n", e);
761 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
762 duprintf("checking: element %p size %u\n",
767 /* Check hooks & underflows */
768 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
769 if ((unsigned char *)e - base == hook_entries[h])
770 newinfo->hook_entry[h] = hook_entries[h];
771 if ((unsigned char *)e - base == underflows[h])
772 newinfo->underflow[h] = underflows[h];
775 /* FIXME: underflows must be unconditional, standard verdicts
776 < 0 (not IP6T_RETURN). --RR */
778 /* Clear counters and comefrom */
779 e->counters = ((struct xt_counters) { 0, 0 });
787 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
789 struct xt_tgdtor_param par;
790 struct ip6t_entry_target *t;
792 if (i && (*i)-- == 0)
795 /* Cleanup all matches */
796 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
797 t = ip6t_get_target(e);
799 par.target = t->u.kernel.target;
800 par.targinfo = t->data;
801 par.family = NFPROTO_IPV6;
802 if (par.target->destroy != NULL)
803 par.target->destroy(&par);
804 module_put(par.target->me);
808 /* Checks and translates the user-supplied table segment (held in
811 translate_table(const char *name,
812 unsigned int valid_hooks,
813 struct xt_table_info *newinfo,
817 const unsigned int *hook_entries,
818 const unsigned int *underflows)
823 newinfo->size = size;
824 newinfo->number = number;
826 /* Init all hooks to impossible value. */
827 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
828 newinfo->hook_entry[i] = 0xFFFFFFFF;
829 newinfo->underflow[i] = 0xFFFFFFFF;
832 duprintf("translate_table: size %u\n", newinfo->size);
834 /* Walk through entries, checking offsets. */
835 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
836 check_entry_size_and_hooks,
840 hook_entries, underflows, &i);
845 duprintf("translate_table: %u not %u entries\n",
850 /* Check hooks all assigned */
851 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
852 /* Only hooks which are valid */
853 if (!(valid_hooks & (1 << i)))
855 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
856 duprintf("Invalid hook entry %u %u\n",
860 if (newinfo->underflow[i] == 0xFFFFFFFF) {
861 duprintf("Invalid underflow %u %u\n",
867 if (!mark_source_chains(newinfo, valid_hooks, entry0))
870 /* Finally, each sanity check must pass */
872 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
873 find_check_entry, name, size, &i);
876 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
881 /* And one copy for every other CPU */
882 for_each_possible_cpu(i) {
883 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
884 memcpy(newinfo->entries[i], entry0, newinfo->size);
892 add_entry_to_counter(const struct ip6t_entry *e,
893 struct xt_counters total[],
896 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
903 set_entry_to_counter(const struct ip6t_entry *e,
904 struct ip6t_counters total[],
907 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
914 get_counters(const struct xt_table_info *t,
915 struct xt_counters counters[])
921 /* Instead of clearing (by a previous call to memset())
922 * the counters and using adds, we set the counters
923 * with data used by 'current' CPU
925 * Bottom half has to be disabled to prevent deadlock
926 * if new softirq were to run and call ipt_do_table
929 curcpu = smp_processor_id();
932 IP6T_ENTRY_ITERATE(t->entries[curcpu],
934 set_entry_to_counter,
938 for_each_possible_cpu(cpu) {
943 IP6T_ENTRY_ITERATE(t->entries[cpu],
945 add_entry_to_counter,
948 xt_info_wrunlock(cpu);
953 static struct xt_counters *alloc_counters(struct xt_table *table)
955 unsigned int countersize;
956 struct xt_counters *counters;
957 struct xt_table_info *private = table->private;
959 /* We need atomic snapshot of counters: rest doesn't change
960 (other than comefrom, which userspace doesn't care
962 countersize = sizeof(struct xt_counters) * private->number;
963 counters = vmalloc_node(countersize, numa_node_id());
965 if (counters == NULL)
966 return ERR_PTR(-ENOMEM);
968 get_counters(private, counters);
974 copy_entries_to_user(unsigned int total_size,
975 struct xt_table *table,
976 void __user *userptr)
978 unsigned int off, num;
979 struct ip6t_entry *e;
980 struct xt_counters *counters;
981 const struct xt_table_info *private = table->private;
983 const void *loc_cpu_entry;
985 counters = alloc_counters(table);
986 if (IS_ERR(counters))
987 return PTR_ERR(counters);
989 /* choose the copy that is on our node/cpu, ...
990 * This choice is lazy (because current thread is
991 * allowed to migrate to another cpu)
993 loc_cpu_entry = private->entries[raw_smp_processor_id()];
994 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
999 /* FIXME: use iterator macros --RR */
1000 /* ... then go back and fix counters and names */
1001 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1003 const struct ip6t_entry_match *m;
1004 const struct ip6t_entry_target *t;
1006 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1007 if (copy_to_user(userptr + off
1008 + offsetof(struct ip6t_entry, counters),
1010 sizeof(counters[num])) != 0) {
1015 for (i = sizeof(struct ip6t_entry);
1016 i < e->target_offset;
1017 i += m->u.match_size) {
1020 if (copy_to_user(userptr + off + i
1021 + offsetof(struct ip6t_entry_match,
1023 m->u.kernel.match->name,
1024 strlen(m->u.kernel.match->name)+1)
1031 t = ip6t_get_target(e);
1032 if (copy_to_user(userptr + off + e->target_offset
1033 + offsetof(struct ip6t_entry_target,
1035 t->u.kernel.target->name,
1036 strlen(t->u.kernel.target->name)+1) != 0) {
1047 #ifdef CONFIG_COMPAT
1048 static void compat_standard_from_user(void *dst, void *src)
1050 int v = *(compat_int_t *)src;
1053 v += xt_compat_calc_jump(AF_INET6, v);
1054 memcpy(dst, &v, sizeof(v));
1057 static int compat_standard_to_user(void __user *dst, void *src)
1059 compat_int_t cv = *(int *)src;
1062 cv -= xt_compat_calc_jump(AF_INET6, cv);
1063 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1067 compat_calc_match(struct ip6t_entry_match *m, int *size)
1069 *size += xt_compat_match_offset(m->u.kernel.match);
1073 static int compat_calc_entry(struct ip6t_entry *e,
1074 const struct xt_table_info *info,
1075 void *base, struct xt_table_info *newinfo)
1077 struct ip6t_entry_target *t;
1078 unsigned int entry_offset;
1081 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1082 entry_offset = (void *)e - base;
1083 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1084 t = ip6t_get_target(e);
1085 off += xt_compat_target_offset(t->u.kernel.target);
1086 newinfo->size -= off;
1087 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1091 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1092 if (info->hook_entry[i] &&
1093 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1094 newinfo->hook_entry[i] -= off;
1095 if (info->underflow[i] &&
1096 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1097 newinfo->underflow[i] -= off;
1102 static int compat_table_info(const struct xt_table_info *info,
1103 struct xt_table_info *newinfo)
1105 void *loc_cpu_entry;
1107 if (!newinfo || !info)
1110 /* we dont care about newinfo->entries[] */
1111 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1112 newinfo->initial_entries = 0;
1113 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1114 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1115 compat_calc_entry, info, loc_cpu_entry,
1120 static int get_info(struct net *net, void __user *user, int *len, int compat)
1122 char name[IP6T_TABLE_MAXNAMELEN];
1126 if (*len != sizeof(struct ip6t_getinfo)) {
1127 duprintf("length %u != %zu\n", *len,
1128 sizeof(struct ip6t_getinfo));
1132 if (copy_from_user(name, user, sizeof(name)) != 0)
1135 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1136 #ifdef CONFIG_COMPAT
1138 xt_compat_lock(AF_INET6);
1140 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1141 "ip6table_%s", name);
1142 if (t && !IS_ERR(t)) {
1143 struct ip6t_getinfo info;
1144 const struct xt_table_info *private = t->private;
1146 #ifdef CONFIG_COMPAT
1148 struct xt_table_info tmp;
1149 ret = compat_table_info(private, &tmp);
1150 xt_compat_flush_offsets(AF_INET6);
1154 info.valid_hooks = t->valid_hooks;
1155 memcpy(info.hook_entry, private->hook_entry,
1156 sizeof(info.hook_entry));
1157 memcpy(info.underflow, private->underflow,
1158 sizeof(info.underflow));
1159 info.num_entries = private->number;
1160 info.size = private->size;
1161 strcpy(info.name, name);
1163 if (copy_to_user(user, &info, *len) != 0)
1171 ret = t ? PTR_ERR(t) : -ENOENT;
1172 #ifdef CONFIG_COMPAT
1174 xt_compat_unlock(AF_INET6);
1180 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1183 struct ip6t_get_entries get;
1186 if (*len < sizeof(get)) {
1187 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1190 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1192 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1193 duprintf("get_entries: %u != %zu\n",
1194 *len, sizeof(get) + get.size);
1198 t = xt_find_table_lock(net, AF_INET6, get.name);
1199 if (t && !IS_ERR(t)) {
1200 struct xt_table_info *private = t->private;
1201 duprintf("t->private->number = %u\n", private->number);
1202 if (get.size == private->size)
1203 ret = copy_entries_to_user(private->size,
1204 t, uptr->entrytable);
1206 duprintf("get_entries: I've got %u not %u!\n",
1207 private->size, get.size);
1213 ret = t ? PTR_ERR(t) : -ENOENT;
1219 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1220 struct xt_table_info *newinfo, unsigned int num_counters,
1221 void __user *counters_ptr)
1225 struct xt_table_info *oldinfo;
1226 struct xt_counters *counters;
1227 const void *loc_cpu_old_entry;
1230 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1237 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1238 "ip6table_%s", name);
1239 if (!t || IS_ERR(t)) {
1240 ret = t ? PTR_ERR(t) : -ENOENT;
1241 goto free_newinfo_counters_untrans;
1245 if (valid_hooks != t->valid_hooks) {
1246 duprintf("Valid hook crap: %08X vs %08X\n",
1247 valid_hooks, t->valid_hooks);
1252 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1256 /* Update module usage count based on number of rules */
1257 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1258 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1259 if ((oldinfo->number > oldinfo->initial_entries) ||
1260 (newinfo->number <= oldinfo->initial_entries))
1262 if ((oldinfo->number > oldinfo->initial_entries) &&
1263 (newinfo->number <= oldinfo->initial_entries))
1266 /* Get the old counters, and synchronize with replace */
1267 get_counters(oldinfo, counters);
1269 /* Decrease module usage counts and free resource */
1270 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1271 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1273 xt_free_table_info(oldinfo);
1274 if (copy_to_user(counters_ptr, counters,
1275 sizeof(struct xt_counters) * num_counters) != 0)
1284 free_newinfo_counters_untrans:
1291 do_replace(struct net *net, void __user *user, unsigned int len)
1294 struct ip6t_replace tmp;
1295 struct xt_table_info *newinfo;
1296 void *loc_cpu_entry;
1298 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1301 /* overflow check */
1302 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1305 newinfo = xt_alloc_table_info(tmp.size);
1309 /* choose the copy that is on our node/cpu */
1310 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1311 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1317 ret = translate_table(tmp.name, tmp.valid_hooks,
1318 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1319 tmp.hook_entry, tmp.underflow);
1323 duprintf("ip_tables: Translated table\n");
1325 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1326 tmp.num_counters, tmp.counters);
1328 goto free_newinfo_untrans;
1331 free_newinfo_untrans:
1332 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1334 xt_free_table_info(newinfo);
1338 /* We're lazy, and add to the first CPU; overflow works its fey magic
1339 * and everything is OK. */
1341 add_counter_to_entry(struct ip6t_entry *e,
1342 const struct xt_counters addme[],
1345 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1352 do_add_counters(struct net *net, void __user *user, unsigned int len,
1355 unsigned int i, curcpu;
1356 struct xt_counters_info tmp;
1357 struct xt_counters *paddc;
1358 unsigned int num_counters;
1363 const struct xt_table_info *private;
1365 const void *loc_cpu_entry;
1366 #ifdef CONFIG_COMPAT
1367 struct compat_xt_counters_info compat_tmp;
1371 size = sizeof(struct compat_xt_counters_info);
1376 size = sizeof(struct xt_counters_info);
1379 if (copy_from_user(ptmp, user, size) != 0)
1382 #ifdef CONFIG_COMPAT
1384 num_counters = compat_tmp.num_counters;
1385 name = compat_tmp.name;
1389 num_counters = tmp.num_counters;
1393 if (len != size + num_counters * sizeof(struct xt_counters))
1396 paddc = vmalloc_node(len - size, numa_node_id());
1400 if (copy_from_user(paddc, user + size, len - size) != 0) {
1405 t = xt_find_table_lock(net, AF_INET6, name);
1406 if (!t || IS_ERR(t)) {
1407 ret = t ? PTR_ERR(t) : -ENOENT;
1413 private = t->private;
1414 if (private->number != num_counters) {
1416 goto unlock_up_free;
1420 /* Choose the copy that is on our node */
1421 curcpu = smp_processor_id();
1422 xt_info_wrlock(curcpu);
1423 loc_cpu_entry = private->entries[curcpu];
1424 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1426 add_counter_to_entry,
1429 xt_info_wrunlock(curcpu);
1441 #ifdef CONFIG_COMPAT
1442 struct compat_ip6t_replace {
1443 char name[IP6T_TABLE_MAXNAMELEN];
1447 u32 hook_entry[NF_INET_NUMHOOKS];
1448 u32 underflow[NF_INET_NUMHOOKS];
1450 compat_uptr_t counters; /* struct ip6t_counters * */
1451 struct compat_ip6t_entry entries[0];
1455 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1456 unsigned int *size, struct xt_counters *counters,
1459 struct ip6t_entry_target *t;
1460 struct compat_ip6t_entry __user *ce;
1461 u_int16_t target_offset, next_offset;
1462 compat_uint_t origsize;
1467 ce = (struct compat_ip6t_entry __user *)*dstptr;
1468 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1471 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1474 *dstptr += sizeof(struct compat_ip6t_entry);
1475 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1477 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1478 target_offset = e->target_offset - (origsize - *size);
1481 t = ip6t_get_target(e);
1482 ret = xt_compat_target_to_user(t, dstptr, size);
1486 next_offset = e->next_offset - (origsize - *size);
1487 if (put_user(target_offset, &ce->target_offset))
1489 if (put_user(next_offset, &ce->next_offset))
/* Resolve the xt_match named in a compat rule (auto-loading the
 * "ip6t_<name>" module if needed), bind it to the entry, and add the
 * kernel-vs-compat size delta of its private data to *size so the
 * caller can size the translated table.  Returns 0 or -errno. */
1499 compat_find_calc_match(struct ip6t_entry_match *m,
1501 const struct ip6t_ip6 *ipv6,
1502 unsigned int hookmask,
1503 int *size, unsigned int *i)
1505 struct xt_match *match;
1507 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1508 m->u.user.revision),
1509 "ip6t_%s", m->u.user.name);
/* xt_find_match may return an ERR_PTR or NULL; map both to -errno. */
1510 if (IS_ERR(match) || !match) {
1511 duprintf("compat_check_calc_match: `%s' not found\n",
1513 return match ? PTR_ERR(match) : -ENOENT;
1515 m->u.kernel.match = match;
1516 *size += xt_compat_match_offset(match);
/* Drop the module reference taken by compat_find_calc_match.
 * Iterator-callback convention: a non-NULL *i limits the release to
 * the first *i matches (used when unwinding a partial setup). */
1523 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1525 if (i && (*i)-- == 0)
1528 module_put(m->u.kernel.match->me);
/* Release all module references held by one compat entry: every
 * bound match, then the bound target.  Same *i-limited iterator
 * convention as compat_release_match. */
1533 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1535 struct ip6t_entry_target *t;
1537 if (i && (*i)-- == 0)
1540 /* Cleanup all matches */
1541 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1542 t = compat_ip6t_get_target(e);
1543 module_put(t->u.kernel.target->me);
/* First pass over a compat rule blob: validate alignment and bounds
 * of one entry, resolve its matches and target (taking module refs),
 * record the kernel/compat size delta with xt_compat_add_offset(),
 * and note hook entry/underflow positions in newinfo.  On error the
 * release_matches path drops the refs taken so far. */
1548 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1549 struct xt_table_info *newinfo,
1551 unsigned char *base,
1552 unsigned char *limit,
1553 unsigned int *hook_entries,
1554 unsigned int *underflows,
1558 struct ip6t_entry_target *t;
1559 struct xt_target *target;
1560 unsigned int entry_offset;
1564 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and ones whose header would run past
 * the end of the user-supplied blob. */
1565 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1566 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1567 duprintf("Bad offset %p, limit = %p\n", e, limit);
1571 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1572 sizeof(struct compat_xt_entry_target)) {
1573 duprintf("checking: element %p size %u\n",
1578 /* For purposes of check_entry casting the compat entry is fine */
1579 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the 64-bit form of this entry is. */
1583 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1584 entry_offset = (void *)e - (void *)base;
1586 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1587 &e->ipv6, e->comefrom, &off, &j);
1589 goto release_matches;
1591 t = compat_ip6t_get_target(e);
1592 target = try_then_request_module(xt_find_target(AF_INET6,
1594 t->u.user.revision),
1595 "ip6t_%s", t->u.user.name);
1596 if (IS_ERR(target) || !target) {
1597 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1599 ret = target ? PTR_ERR(target) : -ENOENT;
1600 goto release_matches;
1602 t->u.kernel.target = target;
1604 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for the translation pass. */
1606 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1610 /* Check hooks & underflows */
1611 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1612 if ((unsigned char *)e - base == hook_entries[h])
1613 newinfo->hook_entry[h] = hook_entries[h];
1614 if ((unsigned char *)e - base == underflows[h])
1615 newinfo->underflow[h] = underflows[h];
1618 /* Clear counters and comefrom */
1619 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop target ref, then the j match refs taken above. */
1626 module_put(t->u.kernel.target->me);
1628 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/* Second pass: expand one validated compat entry into native
 * ip6t_entry form at *dstptr.  xt_compat_*_from_user grow each
 * match/target blob; target_offset/next_offset are re-based on the
 * growth (origsize - *size, negative here since *size grows), and
 * any hook entry/underflow past this entry is shifted accordingly. */
1633 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1634 unsigned int *size, const char *name,
1635 struct xt_table_info *newinfo, unsigned char *base)
1637 struct ip6t_entry_target *t;
1638 struct xt_target *target;
1639 struct ip6t_entry *de;
1640 unsigned int origsize;
1645 de = (struct ip6t_entry *)*dstptr;
1646 memcpy(de, e, sizeof(struct ip6t_entry));
1647 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1649 *dstptr += sizeof(struct ip6t_entry);
1650 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1652 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1656 de->target_offset = e->target_offset - (origsize - *size);
1657 t = compat_ip6t_get_target(e);
1658 target = t->u.kernel.target;
1659 xt_compat_target_from_user(t, dstptr, size);
1661 de->next_offset = e->next_offset - (origsize - *size);
/* Slide later hook offsets by how much this entry grew. */
1662 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1663 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1664 newinfo->hook_entry[h] -= origsize - *size;
1665 if ((unsigned char *)de - base < newinfo->underflow[h])
1666 newinfo->underflow[h] -= origsize - *size;
/* Final checkentry pass on a translated (now native-format) entry:
 * run every match's and the target's checkentry hooks.  On a match
 * failure, cleanup_match unwinds only the j matches already checked. */
1671 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1676 struct xt_mtchk_param mtpar;
1680 mtpar.entryinfo = &e->ipv6;
1681 mtpar.hook_mask = e->comefrom;
1682 mtpar.family = NFPROTO_IPV6;
1683 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1685 goto cleanup_matches;
1687 ret = check_target(e, name);
1689 goto cleanup_matches;
1695 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* Convert a whole 32-bit rule table into native format.
 * Pass 1 (under xt_compat_lock): check_compat_entry_size_and_hooks
 * validates entries and records per-entry size deltas.  A new
 * xt_table_info is then allocated at the expanded size and pass 2
 * (compat_copy_entry_from_user) expands each entry into it.  After
 * unlocking, chains are marked and compat_check_entry runs the real
 * checkentry hooks.  On any failure the partially-built state is
 * unwound (module refs, offsets, table infos). */
1700 translate_compat_table(const char *name,
1701 unsigned int valid_hooks,
1702 struct xt_table_info **pinfo,
1704 unsigned int total_size,
1705 unsigned int number,
1706 unsigned int *hook_entries,
1707 unsigned int *underflows)
1710 struct xt_table_info *newinfo, *info;
1711 void *pos, *entry0, *entry1;
1718 info->number = number;
1720 /* Init all hooks to impossible value. */
1721 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1722 info->hook_entry[i] = 0xFFFFFFFF;
1723 info->underflow[i] = 0xFFFFFFFF;
1726 duprintf("translate_compat_table: size %u\n", info->size);
1728 xt_compat_lock(AF_INET6);
1729 /* Walk through entries, checking offsets. */
1730 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1731 check_compat_entry_size_and_hooks,
1732 info, &size, entry0,
1733 entry0 + total_size,
1734 hook_entries, underflows, &j, name);
1740 duprintf("translate_compat_table: %u not %u entries\n",
1745 /* Check hooks all assigned */
1746 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1747 /* Only hooks which are valid */
1748 if (!(valid_hooks & (1 << i)))
1750 if (info->hook_entry[i] == 0xFFFFFFFF) {
1751 duprintf("Invalid hook entry %u %u\n",
1752 i, hook_entries[i]);
1755 if (info->underflow[i] == 0xFFFFFFFF) {
1756 duprintf("Invalid underflow %u %u\n",
/* size now reflects the expanded (native) table size. */
1763 newinfo = xt_alloc_table_info(size);
1767 newinfo->number = number;
1768 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1769 newinfo->hook_entry[i] = info->hook_entry[i];
1770 newinfo->underflow[i] = info->underflow[i];
1772 entry1 = newinfo->entries[raw_smp_processor_id()];
1775 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1776 compat_copy_entry_from_user,
1777 &pos, &size, name, newinfo, entry1);
1778 xt_compat_flush_offsets(AF_INET6);
1779 xt_compat_unlock(AF_INET6);
1784 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1788 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Partial-failure unwind: release refs of unchecked compat entries,
 * clean up the i entries already checked in native form. */
1792 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1793 compat_release_entry, &j);
1794 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1795 xt_free_table_info(newinfo);
1799 /* And one copy for every other CPU */
1800 for_each_possible_cpu(i)
1801 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1802 memcpy(newinfo->entries[i], entry1, newinfo->size);
1806 xt_free_table_info(info);
1810 xt_free_table_info(newinfo);
1812 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1815 xt_compat_flush_offsets(AF_INET6);
1816 xt_compat_unlock(AF_INET6);
/* IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header and rule blob, translate to native format, then
 * hand off to the common __do_replace().  On translate failure the
 * blob is freed; on __do_replace failure the translated entries are
 * cleaned up (module refs dropped) before freeing. */
1821 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1824 struct compat_ip6t_replace tmp;
1825 struct xt_table_info *newinfo;
1826 void *loc_cpu_entry;
1828 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1831 /* overflow check */
1832 if (tmp.size >= INT_MAX / num_possible_cpus())
1834 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1837 newinfo = xt_alloc_table_info(tmp.size);
1841 /* choose the copy that is on our node/cpu */
1842 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1843 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1849 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1850 &newinfo, &loc_cpu_entry, tmp.size,
1851 tmp.num_entries, tmp.hook_entry,
1856 duprintf("compat_do_replace: Translated table\n");
1858 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1859 tmp.num_counters, compat_ptr(tmp.counters));
1861 goto free_newinfo_untrans;
1864 free_newinfo_untrans:
1865 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1867 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to the compat path and ADD_COUNTERS to do_add_counters()
 * with compat=1. */
1872 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1877 if (!capable(CAP_NET_ADMIN))
1881 case IP6T_SO_SET_REPLACE:
1882 ret = compat_do_replace(sock_net(sk), user, len);
1885 case IP6T_SO_SET_ADD_COUNTERS:
1886 ret = do_add_counters(sock_net(sk), user, len, 1);
1890 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply buffer:
 * table name followed by the compat-format rule blob. */
1897 struct compat_ip6t_get_entries {
1898 char name[IP6T_TABLE_MAXNAMELEN];
1900 struct compat_ip6t_entry entrytable[0];
/* Dump a table's rules to a 32-bit user buffer: snapshot counters,
 * then walk the local CPU's entry copy converting each rule with
 * compat_copy_entry_to_user. */
1904 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1905 void __user *userptr)
1907 struct xt_counters *counters;
1908 const struct xt_table_info *private = table->private;
1912 const void *loc_cpu_entry;
1915 counters = alloc_counters(table);
1916 if (IS_ERR(counters))
1917 return PTR_ERR(counters);
1919 /* choose the copy that is on our node/cpu, ...
1920 * This choice is lazy (because current thread is
1921 * allowed to migrate to another cpu)
1923 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1926 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1927 compat_copy_entry_to_user,
1928 &pos, &size, counters, &i);
/* IP6T_SO_GET_ENTRIES for 32-bit callers: validate the request
 * length against the caller-declared size, look the table up under
 * xt_compat_lock, verify the compat-converted size matches what the
 * caller expects, and dump the entries. */
1935 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1939 struct compat_ip6t_get_entries get;
1942 if (*len < sizeof(get)) {
1943 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1947 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1950 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1951 duprintf("compat_get_entries: %u != %zu\n",
1952 *len, sizeof(get) + get.size);
1956 xt_compat_lock(AF_INET6);
1957 t = xt_find_table_lock(net, AF_INET6, get.name);
1958 if (t && !IS_ERR(t)) {
1959 const struct xt_table_info *private = t->private;
1960 struct xt_table_info info;
1961 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info computes the table's size in compat layout. */
1962 ret = compat_table_info(private, &info);
1963 if (!ret && get.size == info.size) {
1964 ret = compat_copy_entries_to_user(private->size,
1965 t, uptr->entrytable);
1967 duprintf("compat_get_entries: I've got %u not %u!\n",
1968 private->size, get.size);
1971 xt_compat_flush_offsets(AF_INET6);
1975 ret = t ? PTR_ERR(t) : -ENOENT;
1977 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat GET handler falls through to the
 * native one for commands with identical layouts. */
1981 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: INFO and ENTRIES need compat
 * translation; everything else is layout-compatible and delegates
 * to do_ip6t_get_ctl(). */
1984 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1988 if (!capable(CAP_NET_ADMIN))
1992 case IP6T_SO_GET_INFO:
1993 ret = get_info(sock_net(sk), user, len, 1);
1995 case IP6T_SO_GET_ENTRIES:
1996 ret = compat_get_entries(sock_net(sk), user, len);
1999 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE and
 * ADD_COUNTERS (compat=0) are the only supported commands. */
2006 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2010 if (!capable(CAP_NET_ADMIN))
2014 case IP6T_SO_SET_REPLACE:
2015 ret = do_replace(sock_net(sk), user, len);
2018 case IP6T_SO_SET_ADD_COUNTERS:
2019 ret = do_add_counters(sock_net(sk), user, len, 0);
2023 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: table info, entry dump, and
 * match/target revision queries (the latter auto-load the
 * "ip6t_<name>" module via try_then_request_module). */
2031 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2035 if (!capable(CAP_NET_ADMIN))
2039 case IP6T_SO_GET_INFO:
2040 ret = get_info(sock_net(sk), user, len, 0);
2043 case IP6T_SO_GET_ENTRIES:
2044 ret = get_entries(sock_net(sk), user, len);
2047 case IP6T_SO_GET_REVISION_MATCH:
2048 case IP6T_SO_GET_REVISION_TARGET: {
2049 struct ip6t_get_revision rev;
2052 if (*len != sizeof(rev)) {
2056 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2061 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2066 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2069 "ip6t_%s", rev.name);
2074 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a built-in table (filter/mangle/raw...) for one netns:
 * allocate an xt_table_info, copy the template rules into the local
 * CPU's slot, translate/validate them, and register with x_tables.
 * Returns the registered table or ERR_PTR(-errno). */
2081 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2082 const struct ip6t_replace *repl)
2085 struct xt_table_info *newinfo;
/* Zeroed placeholder private; xt_register_table swaps in newinfo. */
2086 struct xt_table_info bootstrap
2087 = { 0, 0, 0, { 0 }, { 0 }, { } };
2088 void *loc_cpu_entry;
2089 struct xt_table *new_table;
2091 newinfo = xt_alloc_table_info(repl->size);
2097 /* choose the copy on our node/cpu, but dont care about preemption */
2098 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2099 memcpy(loc_cpu_entry, repl->entries, repl->size);
2101 ret = translate_table(table->name, table->valid_hooks,
2102 newinfo, loc_cpu_entry, repl->size,
2109 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2110 if (IS_ERR(new_table)) {
2111 ret = PTR_ERR(new_table);
2117 xt_free_table_info(newinfo);
2119 return ERR_PTR(ret);
/* Tear down a registered table: unregister from x_tables, run every
 * entry's cleanup (dropping match/target module refs), release the
 * extra table-module ref taken for user-added rules, and free the
 * table info. */
2122 void ip6t_unregister_table(struct xt_table *table)
2124 struct xt_table_info *private;
2125 void *loc_cpu_entry;
2126 struct module *table_owner = table->me;
2128 private = xt_unregister_table(table);
2130 /* Decrease module usage counts and free resources */
2131 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2132 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* Tables with user-added entries hold an extra module reference. */
2133 if (private->number > private->initial_entries)
2134 module_put(table_owner);
2135 xt_free_table_info(private);
2138 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2140 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2141 u_int8_t type, u_int8_t code,
/* Exact type match plus inclusive [min_code, max_code] range;
 * result is XORed with the invert flag by the (elided) tail. */
2144 return (type == test_type && code >= min_code && code <= max_code)
/* "-p icmpv6 --icmpv6-type" match: never matches non-first
 * fragments; drops packets whose ICMPv6 header can't be read
 * (truncated "tinygram"), then compares type/code against the
 * configured range with optional inversion. */
2149 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2151 const struct icmp6hdr *ic;
2152 struct icmp6hdr _icmph;
2153 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2155 /* Must not be a fragment. */
2156 if (par->fragoff != 0)
2159 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2161 /* We've been asked to examine this packet, and we
2162 * can't. Hence, no choice but to drop.
2164 duprintf("Dropping evil ICMP tinygram.\n");
2165 *par->hotdrop = true;
2169 return icmp6_type_code_match(icmpinfo->type,
2172 ic->icmp6_type, ic->icmp6_code,
2173 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2176 /* Called when user tries to insert an entry of this type. */
2177 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2179 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2181 /* Must specify no unknown invflags */
2182 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2185 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is a plain int verdict; the
 * compat hooks translate the int between 32/64-bit layouts. */
2186 static struct xt_target ip6t_standard_target __read_mostly = {
2187 .name = IP6T_STANDARD_TARGET,
2188 .targetsize = sizeof(int),
2189 .family = NFPROTO_IPV6,
2190 #ifdef CONFIG_COMPAT
2191 .compatsize = sizeof(compat_int_t),
2192 .compat_from_user = compat_standard_from_user,
2193 .compat_to_user = compat_standard_to_user,
/* ERROR target terminates built-in chains; its data is a name string. */
2197 static struct xt_target ip6t_error_target __read_mostly = {
2198 .name = IP6T_ERROR_TARGET,
2199 .target = ip6t_error,
2200 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2201 .family = NFPROTO_IPV6,
/* setsockopt/getsockopt registration covering the IP6T_SO_* range,
 * with compat handlers for 32-bit callers on 64-bit kernels. */
2204 static struct nf_sockopt_ops ip6t_sockopts = {
2206 .set_optmin = IP6T_BASE_CTL,
2207 .set_optmax = IP6T_SO_SET_MAX+1,
2208 .set = do_ip6t_set_ctl,
2209 #ifdef CONFIG_COMPAT
2210 .compat_set = compat_do_ip6t_set_ctl,
2212 .get_optmin = IP6T_BASE_CTL,
2213 .get_optmax = IP6T_SO_GET_MAX+1,
2214 .get = do_ip6t_get_ctl,
2215 #ifdef CONFIG_COMPAT
2216 .compat_get = compat_do_ip6t_get_ctl,
2218 .owner = THIS_MODULE,
/* Built-in ICMPv6 match, restricted to IPPROTO_ICMPV6 rules. */
2221 static struct xt_match icmp6_matchstruct __read_mostly = {
2223 .match = icmp6_match,
2224 .matchsize = sizeof(struct ip6t_icmp),
2225 .checkentry = icmp6_checkentry,
2226 .proto = IPPROTO_ICMPV6,
2227 .family = NFPROTO_IPV6,
/* Per-netns setup/teardown: delegates to the x_tables core, which
 * creates/removes the /proc and table state for NFPROTO_IPV6. */
2230 static int __net_init ip6_tables_net_init(struct net *net)
2232 return xt_proto_init(net, NFPROTO_IPV6);
2235 static void __net_exit ip6_tables_net_exit(struct net *net)
2237 xt_proto_fini(net, NFPROTO_IPV6);
2240 static struct pernet_operations ip6_tables_net_ops = {
2241 .init = ip6_tables_net_init,
2242 .exit = ip6_tables_net_exit,
/* Module init: register pernet ops, the two built-in targets, the
 * ICMPv6 match, and finally the sockopt interface.  Error labels
 * (partially elided here) unwind in reverse registration order. */
2245 static int __init ip6_tables_init(void)
2249 ret = register_pernet_subsys(&ip6_tables_net_ops);
2253 /* Noone else will be downing sem now, so we won't sleep */
2254 ret = xt_register_target(&ip6t_standard_target);
2257 ret = xt_register_target(&ip6t_error_target);
2260 ret = xt_register_match(&icmp6_matchstruct);
2264 /* Register setsockopt */
2265 ret = nf_register_sockopt(&ip6t_sockopts);
2269 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind path: undo registrations in reverse order. */
2273 xt_unregister_match(&icmp6_matchstruct);
2275 xt_unregister_target(&ip6t_error_target);
2277 xt_unregister_target(&ip6t_standard_target);
2279 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: mirror of ip6_tables_init in reverse order. */
2284 static void __exit ip6_tables_fini(void)
2286 nf_unregister_sockopt(&ip6t_sockopts);
2288 xt_unregister_match(&icmp6_matchstruct);
2289 xt_unregister_target(&ip6t_error_target);
2290 xt_unregister_target(&ip6t_standard_target);
2292 unregister_pernet_subsys(&ip6_tables_net_ops);
2296 * find the offset to specified header or the protocol number of last header
2297 * if target < 0. "last header" is transport protocol header, ESP, or
2300 * If target header is found, its offset is set in *offset and return protocol
2301 * number. Otherwise, return -1.
2303 * If the first fragment doesn't contain the final protocol header or
2304 * NEXTHDR_NONE it is considered invalid.
2306 * Note that non-1st fragment is special case that "the protocol number
2307 * of last header" is "next header" field in Fragment header. In this case,
2308 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2312 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2313 int target, unsigned short *fragoff)
2315 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2316 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2317 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the target header (or a
 * non-extension / NEXTHDR_NONE header) is reached. */
2322 while (nexthdr != target) {
2323 struct ipv6_opt_hdr _hdr, *hp;
2324 unsigned int hdrlen;
2326 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2332 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2335 if (nexthdr == NEXTHDR_FRAGMENT) {
2336 unsigned short _frag_off;
2338 fp = skb_header_pointer(skb,
2339 start+offsetof(struct frag_hdr,
/* Mask off the M flag and reserved bits; keep the 13-bit
 * fragment offset (in 8-byte units, already shifted). */
2346 _frag_off = ntohs(*fp) & ~0x7;
2349 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2350 hp->nexthdr == NEXTHDR_NONE)) {
2352 *fragoff = _frag_off;
/* AH's hdrlen counts 4-byte units (RFC 4302); other extension
 * headers use 8-byte units via ipv6_optlen(). */
2358 } else if (nexthdr == NEXTHDR_AUTH)
2359 hdrlen = (hp->hdrlen + 2) << 2;
2361 hdrlen = ipv6_optlen(hp);
2363 nexthdr = hp->nexthdr;
/* Public API for table-providing modules (ip6table_filter etc.). */
2372 EXPORT_SYMBOL(ip6t_register_table);
2373 EXPORT_SYMBOL(ip6t_unregister_table);
2374 EXPORT_SYMBOL(ip6t_do_table);
2375 EXPORT_SYMBOL(ip6t_ext_hdr);
2376 EXPORT_SYMBOL(ipv6_find_hdr);
2378 module_init(ip6_tables_init);
2379 module_exit(ip6_tables_fini);