2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/* NOTE(review): this extraction has dropped lines (embedded original line
 * numbers are non-contiguous), so the #else/#endif lines of these
 * conditional macro definitions are missing here. In the complete file each
 * pair below is an #ifdef/#else alternative: printk-backed debug macros
 * when the debug symbol is defined, empty no-op macros otherwise. */
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
/* (missing #else) no-op variant when DEBUG_IP_FIREWALL is unset */
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
/* (missing #else) no-op variant when DEBUG_IP_FIREWALL_USER is unset */
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __FUNCTION__, __FILE__, __LINE__); \
/* (missing #else) assertion compiles away when CONFIG_NETFILTER_DEBUG is unset */
61 #define IP_NF_ASSERT(x)
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
/* NOTE(review): fragment — the extraction dropped lines (function header
 * with return type, opening brace, several locals, the return statements,
 * and the closing of the commented-out debug printout). Recover the full
 * body from upstream before building.
 *
 * Purpose (visible from the code): decide whether @skb matches the
 * address/interface/protocol part of a rule (@ip6info). FWINV applies the
 * rule's invert flags to each sub-test. On a protocol rule it also locates
 * the upper-layer header via ipv6_find_hdr() and reports the transport
 * offset (*protoff) and fragment offset (*fragoff) back to the caller. */
92 /* Returns whether matches rule or not. */
94 ip6_packet_match(const struct sk_buff *skb,
97 const struct ip6t_ip6 *ip6info,
98 unsigned int *protoff,
99 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* XOR of the raw test with the rule's invert bit implements "! test". */
105 #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface names compared word-at-a-time under the rule's mask. */
122 /* Look for ifname matches; this should unroll nicely. */
123 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
124 ret |= (((const unsigned long *)indev)[i]
125 ^ ((const unsigned long *)ip6info->iniface)[i])
126 & ((const unsigned long *)ip6info->iniface_mask)[i];
129 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
130 dprintf("VIA in mismatch (%s vs %s).%s\n",
131 indev, ip6info->iniface,
132 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
136 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
137 ret |= (((const unsigned long *)outdev)[i]
138 ^ ((const unsigned long *)ip6info->outiface)[i])
139 & ((const unsigned long *)ip6info->outiface_mask)[i];
142 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
143 dprintf("VIA out mismatch (%s vs %s).%s\n",
144 outdev, ip6info->outiface,
145 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
149 /* ... might want to do something with class and flowlabel here ... */
151 /* look for the desired protocol header */
152 if((ip6info->flags & IP6T_F_PROTO)) {
154 unsigned short _frag_off;
156 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
162 *fragoff = _frag_off;
164 dprintf("Packet protocol %hi ?= %s%hi.\n",
166 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
169 if (ip6info->proto == protohdr) {
170 if(ip6info->invflags & IP6T_INV_PROTO) {
176 /* We need match for the '-p all', too! */
177 if ((ip6info->proto != 0) &&
178 !(ip6info->invflags & IP6T_INV_PROTO))
/* NOTE(review): fragment — return type, braces and the return statements
 * were dropped by the extraction. Visible logic: validate a rule's
 * ip6t_ip6 part by rejecting unknown bits in flags and invflags. */
184 /* should be ip6 safe */
186 ip6_checkentry(const struct ip6t_ip6 *ipv6)
188 if (ipv6->flags & ~IP6T_F_MASK) {
189 duprintf("Unknown flag bits set: %08X\n",
190 ipv6->flags & ~IP6T_F_MASK);
193 if (ipv6->invflags & ~IP6T_INV_MASK) {
194 duprintf("Unknown invflag bits set: %08X\n",
195 ipv6->invflags & ~IP6T_INV_MASK);
/* NOTE(review): fragment — return type, braces and the return (presumably
 * NF_DROP in the complete file — confirm against upstream) were dropped.
 * This is the ERROR target handler: it only logs the error name stored in
 * the target data. */
202 ip6t_error(struct sk_buff *skb,
203 const struct net_device *in,
204 const struct net_device *out,
205 unsigned int hooknum,
206 const struct xt_target *target,
207 const void *targinfo)
210 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
/* NOTE(review): fragment — some parameters (offset, hotdrop), braces and
 * the two return statements were dropped by the extraction. Visible logic:
 * invoke the match's callback; a false result stops IP6T_MATCH_ITERATE. */
216 bool do_match(struct ip6t_entry_match *m,
217 const struct sk_buff *skb,
218 const struct net_device *in,
219 const struct net_device *out,
221 unsigned int protoff,
224 /* Stop iteration if it doesn't match */
225 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
226 offset, protoff, hotdrop))
/* Return a pointer to the rule entry located @offset bytes into the
 * table blob that starts at @base. Offsets come from the table's
 * hook_entry[]/underflow[] arrays or from rule verdicts.
 * (Braces reconstructed; the extraction had dropped them.) */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
238 /* All zeroes == unconditional rule. */
240 unconditional(const struct ip6t_ip6 *ipv6)
244 for (i = 0; i < sizeof(*ipv6); i++)
245 if (((char *)ipv6)[i])
248 return (i == sizeof(*ipv6));
/* NOTE(review): fragment — the closing braces/terminators of these arrays
 * and initializers were dropped by the extraction. These tables back the
 * TRACE target's packet logging: hook-number -> chain name, comment kind
 * -> string, plus the nf_loginfo passed to nf_log_packet(). */
251 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
252 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
253 /* This cries for unification! */
254 static const char *hooknames[] = {
255 [NF_INET_PRE_ROUTING] = "PREROUTING",
256 [NF_INET_LOCAL_IN] = "INPUT",
257 [NF_INET_FORWARD] = "FORWARD",
258 [NF_INET_LOCAL_OUT] = "OUTPUT",
259 [NF_INET_POST_ROUTING] = "POSTROUTING",
262 enum nf_ip_trace_comments {
263 NF_IP6_TRACE_COMMENT_RULE,
264 NF_IP6_TRACE_COMMENT_RETURN,
265 NF_IP6_TRACE_COMMENT_POLICY,
268 static const char *comments[] = {
269 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
270 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
271 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
274 static struct nf_loginfo trace_loginfo = {
275 .type = NF_LOG_TYPE_LOG,
279 .logflags = NF_LOG_MASK,
/* NOTE(review): fragment — return type, braces, rulenum bookkeeping and
 * the return statements were dropped by the extraction. Callback for
 * IP6T_ENTRY_ITERATE during tracing: walks entries up to the matched one,
 * tracking the current chain name (ERROR target = user-chain head) and
 * classifying the matched rule as rule/return/policy. */
285 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
286 char *hookname, char **chainname,
287 char **comment, unsigned int *rulenum)
289 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
291 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
292 /* Head of user chain: ERROR target with chainname */
293 *chainname = t->target.data;
298 if (s->target_offset == sizeof(struct ip6t_entry)
299 && strcmp(t->target.u.kernel.target->name,
300 IP6T_STANDARD_TARGET) == 0
302 && unconditional(&s->ipv6)) {
303 /* Tail of chains: STANDARD target (return/policy) */
304 *comment = *chainname == hookname
305 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
306 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/* NOTE(review): fragment — the hook/tablename parameters, table_base
 * declaration, opening brace and closing brace were dropped by the
 * extraction. Logs a TRACE line ("table:chain:comment:rulenum") for a
 * packet that hit a rule while skb->nf_trace is set. */
315 static void trace_packet(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
320 struct xt_table_info *private,
321 struct ip6t_entry *e)
324 struct ip6t_entry *root;
325 char *hookname, *chainname, *comment;
326 unsigned int rulenum = 0;
/* Per-CPU copy of the rule blob for the executing CPU. */
328 table_base = (void *)private->entries[smp_processor_id()];
329 root = get_entry(table_base, private->hook_entry[hook]);
331 hookname = chainname = (char *)hooknames[hook];
332 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
334 IP6T_ENTRY_ITERATE(root,
335 private->size - private->hook_entry[hook],
336 get_chainname_rulenum,
337 e, hookname, &chainname, &comment, &rulenum);
339 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
340 "TRACE: %s:%s:%s:%u ",
341 tablename, chainname, comment, rulenum);
/* NOTE(review): fragment — this is the main rule-traversal loop and many
 * lines were dropped by the extraction (function header/hook parameter,
 * the do/while skeleton, verdict handling branches, final return).
 * Do not attempt to rebuild by hand; recover from upstream.
 *
 * Visible structure: take the table read-lock, fetch this CPU's copy of
 * the rules, start at the hook's entry point, and walk entries. For each
 * match: bump byte/packet counters, then either resolve a standard
 * verdict (absolute verdict, RETURN via the back pointer saved in the
 * next entry, or jump/goto), or call the extension target's handler. */
345 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
347 ip6t_do_table(struct sk_buff *skb,
349 const struct net_device *in,
350 const struct net_device *out,
351 struct xt_table *table)
353 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
355 unsigned int protoff = 0;
356 bool hotdrop = false;
357 /* Initializing verdict to NF_DROP keeps gcc happy. */
358 unsigned int verdict = NF_DROP;
359 const char *indev, *outdev;
361 struct ip6t_entry *e, *back;
362 struct xt_table_info *private;
365 indev = in ? in->name : nulldevname;
366 outdev = out ? out->name : nulldevname;
367 /* We handle fragments by dealing with the first fragment as
368 * if it was a normal packet. All other fragments are treated
369 * normally, except that they will NEVER match rules that ask
370 * things we don't know, ie. tcp syn flag or ports). If the
371 * rule is also a fragment-specific rule, non-fragments won't
374 read_lock_bh(&table->lock);
375 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
376 private = table->private;
377 table_base = (void *)private->entries[smp_processor_id()];
378 e = get_entry(table_base, private->hook_entry[hook]);
380 /* For return from builtin chain */
381 back = get_entry(table_base, private->underflow[hook]);
386 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
387 &protoff, &offset, &hotdrop)) {
388 struct ip6t_entry_target *t;
390 if (IP6T_MATCH_ITERATE(e, do_match,
392 offset, protoff, &hotdrop) != 0)
395 ADD_COUNTER(e->counters,
396 ntohs(ipv6_hdr(skb)->payload_len) +
397 sizeof(struct ipv6hdr), 1);
399 t = ip6t_get_target(e);
400 IP_NF_ASSERT(t->u.kernel.target);
402 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
403 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
404 /* The packet is traced: log it */
405 if (unlikely(skb->nf_trace))
406 trace_packet(skb, hook, in, out,
407 table->name, private, e);
/* NULL target callback marks the built-in standard target. */
409 /* Standard target? */
410 if (!t->u.kernel.target->target) {
413 v = ((struct ip6t_standard_target *)t)->verdict;
415 /* Pop from stack? */
416 if (v != IP6T_RETURN) {
/* Negative verdicts encode absolute results: -v - 1. */
417 verdict = (unsigned)(-v) - 1;
421 back = get_entry(table_base,
425 if (table_base + v != (void *)e + e->next_offset
426 && !(e->ipv6.flags & IP6T_F_GOTO)) {
427 /* Save old back ptr in next entry */
428 struct ip6t_entry *next
429 = (void *)e + e->next_offset;
431 = (void *)back - table_base;
432 /* set back pointer to next entry */
436 e = get_entry(table_base, v);
438 /* Targets which reenter must return
440 #ifdef CONFIG_NETFILTER_DEBUG
441 ((struct ip6t_entry *)table_base)->comefrom
444 verdict = t->u.kernel.target->target(skb,
450 #ifdef CONFIG_NETFILTER_DEBUG
451 if (((struct ip6t_entry *)table_base)->comefrom
453 && verdict == IP6T_CONTINUE) {
454 printk("Target %s reentered!\n",
455 t->u.kernel.target->name);
458 ((struct ip6t_entry *)table_base)->comefrom
461 if (verdict == IP6T_CONTINUE)
462 e = (void *)e + e->next_offset;
470 e = (void *)e + e->next_offset;
474 #ifdef CONFIG_NETFILTER_DEBUG
475 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
477 read_unlock_bh(&table->lock);
479 #ifdef DEBUG_ALLOW_ALL
/* NOTE(review): fragment — the extraction dropped the return type, the
 * inner for(;;)/do-while skeleton, several return statements and closing
 * braces. Loop-detection/validation pass over a candidate table: abuses
 * counters.pcnt as a back-pointer stack (restored to 0 on the way out)
 * and comefrom as a "which hooks reach this rule" bitmask. */
488 /* Figures out from what hook each rule can be called: returns 0 if
489 there are loops. Puts hook bitmask in comefrom. */
491 mark_source_chains(struct xt_table_info *newinfo,
492 unsigned int valid_hooks, void *entry0)
496 /* No recursion; use packet counter to save back ptrs (reset
497 to 0 as we leave), and comefrom to save source hook bitmask */
498 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
499 unsigned int pos = newinfo->hook_entry[hook];
500 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
502 if (!(valid_hooks & (1 << hook)))
505 /* Set initial back pointer. */
506 e->counters.pcnt = pos;
509 struct ip6t_standard_target *t
510 = (void *)ip6t_get_target(e);
511 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS marks "currently on the walk path":
 * seeing it again means a cycle. (Message says "iptables"
 * although this is ip6_tables — upstream copy-paste.) */
513 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
514 printk("iptables: loop hook %u pos %u %08X.\n",
515 hook, pos, e->comefrom);
518 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
520 /* Unconditional return/END. */
521 if ((e->target_offset == sizeof(struct ip6t_entry)
522 && (strcmp(t->target.u.user.name,
523 IP6T_STANDARD_TARGET) == 0)
525 && unconditional(&e->ipv6)) || visited) {
526 unsigned int oldpos, size;
528 if (t->verdict < -NF_MAX_VERDICT - 1) {
529 duprintf("mark_source_chains: bad "
530 "negative verdict (%i)\n",
535 /* Return: backtrack through the last
538 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
539 #ifdef DEBUG_IP_FIREWALL_USER
541 & (1 << NF_INET_NUMHOOKS)) {
542 duprintf("Back unset "
549 pos = e->counters.pcnt;
550 e->counters.pcnt = 0;
552 /* We're at the start. */
556 e = (struct ip6t_entry *)
558 } while (oldpos == pos + e->next_offset);
561 size = e->next_offset;
562 e = (struct ip6t_entry *)
563 (entry0 + pos + size);
564 e->counters.pcnt = pos;
567 int newpos = t->verdict;
569 if (strcmp(t->target.u.user.name,
570 IP6T_STANDARD_TARGET) == 0
572 if (newpos > newinfo->size -
573 sizeof(struct ip6t_entry)) {
574 duprintf("mark_source_chains: "
575 "bad verdict (%i)\n",
579 /* This a jump; chase it. */
580 duprintf("Jump rule %u -> %u\n",
583 /* ... this is a fallthru */
584 newpos = pos + e->next_offset;
586 e = (struct ip6t_entry *)
588 e->counters.pcnt = pos;
593 duprintf("Finished chain %u\n", hook);
/* NOTE(review): fragments of three helpers — return types, braces and
 * return statements were dropped by the extraction.
 *
 * cleanup_match(): ITERATE callback — release one match (optional
 * destroy hook, then drop the module reference). The (*i)-- guard lets a
 * caller unwind only the first N matches after a partial failure. */
599 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
601 if (i && (*i)-- == 0)
604 if (m->u.kernel.match->destroy)
605 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
606 module_put(m->u.kernel.match->me);
/* check_entry(): structural sanity of one rule — valid ip6t_ip6 part and
 * target data that fits between target_offset and next_offset. */
611 check_entry(struct ip6t_entry *e, const char *name)
613 struct ip6t_entry_target *t;
615 if (!ip6_checkentry(&e->ipv6)) {
616 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
620 if (e->target_offset + sizeof(struct ip6t_entry_target) >
624 t = ip6t_get_target(e);
625 if (e->target_offset + t->u.target_size > e->next_offset)
/* check_match(): run the x_tables generic check plus the match's own
 * checkentry hook for one already-resolved match. */
631 static inline int check_match(struct ip6t_entry_match *m, const char *name,
632 const struct ip6t_ip6 *ipv6,
633 unsigned int hookmask, unsigned int *i)
635 struct xt_match *match;
638 match = m->u.kernel.match;
639 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
640 name, hookmask, ipv6->proto,
641 ipv6->invflags & IP6T_INV_PROTO);
642 if (!ret && m->u.kernel.match->checkentry
643 && !m->u.kernel.match->checkentry(name, ipv6, match, m->data,
645 duprintf("ip_tables: check failed for `%s'.\n",
646 m->u.kernel.match->name);
/* NOTE(review): fragments of three helpers — the extraction dropped
 * return types, some parameters, braces, error labels and returns.
 *
 * find_check_match(): resolve a match extension by name (auto-loading the
 * "ip6t_<name>" module if needed), then validate it; the module reference
 * is dropped on validation failure. */
655 find_check_match(struct ip6t_entry_match *m,
657 const struct ip6t_ip6 *ipv6,
658 unsigned int hookmask,
661 struct xt_match *match;
664 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
666 "ip6t_%s", m->u.user.name);
667 if (IS_ERR(match) || !match) {
668 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
669 return match ? PTR_ERR(match) : -ENOENT;
671 m->u.kernel.match = match;
673 ret = check_match(m, name, ipv6, hookmask, i);
679 module_put(m->u.kernel.match->me);
/* check_target(): generic x_tables target check plus the target's own
 * checkentry hook, for an already-resolved target. */
683 static inline int check_target(struct ip6t_entry *e, const char *name)
685 struct ip6t_entry_target *t;
686 struct xt_target *target;
689 t = ip6t_get_target(e);
690 target = t->u.kernel.target;
691 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
692 name, e->comefrom, e->ipv6.proto,
693 e->ipv6.invflags & IP6T_INV_PROTO);
694 if (!ret && t->u.kernel.target->checkentry
695 && !t->u.kernel.target->checkentry(name, e, target, t->data,
697 duprintf("ip_tables: check failed for `%s'.\n",
698 t->u.kernel.target->name);
/* find_check_entry(): validate one full rule — structural check, resolve
 * and check every match, resolve (auto-load) and check the target; on
 * failure unwind the matches already taken via cleanup_match. */
705 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
708 struct ip6t_entry_target *t;
709 struct xt_target *target;
713 ret = check_entry(e, name);
718 ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
721 goto cleanup_matches;
723 t = ip6t_get_target(e);
724 target = try_then_request_module(xt_find_target(AF_INET6,
727 "ip6t_%s", t->u.user.name);
728 if (IS_ERR(target) || !target) {
729 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
730 ret = target ? PTR_ERR(target) : -ENOENT;
731 goto cleanup_matches;
733 t->u.kernel.target = target;
735 ret = check_target(e, name);
742 module_put(t->u.kernel.target->me);
744 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* NOTE(review): fragment — return type, base parameter, braces, the
 * next_offset test's first half, and returns were dropped. ITERATE
 * callback used while parsing a user-supplied blob: checks alignment and
 * minimum entry size, records which entries sit exactly at hook
 * entry/underflow offsets, and zeroes the counters. */
749 check_entry_size_and_hooks(struct ip6t_entry *e,
750 struct xt_table_info *newinfo,
752 unsigned char *limit,
753 const unsigned int *hook_entries,
754 const unsigned int *underflows,
759 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
760 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
761 duprintf("Bad offset %p\n", e);
766 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
767 duprintf("checking: element %p size %u\n",
772 /* Check hooks & underflows */
773 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h])
777 newinfo->underflow[h] = underflows[h];
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
/* NOTE(review): fragment — return type, braces and the return were
 * dropped. Releases one rule completely: all matches (via cleanup_match)
 * then the target (optional destroy hook + module reference). The i guard
 * supports partial unwinds. */
792 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
794 struct ip6t_entry_target *t;
796 if (i && (*i)-- == 0)
799 /* Cleanup all matches */
800 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
801 t = ip6t_get_target(e);
802 if (t->u.kernel.target->destroy)
803 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
804 module_put(t->u.kernel.target->me);
/* NOTE(review): fragment — the extraction dropped the entry0/size/number
 * parameters, braces, several error paths and the returns. Full pipeline
 * for installing a user-supplied table: size/offset walk, hook
 * assignment check, loop detection (mark_source_chains), per-entry
 * match/target validation, then replication to every other CPU's copy. */
808 /* Checks and translates the user-supplied table segment (held in
811 translate_table(const char *name,
812 unsigned int valid_hooks,
813 struct xt_table_info *newinfo,
817 const unsigned int *hook_entries,
818 const unsigned int *underflows)
823 newinfo->size = size;
824 newinfo->number = number;
826 /* Init all hooks to impossible value. */
827 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
828 newinfo->hook_entry[i] = 0xFFFFFFFF;
829 newinfo->underflow[i] = 0xFFFFFFFF;
832 duprintf("translate_table: size %u\n", newinfo->size);
834 /* Walk through entries, checking offsets. */
835 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
836 check_entry_size_and_hooks,
840 hook_entries, underflows, &i);
845 duprintf("translate_table: %u not %u entries\n",
850 /* Check hooks all assigned */
851 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
852 /* Only hooks which are valid */
853 if (!(valid_hooks & (1 << i)))
855 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
856 duprintf("Invalid hook entry %u %u\n",
860 if (newinfo->underflow[i] == 0xFFFFFFFF) {
861 duprintf("Invalid underflow %u %u\n",
867 if (!mark_source_chains(newinfo, valid_hooks, entry0))
870 /* Finally, each sanity check must pass */
872 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
873 find_check_entry, name, size, &i);
/* On failure: undo only the i entries already checked. */
876 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
881 /* And one copy for every other CPU */
882 for_each_possible_cpu(i) {
883 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
884 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* NOTE(review): fragments of four counter helpers — return types, the
 * index parameter (*i), braces and returns were dropped.
 *
 * add_entry_to_counter(): accumulate one entry's counters into total[]. */
892 add_entry_to_counter(const struct ip6t_entry *e,
893 struct xt_counters total[],
896 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* set_entry_to_counter(): overwrite total[] with one entry's counters
 * (used for the first CPU so no prior memset is needed). */
903 set_entry_to_counter(const struct ip6t_entry *e,
904 struct ip6t_counters total[],
907 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters(): snapshot all per-CPU rule counters into counters[] —
 * SET from the current CPU's copy, then ADD every other CPU's copy. */
914 get_counters(const struct xt_table_info *t,
915 struct xt_counters counters[])
921 /* Instead of clearing (by a previous call to memset())
922 * the counters and using adds, we set the counters
923 * with data used by 'current' CPU
924 * We dont care about preemption here.
926 curcpu = raw_smp_processor_id();
929 IP6T_ENTRY_ITERATE(t->entries[curcpu],
931 set_entry_to_counter,
935 for_each_possible_cpu(cpu) {
939 IP6T_ENTRY_ITERATE(t->entries[cpu],
941 add_entry_to_counter,
/* alloc_counters(): vmalloc a counters array and fill it under the
 * table write-lock so userspace sees a consistent snapshot. */
947 static inline struct xt_counters *alloc_counters(struct xt_table *table)
949 unsigned int countersize;
950 struct xt_counters *counters;
951 struct xt_table_info *private = table->private;
953 /* We need atomic snapshot of counters: rest doesn't change
954 (other than comefrom, which userspace doesn't care
956 countersize = sizeof(struct xt_counters) * private->number;
957 counters = vmalloc_node(countersize, numa_node_id());
959 if (counters == NULL)
960 return ERR_PTR(-ENOMEM);
962 /* First, sum counters... */
963 write_lock_bh(&table->lock);
964 get_counters(private, counters);
965 write_unlock_bh(&table->lock);
/* NOTE(review): fragment — return type, some locals, braces, error
 * handling (free paths) and the return were dropped. Copies the raw rule
 * blob to userspace, then patches each entry in the user buffer with the
 * snapshotted counters and with the user-visible match/target names
 * (kernel pointers in the blob are meaningless to userspace). */
971 copy_entries_to_user(unsigned int total_size,
972 struct xt_table *table,
973 void __user *userptr)
975 unsigned int off, num;
976 struct ip6t_entry *e;
977 struct xt_counters *counters;
978 struct xt_table_info *private = table->private;
982 counters = alloc_counters(table);
983 if (IS_ERR(counters))
984 return PTR_ERR(counters);
986 /* choose the copy that is on our node/cpu, ...
987 * This choice is lazy (because current thread is
988 * allowed to migrate to another cpu)
990 loc_cpu_entry = private->entries[raw_smp_processor_id()];
991 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
996 /* FIXME: use iterator macros --RR */
997 /* ... then go back and fix counters and names */
998 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1000 struct ip6t_entry_match *m;
1001 struct ip6t_entry_target *t;
1003 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1004 if (copy_to_user(userptr + off
1005 + offsetof(struct ip6t_entry, counters),
1007 sizeof(counters[num])) != 0) {
1012 for (i = sizeof(struct ip6t_entry);
1013 i < e->target_offset;
1014 i += m->u.match_size) {
1017 if (copy_to_user(userptr + off + i
1018 + offsetof(struct ip6t_entry_match,
1020 m->u.kernel.match->name,
1021 strlen(m->u.kernel.match->name)+1)
1028 t = ip6t_get_target(e);
1029 if (copy_to_user(userptr + off + e->target_offset
1030 + offsetof(struct ip6t_entry_target,
1032 t->u.kernel.target->name,
1033 strlen(t->u.kernel.target->name)+1) != 0) {
1044 #ifdef CONFIG_COMPAT
1045 static void compat_standard_from_user(void *dst, void *src)
1047 int v = *(compat_int_t *)src;
1050 v += xt_compat_calc_jump(AF_INET6, v);
1051 memcpy(dst, &v, sizeof(v));
1054 static int compat_standard_to_user(void __user *dst, void *src)
1056 compat_int_t cv = *(int *)src;
1059 cv -= xt_compat_calc_jump(AF_INET6, cv);
1060 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1064 compat_calc_match(struct ip6t_entry_match *m, int *size)
1066 *size += xt_compat_match_offset(m->u.kernel.match);
/* NOTE(review): fragment — some locals (off, ret, i), braces and the
 * return were dropped. Computes how much smaller one native entry is in
 * compat layout, shrinks newinfo->size accordingly, records the
 * per-entry offset delta, and pulls any hook entry/underflow offsets
 * that lie after this entry back by the same delta. */
1070 static int compat_calc_entry(struct ip6t_entry *e,
1071 const struct xt_table_info *info,
1072 void *base, struct xt_table_info *newinfo)
1074 struct ip6t_entry_target *t;
1075 unsigned int entry_offset;
1078 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1079 entry_offset = (void *)e - base;
1080 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1081 t = ip6t_get_target(e);
1082 off += xt_compat_target_offset(t->u.kernel.target);
1083 newinfo->size -= off;
1084 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1088 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1089 if (info->hook_entry[i] &&
1090 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1091 newinfo->hook_entry[i] -= off;
1092 if (info->underflow[i] &&
1093 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1094 newinfo->underflow[i] -= off;
/* NOTE(review): fragment — braces, the error return for NULL args and
 * the trailing iterate argument(s) were dropped. Builds the compat-sized
 * view of a table: copy the header fields, then walk the local CPU's
 * entries with compat_calc_entry to shrink sizes/offsets. */
1099 static int compat_table_info(const struct xt_table_info *info,
1100 struct xt_table_info *newinfo)
1102 void *loc_cpu_entry;
1104 if (!newinfo || !info)
1107 /* we dont care about newinfo->entries[] */
1108 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1109 newinfo->initial_entries = 0;
1110 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1111 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1112 compat_calc_entry, info, loc_cpu_entry,
/* NOTE(review): fragment — locals (t, ret), braces, error returns, the
 * module-put/unlock tail and compat branches' #endif lines were dropped.
 * Implements the IP6T_SO_GET_INFO getsockopt: look up the named table
 * (auto-loading "ip6table_<name>") and return hook offsets, entry count
 * and blob size — recomputed in compat layout when @compat is set. */
1117 static int get_info(void __user *user, int *len, int compat)
1119 char name[IP6T_TABLE_MAXNAMELEN];
1123 if (*len != sizeof(struct ip6t_getinfo)) {
1124 duprintf("length %u != %zu\n", *len,
1125 sizeof(struct ip6t_getinfo));
1129 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Userspace may not have terminated the name — force it. */
1132 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1133 #ifdef CONFIG_COMPAT
1135 xt_compat_lock(AF_INET6);
1137 t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
1138 "ip6table_%s", name);
1139 if (t && !IS_ERR(t)) {
1140 struct ip6t_getinfo info;
1141 struct xt_table_info *private = t->private;
1143 #ifdef CONFIG_COMPAT
1145 struct xt_table_info tmp;
1146 ret = compat_table_info(private, &tmp);
1147 xt_compat_flush_offsets(AF_INET6);
1151 info.valid_hooks = t->valid_hooks;
1152 memcpy(info.hook_entry, private->hook_entry,
1153 sizeof(info.hook_entry));
1154 memcpy(info.underflow, private->underflow,
1155 sizeof(info.underflow));
1156 info.num_entries = private->number;
1157 info.size = private->size;
1158 strcpy(info.name, name);
1160 if (copy_to_user(user, &info, *len) != 0)
1168 ret = t ? PTR_ERR(t) : -ENOENT;
1169 #ifdef CONFIG_COMPAT
1171 xt_compat_unlock(AF_INET6);
/* NOTE(review): fragment — return type, locals (ret, t), braces, error
 * returns and the unlock/module-put tail were dropped. Implements the
 * IP6T_SO_GET_ENTRIES getsockopt: validate the requested size against
 * the live table and copy the rules out via copy_entries_to_user(). */
1177 get_entries(struct ip6t_get_entries __user *uptr, int *len)
1180 struct ip6t_get_entries get;
1183 if (*len < sizeof(get)) {
1184 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1187 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1189 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1190 duprintf("get_entries: %u != %zu\n",
1191 *len, sizeof(get) + get.size);
1195 t = xt_find_table_lock(AF_INET6, get.name);
1196 if (t && !IS_ERR(t)) {
1197 struct xt_table_info *private = t->private;
1198 duprintf("t->private->number = %u\n", private->number);
1199 if (get.size == private->size)
1200 ret = copy_entries_to_user(private->size,
1201 t, uptr->entrytable);
1203 duprintf("get_entries: I've got %u not %u!\n",
1204 private->size, get.size);
1210 ret = t ? PTR_ERR(t) : -ENOENT;
/* NOTE(review): fragment — return type, locals (ret, t), braces, the
 * module get/put bookkeeping between the two initial_entries tests, the
 * error labels' bodies and the final return were dropped. Core of table
 * replacement: swap in @newinfo via xt_replace_table(), adjust module
 * refcounts, snapshot the old table's counters for userspace, and tear
 * the old table down. */
1216 __do_replace(const char *name, unsigned int valid_hooks,
1217 struct xt_table_info *newinfo, unsigned int num_counters,
1218 void __user *counters_ptr)
1222 struct xt_table_info *oldinfo;
1223 struct xt_counters *counters;
1224 void *loc_cpu_old_entry;
1227 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1234 t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
1235 "ip6table_%s", name);
1236 if (!t || IS_ERR(t)) {
1237 ret = t ? PTR_ERR(t) : -ENOENT;
1238 goto free_newinfo_counters_untrans;
1242 if (valid_hooks != t->valid_hooks) {
1243 duprintf("Valid hook crap: %08X vs %08X\n",
1244 valid_hooks, t->valid_hooks);
1249 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1253 /* Update module usage count based on number of rules */
1254 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1255 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1256 if ((oldinfo->number > oldinfo->initial_entries) ||
1257 (newinfo->number <= oldinfo->initial_entries))
1259 if ((oldinfo->number > oldinfo->initial_entries) &&
1260 (newinfo->number <= oldinfo->initial_entries))
1263 /* Get the old counters. */
1264 get_counters(oldinfo, counters);
1265 /* Decrease module usage counts and free resource */
1266 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1267 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1269 xt_free_table_info(oldinfo);
1270 if (copy_to_user(counters_ptr, counters,
1271 sizeof(struct xt_counters) * num_counters) != 0)
1280 free_newinfo_counters_untrans:
/* NOTE(review): fragment — return type, the ret local, braces, several
 * error returns (e.g. newinfo NULL check, size check for the user copy)
 * and the final return were dropped. Implements IP6T_SO_SET_REPLACE:
 * copy the header + rule blob from userspace, validate/translate it,
 * then hand off to __do_replace(); on any failure after allocation the
 * new table is cleaned up and freed. */
1287 do_replace(void __user *user, unsigned int len)
1290 struct ip6t_replace tmp;
1291 struct xt_table_info *newinfo;
1292 void *loc_cpu_entry;
1294 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1297 /* overflow check */
1298 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1301 newinfo = xt_alloc_table_info(tmp.size);
1305 /* choose the copy that is on our node/cpu */
1306 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1307 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1313 ret = translate_table(tmp.name, tmp.valid_hooks,
1314 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1315 tmp.hook_entry, tmp.underflow);
1319 duprintf("ip_tables: Translated table\n");
1321 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1322 tmp.num_counters, tmp.counters);
1324 goto free_newinfo_untrans;
1327 free_newinfo_untrans:
1328 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1330 xt_free_table_info(newinfo);
/* NOTE(review): fragment — return type, the index parameter (*i),
 * braces, the (*i)++ and return were dropped. ITERATE callback for
 * IP6T_SO_SET_ADD_COUNTERS: add the userspace-supplied deltas for rule
 * *i onto that rule's counters. */
1334 /* We're lazy, and add to the first CPU; overflow works its fey magic
1335 * and everything is OK. */
1337 add_counter_to_entry(struct ip6t_entry *e,
1338 const struct xt_counters addme[],
1342 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1344 (long unsigned int)e->counters.pcnt,
1345 (long unsigned int)e->counters.bcnt,
1346 (long unsigned int)addme[*i].pcnt,
1347 (long unsigned int)addme[*i].bcnt);
1350 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* NOTE(review): fragment — return type, several locals (ret, size, name,
 * ptmp setup, t), braces, the compat #else/#endif lines, error labels'
 * bodies and returns were dropped. Implements IP6T_SO_SET_ADD_COUNTERS:
 * read the (possibly 32-bit compat) header, copy the counter deltas,
 * then apply them to the live table's first-CPU copy under the table
 * write-lock. */
1357 do_add_counters(void __user *user, unsigned int len, int compat)
1360 struct xt_counters_info tmp;
1361 struct xt_counters *paddc;
1362 unsigned int num_counters;
1367 struct xt_table_info *private;
1369 void *loc_cpu_entry;
1370 #ifdef CONFIG_COMPAT
1371 struct compat_xt_counters_info compat_tmp;
1375 size = sizeof(struct compat_xt_counters_info);
1380 size = sizeof(struct xt_counters_info);
1383 if (copy_from_user(ptmp, user, size) != 0)
1386 #ifdef CONFIG_COMPAT
1388 num_counters = compat_tmp.num_counters;
1389 name = compat_tmp.name;
1393 num_counters = tmp.num_counters;
/* Reject mismatched lengths before trusting num_counters. */
1397 if (len != size + num_counters * sizeof(struct xt_counters))
1400 paddc = vmalloc_node(len - size, numa_node_id());
1404 if (copy_from_user(paddc, user + size, len - size) != 0) {
1409 t = xt_find_table_lock(AF_INET6, name);
1410 if (!t || IS_ERR(t)) {
1411 ret = t ? PTR_ERR(t) : -ENOENT;
1415 write_lock_bh(&t->lock);
1416 private = t->private;
1417 if (private->number != num_counters) {
1419 goto unlock_up_free;
1423 /* Choose the copy that is on our node */
1424 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1425 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1427 add_counter_to_entry,
1431 write_unlock_bh(&t->lock);
/* NOTE(review): fragments — the struct below is missing several fields
 * (valid_hooks, num_entries, size, num_counters — the embedded line
 * numbers skip 1443-1445 and 1448) and its closing brace; the function is
 * missing its return type, an index parameter, braces, error returns and
 * the (*i)++/return tail.
 *
 * compat_ip6t_replace: 32-bit userland layout of struct ip6t_replace. */
1440 #ifdef CONFIG_COMPAT
1441 struct compat_ip6t_replace {
1442 char name[IP6T_TABLE_MAXNAMELEN];
1446 u32 hook_entry[NF_INET_NUMHOOKS];
1447 u32 underflow[NF_INET_NUMHOOKS];
1449 compat_uptr_t counters; /* struct ip6t_counters * */
1450 struct compat_ip6t_entry entries[0];
/* compat_copy_entry_to_user(): stream one native entry out to a 32-bit
 * userland buffer — header, counters, each match, then the target —
 * fixing up target_offset/next_offset for the shrunken layout. */
1454 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1455 compat_uint_t *size, struct xt_counters *counters,
1458 struct ip6t_entry_target *t;
1459 struct compat_ip6t_entry __user *ce;
1460 u_int16_t target_offset, next_offset;
1461 compat_uint_t origsize;
1466 ce = (struct compat_ip6t_entry __user *)*dstptr;
1467 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1470 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1473 *dstptr += sizeof(struct compat_ip6t_entry);
1474 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1476 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1477 target_offset = e->target_offset - (origsize - *size);
1480 t = ip6t_get_target(e);
1481 ret = xt_compat_target_to_user(t, dstptr, size);
1485 next_offset = e->next_offset - (origsize - *size);
1486 if (put_user(target_offset, &ce->target_offset))
1488 if (put_user(next_offset, &ce->next_offset))
1498 compat_find_calc_match(struct ip6t_entry_match *m,
1500 const struct ip6t_ip6 *ipv6,
1501 unsigned int hookmask,
1504 struct xt_match *match;
1506 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1507 m->u.user.revision),
1508 "ip6t_%s", m->u.user.name);
1509 if (IS_ERR(match) || !match) {
1510 duprintf("compat_check_calc_match: `%s' not found\n",
1512 return match ? PTR_ERR(match) : -ENOENT;
1514 m->u.kernel.match = match;
1515 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken for one match.  When a countdown
 * pointer i is supplied, only the first *i matches are released --
 * i.e. exactly those that were successfully set up before a failure;
 * i == NULL releases unconditionally.
 */
1522 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1524 if (i && (*i)-- == 0)
1527 module_put(m->u.kernel.match->me);
/*
 * Undo check_compat_entry_size_and_hooks() for one compat entry:
 * release every match's module reference, then the target's.  The
 * optional countdown i lets the caller stop after the first *i
 * entries when unwinding a partially-validated ruleset.
 */
1532 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1534 struct ip6t_entry_target *t;
1536 if (i && (*i)-- == 0)
1539 /* Cleanup all matches */
1540 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1541 t = compat_ip6t_get_target(e);
1542 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one 32-bit compat entry: check alignment and
 * bounds, resolve (and take references on) its matches and target,
 * record the compat->native size delta for this entry's offset with
 * xt_compat_add_offset(), and note hook entry/underflow positions that
 * coincide with this entry.  On failure the error paths below drop the
 * references acquired so far (j counts validated matches).
 */
1547 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1548 struct xt_table_info *newinfo,
1550 unsigned char *base,
1551 unsigned char *limit,
1552 unsigned int *hook_entries,
1553 unsigned int *underflows,
1557 struct ip6t_entry_target *t;
1558 struct xt_target *target;
1559 unsigned int entry_offset;
1562 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and leave room before the limit. */
1563 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1564 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1565 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Entry must at least hold its own header plus a target record. */
1569 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1570 sizeof(struct compat_xt_entry_target)) {
1571 duprintf("checking: element %p size %u\n",
1576 /* For purposes of check_entry casting the compat entry is fine */
1577 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much this entry grows when expanded to native. */
1581 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1582 entry_offset = (void *)e - (void *)base;
1584 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1585 &e->ipv6, e->comefrom, &off, &j);
1587 goto release_matches;
1589 t = compat_ip6t_get_target(e);
1590 target = try_then_request_module(xt_find_target(AF_INET6,
1592 t->u.user.revision),
1593 "ip6t_%s", t->u.user.name);
1594 if (IS_ERR(target) || !target) {
1595 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1597 ret = target ? PTR_ERR(target) : -ENOENT;
1598 goto release_matches;
1600 t->u.kernel.target = target;
1602 off += xt_compat_target_offset(target);
1604 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1608 /* Check hooks & underflows */
1609 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1610 if ((unsigned char *)e - base == hook_entries[h])
1611 newinfo->hook_entry[h] = hook_entries[h];
1612 if ((unsigned char *)e - base == underflows[h])
1613 newinfo->underflow[h] = underflows[h];
1616 /* Clear counters and comefrom */
1617 memset(&e->counters, 0, sizeof(e->counters));
1624 module_put(t->u.kernel.target->me);
/*
 * Release only the j matches resolved above.  This must use the
 * COMPAT iterator: e still has the 32-bit layout at this point, so
 * the native IP6T_MATCH_ITERATE would start iterating at
 * sizeof(struct ip6t_entry) -- past the first compat match -- and
 * module_put() garbage pointers (cf. compat_release_entry(), which
 * correctly uses COMPAT_IP6T_MATCH_ITERATE for the same cleanup).
 */
1626 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second-pass expansion: copy one 32-bit compat entry at e into the
 * native ip6t_entry layout at *dstptr, matches and target included.
 * Grows *size by the layout difference, fixes up target_offset/
 * next_offset, and shifts any hook_entry/underflow values that lie
 * beyond this entry's position.  Extension lookups and module refs
 * were already done in check_compat_entry_size_and_hooks().
 */
1631 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1632 unsigned int *size, const char *name,
1633 struct xt_table_info *newinfo, unsigned char *base)
1635 struct ip6t_entry_target *t;
1636 struct xt_target *target;
1637 struct ip6t_entry *de;
1638 unsigned int origsize;
1643 de = (struct ip6t_entry *)*dstptr;
1644 memcpy(de, e, sizeof(struct ip6t_entry));
1645 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1647 *dstptr += sizeof(struct ip6t_entry);
1648 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1650 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth; offsets move further out. */
1654 de->target_offset = e->target_offset - (origsize - *size);
1655 t = compat_ip6t_get_target(e);
1656 target = t->u.kernel.target;
1657 xt_compat_target_from_user(t, dstptr, size);
1659 de->next_offset = e->next_offset - (origsize - *size);
/* Hook/underflow offsets past this entry shift by the growth too. */
1660 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1661 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1662 newinfo->hook_entry[h] -= origsize - *size;
1663 if ((unsigned char *)de - base < newinfo->underflow[h])
1664 newinfo->underflow[h] -= origsize - *size;
/*
 * Final validation of one entry after expansion to native layout: run
 * every match's checkentry hook, then the target's.  On failure, undo
 * the matches validated so far via cleanup_match (j presumably counts
 * successfully checked matches -- the increment is inside check_match;
 * confirm against that helper).
 */
1669 static inline int compat_check_entry(struct ip6t_entry *e, const char *name,
1675 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
1678 goto cleanup_matches;
1680 ret = check_target(e, name);
1682 goto cleanup_matches;
1688 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a whole 32-bit compat ruleset into a native xt_table_info.
 *
 * Pass 1 (under xt_compat_lock): walk the compat entries with
 * check_compat_entry_size_and_hooks(), validating each one, taking
 * extension refs, accumulating the expanded size and recording
 * per-entry offset deltas.  Then verify entry count and that every
 * valid hook got an entry point and underflow.
 * Pass 2: allocate the native table and expand each entry with
 * compat_copy_entry_from_user(); flush the offset map and unlock.
 * Finally run mark_source_chains() (loop/chain validation) and
 * compat_check_entry() per entry, replicate the ruleset to every
 * other CPU's copy, and hand the result back through *pinfo.
 * Error paths release compat-side refs (compat_release_entry) and/or
 * native-side state (cleanup_entry) depending on how far we got.
 */
1693 translate_compat_table(const char *name,
1694 unsigned int valid_hooks,
1695 struct xt_table_info **pinfo,
1697 unsigned int total_size,
1698 unsigned int number,
1699 unsigned int *hook_entries,
1700 unsigned int *underflows)
1703 struct xt_table_info *newinfo, *info;
1704 void *pos, *entry0, *entry1;
1711 info->number = number;
1713 /* Init all hooks to impossible value. */
1714 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1715 info->hook_entry[i] = 0xFFFFFFFF;
1716 info->underflow[i] = 0xFFFFFFFF;
1719 duprintf("translate_compat_table: size %u\n", info->size);
1721 xt_compat_lock(AF_INET6);
1722 /* Walk through entries, checking offsets. */
1723 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1724 check_compat_entry_size_and_hooks,
1725 info, &size, entry0,
1726 entry0 + total_size,
1727 hook_entries, underflows, &j, name);
1733 duprintf("translate_compat_table: %u not %u entries\n",
1738 /* Check hooks all assigned */
1739 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1740 /* Only hooks which are valid */
1741 if (!(valid_hooks & (1 << i)))
1743 if (info->hook_entry[i] == 0xFFFFFFFF) {
1744 duprintf("Invalid hook entry %u %u\n",
1745 i, hook_entries[i]);
1748 if (info->underflow[i] == 0xFFFFFFFF) {
1749 duprintf("Invalid underflow %u %u\n",
1756 newinfo = xt_alloc_table_info(size);
1760 newinfo->number = number;
1761 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1762 newinfo->hook_entry[i] = info->hook_entry[i];
1763 newinfo->underflow[i] = info->underflow[i];
/* Expand into this CPU's copy first; replicate to the rest below. */
1765 entry1 = newinfo->entries[raw_smp_processor_id()];
1768 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1769 compat_copy_entry_from_user,
1770 &pos, &size, name, newinfo, entry1);
1771 xt_compat_flush_offsets(AF_INET6);
1772 xt_compat_unlock(AF_INET6);
1777 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1781 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Partial failure: release remaining compat refs, clean checked ones. */
1785 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1786 compat_release_entry, &j);
1787 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1788 xt_free_table_info(newinfo);
1792 /* And one copy for every other CPU */
1793 for_each_possible_cpu(i)
1794 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1795 memcpy(newinfo->entries[i], entry1, newinfo->size);
1799 xt_free_table_info(info);
1803 xt_free_table_info(newinfo);
1805 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1808 xt_compat_flush_offsets(AF_INET6);
1809 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header and ruleset, translate to the native layout via
 * translate_compat_table(), then swap the table in with __do_replace()
 * (which also copies old counters out to the compat pointer).
 */
1814 compat_do_replace(void __user *user, unsigned int len)
1817 struct compat_ip6t_replace tmp;
1818 struct xt_table_info *newinfo;
1819 void *loc_cpu_entry;
1821 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1824 /* overflow check */
/* Reject sizes that would overflow the per-CPU/counters allocations. */
1825 if (tmp.size >= INT_MAX / num_possible_cpus())
1827 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1830 newinfo = xt_alloc_table_info(tmp.size)
1834 /* choose the copy that is on our node/cpu */
1835 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1836 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1842 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1843 &newinfo, &loc_cpu_entry, tmp.size,
1844 tmp.num_entries, tmp.hook_entry,
1849 duprintf("compat_do_replace: Translated table\n");
1851 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1852 tmp.num_counters, compat_ptr(tmp.counters));
1854 goto free_newinfo_untrans;
1857 free_newinfo_untrans:
/* Table was translated but not installed: drop its extension refs. */
1858 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1860 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: routes IP6T_SO_SET_REPLACE through the
 * compat translator and ADD_COUNTERS through do_add_counters() in
 * compat mode (third argument 1).  Requires CAP_NET_ADMIN.
 */
1865 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1870 if (!capable(CAP_NET_ADMIN))
1874 case IP6T_SO_SET_REPLACE:
1875 ret = compat_do_replace(user, len);
1878 case IP6T_SO_SET_ADD_COUNTERS:
1879 ret = do_add_counters(user, len, 1);
1883 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userland layout for IP6T_SO_GET_ENTRIES: table name plus a
 * variable-length array of compat entries copied back to the caller.
 */
1890 struct compat_ip6t_get_entries {
1891 char name[IP6T_TABLE_MAXNAMELEN];
1893 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a whole table to a 32-bit caller: snapshot the counters, then
 * walk this CPU's copy of the ruleset converting each entry to compat
 * layout with compat_copy_entry_to_user().
 */
1897 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1898 void __user *userptr)
1900 struct xt_counters *counters;
1901 struct xt_table_info *private = table->private;
1905 void *loc_cpu_entry;
1908 counters = alloc_counters(table);
1909 if (IS_ERR(counters))
1910 return PTR_ERR(counters);
1912 /* choose the copy that is on our node/cpu, ...
1913 * This choice is lazy (because current thread is
1914 * allowed to migrate to another cpu)
1916 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1919 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1920 compat_copy_entry_to_user,
1921 &pos, &size, counters, &i);
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * length against the compat-sized table (compat_table_info computes the
 * compat size), then dump the entries under xt_compat_lock so the
 * per-entry offset map stays consistent during conversion.
 */
1928 compat_get_entries(struct compat_ip6t_get_entries __user *uptr, int *len)
1931 struct compat_ip6t_get_entries get;
1934 if (*len < sizeof(get)) {
1935 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1939 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller must supply exactly header + advertised ruleset size. */
1942 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1943 duprintf("compat_get_entries: %u != %zu\n",
1944 *len, sizeof(get) + get.size);
1948 xt_compat_lock(AF_INET6);
1949 t = xt_find_table_lock(AF_INET6, get.name);
1950 if (t && !IS_ERR(t)) {
1951 struct xt_table_info *private = t->private;
1952 struct xt_table_info info;
1953 duprintf("t->private->number = %u\n", private->number);
1954 ret = compat_table_info(private, &info);
1955 if (!ret && get.size == info.size) {
1956 ret = compat_copy_entries_to_user(private->size,
1957 t, uptr->entrytable);
1959 duprintf("compat_get_entries: I've got %u not %u!\n",
1960 private->size, get.size);
1963 xt_compat_flush_offsets(AF_INET6);
1967 ret = t ? PTR_ERR(t) : -ENOENT;
1969 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat get handler below falls back to the
 * native handler, which is defined later in the file. */
1973 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: INFO and ENTRIES need compat-layout
 * replies; everything else (e.g. revision queries) is layout-identical
 * and is delegated to the native do_ip6t_get_ctl().
 */
1976 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1980 if (!capable(CAP_NET_ADMIN))
1984 case IP6T_SO_GET_INFO:
1985 ret = get_info(user, len, 1);
1987 case IP6T_SO_GET_ENTRIES:
1988 ret = compat_get_entries(user, len);
1991 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: table replace and counter addition
 * (compat flag 0).  Requires CAP_NET_ADMIN.
 */
1998 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2002 if (!capable(CAP_NET_ADMIN))
2006 case IP6T_SO_SET_REPLACE:
2007 ret = do_replace(user, len);
2010 case IP6T_SO_SET_ADD_COUNTERS:
2011 ret = do_add_counters(user, len, 0);
2015 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, entry dump, and match/
 * target revision queries (the latter auto-load "ip6t_<name>" modules
 * via try_then_request_module).  Requires CAP_NET_ADMIN.
 */
2023 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2027 if (!capable(CAP_NET_ADMIN))
2031 case IP6T_SO_GET_INFO:
2032 ret = get_info(user, len, 0);
2035 case IP6T_SO_GET_ENTRIES:
2036 ret = get_entries(user, len);
2039 case IP6T_SO_GET_REVISION_MATCH:
2040 case IP6T_SO_GET_REVISION_TARGET: {
2041 struct ip6t_get_revision rev;
2044 if (*len != sizeof(rev)) {
2048 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2053 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2058 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2061 "ip6t_%s", rev.name);
2066 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table with its initial (bootstrap) ruleset:
 * allocate per-CPU table storage, copy the template entries into this
 * CPU's slot, run the full translate/validate pass, then hand the
 * result to the x_tables core.  Frees newinfo on any failure.
 */
2073 int ip6t_register_table(struct xt_table *table, const struct ip6t_replace *repl)
2076 struct xt_table_info *newinfo;
2077 struct xt_table_info bootstrap
2078 = { 0, 0, 0, { 0 }, { 0 }, { } };
2079 void *loc_cpu_entry;
2081 newinfo = xt_alloc_table_info(repl->size);
2085 /* choose the copy on our node/cpu, but dont care about preemption */
2086 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2087 memcpy(loc_cpu_entry, repl->entries, repl->size);
2089 ret = translate_table(table->name, table->valid_hooks,
2090 newinfo, loc_cpu_entry, repl->size,
2095 xt_free_table_info(newinfo);
2099 ret = xt_register_table(table, &bootstrap, newinfo);
2101 xt_free_table_info(newinfo);
/*
 * Tear down a registered table: detach it from the x_tables core, drop
 * every entry's match/target module references via cleanup_entry, and
 * free the per-CPU table storage.
 */
2108 void ip6t_unregister_table(struct xt_table *table)
2110 struct xt_table_info *private;
2111 void *loc_cpu_entry;
2113 private = xt_unregister_table(table);
2115 /* Decrease module usage counts and free resources */
2116 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2117 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2118 xt_free_table_info(private);
2121 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* (The result is XOR-ed with the rule's invert flag by the caller's
 * convention; the tail of the expression is outside this excerpt.) */
2123 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2124 u_int8_t type, u_int8_t code,
2127 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match handler for "-p icmpv6 --icmpv6-type": pull the ICMPv6
 * header at protoff and compare type/code against the rule's range,
 * honouring the IP6T_ICMP_INV invert flag.  A packet too short to
 * carry the header is treated as evil and dropped (hotdrop path).
 */
2132 icmp6_match(const struct sk_buff *skb,
2133 const struct net_device *in,
2134 const struct net_device *out,
2135 const struct xt_match *match,
2136 const void *matchinfo,
2138 unsigned int protoff,
2141 struct icmp6hdr _icmph, *ic;
2142 const struct ip6t_icmp *icmpinfo = matchinfo;
2144 /* Must not be a fragment. */
2148 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2150 /* We've been asked to examine this packet, and we
2151 * can't. Hence, no choice but to drop.
2153 duprintf("Dropping evil ICMP tinygram.\n");
2158 return icmp6_type_code_match(icmpinfo->type,
2161 ic->icmp6_type, ic->icmp6_code,
2162 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2165 /* Called when user tries to insert an entry of this type. */
/* Accept the rule only if no invert bits other than IP6T_ICMP_INV
 * are set; returns nonzero (true) when the entry is valid. */
2167 icmp6_checkentry(const char *tablename,
2169 const struct xt_match *match,
2171 unsigned int hook_mask)
2173 const struct ip6t_icmp *icmpinfo = matchinfo;
2175 /* Must specify no unknown invflags */
2176 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2179 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target (ACCEPT/DROP/RETURN/jump): payload is a bare
 * int verdict, with 32/64-bit conversion helpers for compat userland. */
2180 static struct xt_target ip6t_standard_target __read_mostly = {
2181 .name = IP6T_STANDARD_TARGET,
2182 .targetsize = sizeof(int),
2184 #ifdef CONFIG_COMPAT
2185 .compatsize = sizeof(compat_int_t),
2186 .compat_from_user = compat_standard_from_user,
2187 .compat_to_user = compat_standard_to_user,
/* ERROR target: placed at the end of each chain by iptables to mark
 * table boundaries; hitting it at runtime logs and drops (ip6t_error). */
2191 static struct xt_target ip6t_error_target __read_mostly = {
2192 .name = IP6T_ERROR_TARGET,
2193 .target = ip6t_error,
2194 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* Socket-option registration: wires the get/set (and compat) handlers
 * above into the IP6T_BASE_CTL..MAX option range for IPv6 sockets. */
2198 static struct nf_sockopt_ops ip6t_sockopts = {
2200 .set_optmin = IP6T_BASE_CTL,
2201 .set_optmax = IP6T_SO_SET_MAX+1,
2202 .set = do_ip6t_set_ctl,
2203 #ifdef CONFIG_COMPAT
2204 .compat_set = compat_do_ip6t_set_ctl,
2206 .get_optmin = IP6T_BASE_CTL,
2207 .get_optmax = IP6T_SO_GET_MAX+1,
2208 .get = do_ip6t_get_ctl,
2209 #ifdef CONFIG_COMPAT
2210 .compat_get = compat_do_ip6t_get_ctl,
2212 .owner = THIS_MODULE,
/* Built-in ICMPv6 match registration (icmp6_match/icmp6_checkentry),
 * keyed to IPPROTO_ICMPV6. */
2215 static struct xt_match icmp6_matchstruct __read_mostly = {
2217 .match = icmp6_match,
2218 .matchsize = sizeof(struct ip6t_icmp),
2219 .checkentry = icmp6_checkentry,
2220 .proto = IPPROTO_ICMPV6,
/*
 * Module init: bring up the AF_INET6 x_tables core, register the two
 * built-in targets and the ICMPv6 match, then expose the sockopt
 * interface.  The error paths below unwind in reverse order.
 */
2224 static int __init ip6_tables_init(void)
2228 ret = xt_proto_init(AF_INET6);
2232 /* Noone else will be downing sem now, so we won't sleep */
2233 ret = xt_register_target(&ip6t_standard_target);
2236 ret = xt_register_target(&ip6t_error_target);
2239 ret = xt_register_match(&icmp6_matchstruct);
2243 /* Register setsockopt */
2244 ret = nf_register_sockopt(&ip6t_sockopts);
2248 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwinding: undo registrations in reverse order. */
2252 xt_unregister_match(&icmp6_matchstruct);
2254 xt_unregister_target(&ip6t_error_target);
2256 xt_unregister_target(&ip6t_standard_target);
2258 xt_proto_fini(AF_INET6);
/* Module exit: mirror of ip6_tables_init(), unregistering everything
 * in reverse order of registration. */
2263 static void __exit ip6_tables_fini(void)
2265 nf_unregister_sockopt(&ip6t_sockopts);
2267 xt_unregister_match(&icmp6_matchstruct);
2268 xt_unregister_target(&ip6t_error_target);
2269 xt_unregister_target(&ip6t_standard_target);
2270 xt_proto_fini(AF_INET6);
2274 * find the offset to specified header or the protocol number of last header
2275 * if target < 0. "last header" is transport protocol header, ESP, or
2278 * If target header is found, its offset is set in *offset and return protocol
2279 * number. Otherwise, return -1.
2281 * If the first fragment doesn't contain the final protocol header or
2282 * NEXTHDR_NONE it is considered invalid.
2284 * Note that non-1st fragment is special case that "the protocol number
2285 * of last header" is "next header" field in Fragment header. In this case,
2286 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2290 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2291 int target, unsigned short *fragoff)
2293 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2294 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2295 unsigned int len = skb->len - start;
/* Walk the extension-header chain until target (or a terminal header). */
2300 while (nexthdr != target) {
2301 struct ipv6_opt_hdr _hdr, *hp;
2302 unsigned int hdrlen;
2304 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2310 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2313 if (nexthdr == NEXTHDR_FRAGMENT) {
2314 unsigned short _frag_off;
2316 fp = skb_header_pointer(skb,
2317 start+offsetof(struct frag_hdr,
/* Mask off the low 3 flag bits to get the fragment offset proper. */
2324 _frag_off = ntohs(*fp) & ~0x7;
2327 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2328 hp->nexthdr == NEXTHDR_NONE)) {
2330 *fragoff = _frag_off;
/* AUTH header length is in 32-bit words, unlike other ext headers. */
2336 } else if (nexthdr == NEXTHDR_AUTH)
2337 hdrlen = (hp->hdrlen + 2) << 2;
2339 hdrlen = ipv6_optlen(hp);
2341 nexthdr = hp->nexthdr;
/* Public API for in-kernel users (table modules such as ip6table_filter). */
2350 EXPORT_SYMBOL(ip6t_register_table);
2351 EXPORT_SYMBOL(ip6t_unregister_table);
2352 EXPORT_SYMBOL(ip6t_do_table);
2353 EXPORT_SYMBOL(ip6t_ext_hdr);
2354 EXPORT_SYMBOL(ipv6_find_hdr);
2356 module_init(ip6_tables_init);
2357 module_exit(ip6_tables_fini);