/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
/* Standard module metadata for the IPv6 packet-filter core. */
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/*
 * NOTE(review): garbled extract — each line carries a stray original
 * line-number prefix and the #else/#endif lines of these conditional
 * macro definitions are missing; do not compile as-is.
 * dprintf/duprintf are compiled out unless the DEBUG_* defines above
 * are enabled; IP_NF_ASSERT is active only under CONFIG_NETFILTER_DEBUG.
 */
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
/* We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
/*
 * NOTE(review): garbled extract of ip6_packet_match() — the signature's
 * first line (static bool + indev/outdev params), several closing braces
 * and return statements are missing, and every line carries a stray
 * line-number prefix; do not compile as-is.
 * Visible logic: masked src/dst address compare (FWINV applies the
 * user-supplied inversion flags), word-wise in/out interface-name
 * compare, then ipv6_find_hdr() to locate the requested protocol header.
 * The dprintf block at "SRC:/DST:" is the tail of a commented-out IPv4
 * debug fragment (note the trailing "*" + "/").
 */
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
155 unsigned short _frag_off;
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
177 /* We need match for the '-p all', too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
185 /* should be ip6 safe */
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
203 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
206 printk("ip6_tables: error: `%s'\n",
207 (const char *)par->targinfo);
212 /* Performance critical - called for every packet */
214 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215 struct xt_match_param *par)
217 par->match = m->u.kernel.match;
218 par->matchinfo = m->data;
220 /* Stop iteration if it doesn't match */
221 if (!m->u.kernel.match->match(skb, par))
/*
 * get_entry - translate a byte offset within a rule blob into an entry
 * pointer. Offsets have already been validated at table-load time.
 * Reconstructed: closing brace was missing from the extract.
 */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
233 /* All zeroes == unconditional rule. */
234 /* Mildly perf critical (only if packet tracing is on) */
236 unconditional(const struct ip6t_ip6 *ipv6)
240 for (i = 0; i < sizeof(*ipv6); i++)
241 if (((char *)ipv6)[i])
244 return (i == sizeof(*ipv6));
/*
 * NOTE(review): garbled extract — stray line-number prefixes, and the
 * closing "};" of each table plus some trace_loginfo initializers are
 * missing. These tables feed trace_packet(): hook names, the
 * rule/return/policy comment strings, and the nf_log parameters used
 * when a traced packet is logged.
 */
247 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
248 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
249 /* This cries for unification! */
250 static const char *const hooknames[] = {
251 [NF_INET_PRE_ROUTING] = "PREROUTING",
252 [NF_INET_LOCAL_IN] = "INPUT",
253 [NF_INET_FORWARD] = "FORWARD",
254 [NF_INET_LOCAL_OUT] = "OUTPUT",
255 [NF_INET_POST_ROUTING] = "POSTROUTING",
258 enum nf_ip_trace_comments {
259 NF_IP6_TRACE_COMMENT_RULE,
260 NF_IP6_TRACE_COMMENT_RETURN,
261 NF_IP6_TRACE_COMMENT_POLICY,
264 static const char *const comments[] = {
265 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
266 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
267 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
270 static struct nf_loginfo trace_loginfo = {
271 .type = NF_LOG_TYPE_LOG,
275 .logflags = NF_LOG_MASK,
/*
 * NOTE(review): garbled/truncated extract of get_chainname_rulenum() —
 * the return type line, rule-counting statements and braces are
 * missing. Iterator callback used by trace_packet(): walks entries,
 * tracking the current user-chain name (ERROR target heads a chain)
 * and classifying chain tails as "policy" (built-in hook chain) or
 * "return" (user chain).
 */
280 /* Mildly perf critical (only if packet tracing is on) */
282 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
283 char *hookname, char **chainname,
284 char **comment, unsigned int *rulenum)
286 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
288 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
289 /* Head of user chain: ERROR target with chainname */
290 *chainname = t->target.data;
295 if (s->target_offset == sizeof(struct ip6t_entry)
296 && strcmp(t->target.u.kernel.target->name,
297 IP6T_STANDARD_TARGET) == 0
299 && unconditional(&s->ipv6)) {
300 /* Tail of chains: STANDARD target (return/policy) */
301 *comment = *chainname == hookname
302 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
303 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * NOTE(review): garbled/truncated extract of trace_packet() — the hook
 * parameter, local declarations (table_base) and braces are missing.
 * Logs one "TRACE: table:chain:comment:rulenum" line for a traced
 * packet by scanning the current CPU's rule copy from the hook entry
 * up to the matched entry @e.
 */
312 static void trace_packet(struct sk_buff *skb,
314 const struct net_device *in,
315 const struct net_device *out,
316 const char *tablename,
317 struct xt_table_info *private,
318 struct ip6t_entry *e)
321 const struct ip6t_entry *root;
322 char *hookname, *chainname, *comment;
323 unsigned int rulenum = 0;
325 table_base = (void *)private->entries[smp_processor_id()];
326 root = get_entry(table_base, private->hook_entry[hook]);
328 hookname = chainname = (char *)hooknames[hook];
329 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
331 IP6T_ENTRY_ITERATE(root,
332 private->size - private->hook_entry[hook],
333 get_chainname_rulenum,
334 e, hookname, &chainname, &comment, &rulenum);
336 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
337 "TRACE: %s:%s:%s:%u ",
338 tablename, chainname, comment, rulenum);
/*
 * NOTE(review): garbled/truncated extract of ip6t_do_table(), the main
 * per-packet rule-traversal engine — many interior lines (do-while
 * loop heads, braces, the hotdrop check, returns) are missing and each
 * line carries a stray line-number prefix; do not compile as-is.
 * Visible structure: take the per-table read lock, pick this CPU's
 * rule copy, walk entries from the hook's entry point; on a match,
 * bump byte/packet counters, then either follow a standard-target
 * verdict (absolute verdict, RETURN pop via the "back" pointer saved
 * in counters, or jump/goto) or invoke the extension target and act on
 * its verdict. comefrom is used only as a reentrancy canary under
 * CONFIG_NETFILTER_DEBUG.
 */
342 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
344 ip6t_do_table(struct sk_buff *skb,
346 const struct net_device *in,
347 const struct net_device *out,
348 struct xt_table *table)
350 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
351 bool hotdrop = false;
352 /* Initializing verdict to NF_DROP keeps gcc happy. */
353 unsigned int verdict = NF_DROP;
354 const char *indev, *outdev;
356 struct ip6t_entry *e, *back;
357 struct xt_table_info *private;
358 struct xt_match_param mtpar;
359 struct xt_target_param tgpar;
362 indev = in ? in->name : nulldevname;
363 outdev = out ? out->name : nulldevname;
364 /* We handle fragments by dealing with the first fragment as
365 * if it was a normal packet. All other fragments are treated
366 * normally, except that they will NEVER match rules that ask
367 * things we don't know, ie. tcp syn flag or ports). If the
368 * rule is also a fragment-specific rule, non-fragments won't
370 mtpar.hotdrop = &hotdrop;
371 mtpar.in = tgpar.in = in;
372 mtpar.out = tgpar.out = out;
373 tgpar.hooknum = hook;
375 read_lock_bh(&table->lock);
376 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
377 private = table->private;
378 table_base = (void *)private->entries[smp_processor_id()];
379 e = get_entry(table_base, private->hook_entry[hook]);
381 /* For return from builtin chain */
382 back = get_entry(table_base, private->underflow[hook]);
387 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
388 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
389 struct ip6t_entry_target *t;
391 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
394 ADD_COUNTER(e->counters,
395 ntohs(ipv6_hdr(skb)->payload_len) +
396 sizeof(struct ipv6hdr), 1);
398 t = ip6t_get_target(e);
399 IP_NF_ASSERT(t->u.kernel.target);
401 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
402 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
403 /* The packet is traced: log it */
404 if (unlikely(skb->nf_trace))
405 trace_packet(skb, hook, in, out,
406 table->name, private, e);
408 /* Standard target? */
409 if (!t->u.kernel.target->target) {
412 v = ((struct ip6t_standard_target *)t)->verdict;
414 /* Pop from stack? */
415 if (v != IP6T_RETURN) {
416 verdict = (unsigned)(-v) - 1;
420 back = get_entry(table_base,
424 if (table_base + v != (void *)e + e->next_offset
425 && !(e->ipv6.flags & IP6T_F_GOTO)) {
426 /* Save old back ptr in next entry */
427 struct ip6t_entry *next
428 = (void *)e + e->next_offset;
430 = (void *)back - table_base;
431 /* set back pointer to next entry */
435 e = get_entry(table_base, v);
437 /* Targets which reenter must return
439 tgpar.target = t->u.kernel.target;
440 tgpar.targinfo = t->data;
442 #ifdef CONFIG_NETFILTER_DEBUG
443 ((struct ip6t_entry *)table_base)->comefrom
446 verdict = t->u.kernel.target->target(skb,
449 #ifdef CONFIG_NETFILTER_DEBUG
450 if (((struct ip6t_entry *)table_base)->comefrom
452 && verdict == IP6T_CONTINUE) {
453 printk("Target %s reentered!\n",
454 t->u.kernel.target->name);
457 ((struct ip6t_entry *)table_base)->comefrom
460 if (verdict == IP6T_CONTINUE)
461 e = (void *)e + e->next_offset;
469 e = (void *)e + e->next_offset;
473 #ifdef CONFIG_NETFILTER_DEBUG
474 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
476 read_unlock_bh(&table->lock);
478 #ifdef DEBUG_ALLOW_ALL
/*
 * NOTE(review): garbled/truncated extract of mark_source_chains() —
 * loop heads (do { ... }), braces, continue/return statements and the
 * "goto next" labels are missing; do not compile as-is.
 * Load-time pass: for each valid hook it walks the chain graph,
 * recording in each entry's comefrom which hooks can reach it.
 * counters.pcnt is borrowed as a back-pointer stack to avoid
 * recursion; a revisited entry with bit NF_INET_NUMHOOKS still set
 * means a loop, which aborts the load. Also range-checks standard
 * verdicts (jump offsets and negative verdicts).
 */
487 /* Figures out from what hook each rule can be called: returns 0 if
488 there are loops. Puts hook bitmask in comefrom. */
490 mark_source_chains(struct xt_table_info *newinfo,
491 unsigned int valid_hooks, void *entry0)
495 /* No recursion; use packet counter to save back ptrs (reset
496 to 0 as we leave), and comefrom to save source hook bitmask */
497 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
498 unsigned int pos = newinfo->hook_entry[hook];
499 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
501 if (!(valid_hooks & (1 << hook)))
504 /* Set initial back pointer. */
505 e->counters.pcnt = pos;
508 struct ip6t_standard_target *t
509 = (void *)ip6t_get_target(e);
510 int visited = e->comefrom & (1 << hook);
512 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
513 printk("iptables: loop hook %u pos %u %08X.\n",
514 hook, pos, e->comefrom);
517 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
519 /* Unconditional return/END. */
520 if ((e->target_offset == sizeof(struct ip6t_entry)
521 && (strcmp(t->target.u.user.name,
522 IP6T_STANDARD_TARGET) == 0)
524 && unconditional(&e->ipv6)) || visited) {
525 unsigned int oldpos, size;
527 if (t->verdict < -NF_MAX_VERDICT - 1) {
528 duprintf("mark_source_chains: bad "
529 "negative verdict (%i)\n",
534 /* Return: backtrack through the last
537 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
538 #ifdef DEBUG_IP_FIREWALL_USER
540 & (1 << NF_INET_NUMHOOKS)) {
541 duprintf("Back unset "
548 pos = e->counters.pcnt;
549 e->counters.pcnt = 0;
551 /* We're at the start. */
555 e = (struct ip6t_entry *)
557 } while (oldpos == pos + e->next_offset);
560 size = e->next_offset;
561 e = (struct ip6t_entry *)
562 (entry0 + pos + size);
563 e->counters.pcnt = pos;
566 int newpos = t->verdict;
568 if (strcmp(t->target.u.user.name,
569 IP6T_STANDARD_TARGET) == 0
571 if (newpos > newinfo->size -
572 sizeof(struct ip6t_entry)) {
573 duprintf("mark_source_chains: "
574 "bad verdict (%i)\n",
578 /* This a jump; chase it. */
579 duprintf("Jump rule %u -> %u\n",
582 /* ... this is a fallthru */
583 newpos = pos + e->next_offset;
585 e = (struct ip6t_entry *)
587 e->counters.pcnt = pos;
592 duprintf("Finished chain %u\n", hook);
598 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
600 struct xt_mtdtor_param par;
602 if (i && (*i)-- == 0)
605 par.match = m->u.kernel.match;
606 par.matchinfo = m->data;
607 if (par.match->destroy != NULL)
608 par.match->destroy(&par);
609 module_put(par.match->me);
614 check_entry(struct ip6t_entry *e, const char *name)
616 struct ip6t_entry_target *t;
618 if (!ip6_checkentry(&e->ipv6)) {
619 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
623 if (e->target_offset + sizeof(struct ip6t_entry_target) >
627 t = ip6t_get_target(e);
628 if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * NOTE(review): garbled/truncated extract of check_match() — the third
 * parameter line, local declarations, error-path braces and returns
 * are missing. Runs xt_check_match() on one match with the rule's
 * protocol/invflag context from par->entryinfo.
 */
634 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
637 const struct ip6t_ip6 *ipv6 = par->entryinfo;
640 par->match = m->u.kernel.match;
641 par->matchinfo = m->data;
643 ret = xt_check_match(par, NFPROTO_IPV6, m->u.match_size - sizeof(*m),
644 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
646 duprintf("ip_tables: check failed for `%s'.\n",
/*
 * NOTE(review): garbled/truncated extract of find_check_match() —
 * return-type line, error label and returns are missing. Looks up the
 * match extension by name (auto-loading "ip6t_<name>" if needed),
 * binds it into the rule, then validates via check_match(); the
 * module_put() line is the error-unwind path.
 */
655 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 struct xt_match *match;
661 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
663 "ip6t_%s", m->u.user.name);
664 if (IS_ERR(match) || !match) {
665 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
666 return match ? PTR_ERR(match) : -ENOENT;
668 m->u.kernel.match = match;
670 ret = check_match(m, par, i);
676 module_put(m->u.kernel.match->me);
/*
 * NOTE(review): garbled/truncated extract of check_target() — several
 * initializer lines of the xt_tgchk_param and the returns are missing.
 * Validates the rule's bound target via xt_check_target() with the
 * rule's protocol/invflag context.
 */
680 static int check_target(struct ip6t_entry *e, const char *name)
682 struct ip6t_entry_target *t = ip6t_get_target(e);
683 struct xt_tgchk_param par = {
686 .target = t->u.kernel.target,
688 .hook_mask = e->comefrom,
692 t = ip6t_get_target(e);
693 ret = xt_check_target(&par, NFPROTO_IPV6, t->u.target_size - sizeof(*t),
694 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
696 duprintf("ip_tables: check failed for `%s'.\n",
697 t->u.kernel.target->name);
/*
 * NOTE(review): garbled/truncated extract of find_check_entry() —
 * return type, j initialization, revision argument and the error
 * labels ("err:", "cleanup_matches:") are missing. Full per-rule
 * validation: check_entry(), then all matches via find_check_match()
 * (counting successes in j for unwind), then target lookup/bind and
 * check_target(); on failure, previously-bound matches/target are
 * released (the module_put / cleanup_match iterate lines at the end).
 */
704 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
707 struct ip6t_entry_target *t;
708 struct xt_target *target;
711 struct xt_mtchk_param mtpar;
713 ret = check_entry(e, name);
719 mtpar.entryinfo = &e->ipv6;
720 mtpar.hook_mask = e->comefrom;
721 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
723 goto cleanup_matches;
725 t = ip6t_get_target(e);
726 target = try_then_request_module(xt_find_target(AF_INET6,
729 "ip6t_%s", t->u.user.name);
730 if (IS_ERR(target) || !target) {
731 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
732 ret = target ? PTR_ERR(target) : -ENOENT;
733 goto cleanup_matches;
735 t->u.kernel.target = target;
737 ret = check_target(e, name);
744 module_put(t->u.kernel.target->me);
746 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * NOTE(review): garbled/truncated extract of check_entry_size_and_hooks()
 * — return type, the "base" parameter line, next_offset check head,
 * comefrom clear and entry-count increment are missing. Validates one
 * entry's alignment and size against the blob limit, and records
 * hook-entry/underflow offsets that coincide with this entry.
 */
751 check_entry_size_and_hooks(struct ip6t_entry *e,
752 struct xt_table_info *newinfo,
754 unsigned char *limit,
755 const unsigned int *hook_entries,
756 const unsigned int *underflows,
761 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
762 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
763 duprintf("Bad offset %p\n", e);
768 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
769 duprintf("checking: element %p size %u\n",
774 /* Check hooks & underflows */
775 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
776 if ((unsigned char *)e - base == hook_entries[h])
777 newinfo->hook_entry[h] = hook_entries[h];
778 if ((unsigned char *)e - base == underflows[h])
779 newinfo->underflow[h] = underflows[h];
782 /* FIXME: underflows must be unconditional, standard verdicts
783 < 0 (not IP6T_RETURN). --RR */
785 /* Clear counters and comefrom */
786 e->counters = ((struct xt_counters) { 0, 0 });
794 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
796 struct ip6t_entry_target *t;
798 if (i && (*i)-- == 0)
801 /* Cleanup all matches */
802 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
803 t = ip6t_get_target(e);
804 if (t->u.kernel.target->destroy)
805 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
806 module_put(t->u.kernel.target->me);
/*
 * NOTE(review): garbled/truncated extract of translate_table() — the
 * entry0/size/number parameter lines, entry-count comparison, error
 * returns and the final success return are missing. Load-time
 * pipeline: record offsets -> init hook/underflow slots to 0xFFFFFFFF
 * sentinels -> size/offset walk -> verify all valid hooks were
 * assigned -> loop detection (mark_source_chains) -> per-entry
 * find_check_entry with partial unwind on failure -> replicate the
 * validated blob to every other CPU's copy.
 */
810 /* Checks and translates the user-supplied table segment (held in
813 translate_table(const char *name,
814 unsigned int valid_hooks,
815 struct xt_table_info *newinfo,
819 const unsigned int *hook_entries,
820 const unsigned int *underflows)
825 newinfo->size = size;
826 newinfo->number = number;
828 /* Init all hooks to impossible value. */
829 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
830 newinfo->hook_entry[i] = 0xFFFFFFFF;
831 newinfo->underflow[i] = 0xFFFFFFFF;
834 duprintf("translate_table: size %u\n", newinfo->size);
836 /* Walk through entries, checking offsets. */
837 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
838 check_entry_size_and_hooks,
842 hook_entries, underflows, &i);
847 duprintf("translate_table: %u not %u entries\n",
852 /* Check hooks all assigned */
853 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
854 /* Only hooks which are valid */
855 if (!(valid_hooks & (1 << i)))
857 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
858 duprintf("Invalid hook entry %u %u\n",
862 if (newinfo->underflow[i] == 0xFFFFFFFF) {
863 duprintf("Invalid underflow %u %u\n",
869 if (!mark_source_chains(newinfo, valid_hooks, entry0))
872 /* Finally, each sanity check must pass */
874 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
875 find_check_entry, name, size, &i);
878 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
883 /* And one copy for every other CPU */
884 for_each_possible_cpu(i) {
885 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
886 memcpy(newinfo->entries[i], entry0, newinfo->size);
894 add_entry_to_counter(const struct ip6t_entry *e,
895 struct xt_counters total[],
898 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
905 set_entry_to_counter(const struct ip6t_entry *e,
906 struct ip6t_counters total[],
909 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * NOTE(review): garbled/truncated extract of get_counters() — return
 * type, local declarations (cpu/curcpu/i), size arguments and the
 * "skip curcpu" check inside the loop are missing. Snapshots per-CPU
 * rule counters into one flat array: SET for the current CPU's copy,
 * then ADD for every other CPU's copy.
 */
916 get_counters(const struct xt_table_info *t,
917 struct xt_counters counters[])
923 /* Instead of clearing (by a previous call to memset())
924 * the counters and using adds, we set the counters
925 * with data used by 'current' CPU
926 * We dont care about preemption here.
928 curcpu = raw_smp_processor_id();
931 IP6T_ENTRY_ITERATE(t->entries[curcpu],
933 set_entry_to_counter,
937 for_each_possible_cpu(cpu) {
941 IP6T_ENTRY_ITERATE(t->entries[cpu],
943 add_entry_to_counter,
/*
 * NOTE(review): garbled/truncated extract of alloc_counters() — the
 * final "return counters;" and braces are missing. Allocates a
 * counter array sized to the table and fills it under the table write
 * lock so userspace reads a consistent snapshot; caller must vfree().
 */
949 static struct xt_counters *alloc_counters(struct xt_table *table)
951 unsigned int countersize;
952 struct xt_counters *counters;
953 const struct xt_table_info *private = table->private;
955 /* We need atomic snapshot of counters: rest doesn't change
956 (other than comefrom, which userspace doesn't care
958 countersize = sizeof(struct xt_counters) * private->number;
959 counters = vmalloc_node(countersize, numa_node_id());
961 if (counters == NULL)
962 return ERR_PTR(-ENOMEM);
964 /* First, sum counters... */
965 write_lock_bh(&table->lock);
966 get_counters(private, counters);
967 write_unlock_bh(&table->lock);
/*
 * NOTE(review): garbled/truncated extract of copy_entries_to_user() —
 * error labels ("free_counters:"), the counters[num] source argument,
 * vfree and final return are missing. Copies the raw rule blob to
 * userspace, then patches in the freshly-summed counters and rewrites
 * each match/target name from the kernel's canonical extension names.
 */
973 copy_entries_to_user(unsigned int total_size,
974 struct xt_table *table,
975 void __user *userptr)
977 unsigned int off, num;
978 struct ip6t_entry *e;
979 struct xt_counters *counters;
980 const struct xt_table_info *private = table->private;
982 const void *loc_cpu_entry;
984 counters = alloc_counters(table);
985 if (IS_ERR(counters))
986 return PTR_ERR(counters);
988 /* choose the copy that is on our node/cpu, ...
989 * This choice is lazy (because current thread is
990 * allowed to migrate to another cpu)
992 loc_cpu_entry = private->entries[raw_smp_processor_id()];
993 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
998 /* FIXME: use iterator macros --RR */
999 /* ... then go back and fix counters and names */
1000 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1002 const struct ip6t_entry_match *m;
1003 const struct ip6t_entry_target *t;
1005 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1006 if (copy_to_user(userptr + off
1007 + offsetof(struct ip6t_entry, counters),
1009 sizeof(counters[num])) != 0) {
1014 for (i = sizeof(struct ip6t_entry);
1015 i < e->target_offset;
1016 i += m->u.match_size) {
1019 if (copy_to_user(userptr + off + i
1020 + offsetof(struct ip6t_entry_match,
1022 m->u.kernel.match->name,
1023 strlen(m->u.kernel.match->name)+1)
1030 t = ip6t_get_target(e);
1031 if (copy_to_user(userptr + off + e->target_offset
1032 + offsetof(struct ip6t_entry_target,
1034 t->u.kernel.target->name,
1035 strlen(t->u.kernel.target->name)+1) != 0) {
1046 #ifdef CONFIG_COMPAT
1047 static void compat_standard_from_user(void *dst, void *src)
1049 int v = *(compat_int_t *)src;
1052 v += xt_compat_calc_jump(AF_INET6, v);
1053 memcpy(dst, &v, sizeof(v));
1056 static int compat_standard_to_user(void __user *dst, void *src)
1058 compat_int_t cv = *(int *)src;
1061 cv -= xt_compat_calc_jump(AF_INET6, cv);
1062 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1066 compat_calc_match(struct ip6t_entry_match *m, int *size)
1068 *size += xt_compat_match_offset(m->u.kernel.match);
/*
 * NOTE(review): garbled/truncated extract of compat_calc_entry() —
 * local off/i/ret declarations, the error return after
 * xt_compat_add_offset and the final return are missing. Computes how
 * much smaller one entry is in 32-bit compat layout, registers that
 * delta, shrinks newinfo->size and pulls down any hook/underflow
 * offsets that lie after this entry.
 */
1072 static int compat_calc_entry(struct ip6t_entry *e,
1073 const struct xt_table_info *info,
1074 void *base, struct xt_table_info *newinfo)
1076 struct ip6t_entry_target *t;
1077 unsigned int entry_offset;
1080 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1081 entry_offset = (void *)e - base;
1082 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1083 t = ip6t_get_target(e);
1084 off += xt_compat_target_offset(t->u.kernel.target);
1085 newinfo->size -= off;
1086 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1090 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1091 if (info->hook_entry[i] &&
1092 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1093 newinfo->hook_entry[i] -= off;
1094 if (info->underflow[i] &&
1095 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1096 newinfo->underflow[i] -= off;
1101 static int compat_table_info(const struct xt_table_info *info,
1102 struct xt_table_info *newinfo)
1104 void *loc_cpu_entry;
1106 if (!newinfo || !info)
1109 /* we dont care about newinfo->entries[] */
1110 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1111 newinfo->initial_entries = 0;
1112 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1113 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1114 compat_calc_entry, info, loc_cpu_entry,
/*
 * NOTE(review): garbled/truncated extract of get_info() — ret/t
 * declarations, the "if (compat)" guards around the CONFIG_COMPAT
 * sections, -EFAULT/-EINVAL returns, xt_table_unlock/module_put and
 * the final return are missing. Implements the IP6T_SO_GET_INFO
 * getsockopt: finds the named table (auto-loading "ip6table_<name>"),
 * and copies hook entries, underflows, entry count and size to
 * userspace — recomputing sizes via compat_table_info() for 32-bit
 * callers.
 */
1119 static int get_info(struct net *net, void __user *user, int *len, int compat)
1121 char name[IP6T_TABLE_MAXNAMELEN];
1125 if (*len != sizeof(struct ip6t_getinfo)) {
1126 duprintf("length %u != %zu\n", *len,
1127 sizeof(struct ip6t_getinfo));
1131 if (copy_from_user(name, user, sizeof(name)) != 0)
1134 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1135 #ifdef CONFIG_COMPAT
1137 xt_compat_lock(AF_INET6);
1139 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1140 "ip6table_%s", name);
1141 if (t && !IS_ERR(t)) {
1142 struct ip6t_getinfo info;
1143 const struct xt_table_info *private = t->private;
1145 #ifdef CONFIG_COMPAT
1147 struct xt_table_info tmp;
1148 ret = compat_table_info(private, &tmp);
1149 xt_compat_flush_offsets(AF_INET6);
1153 info.valid_hooks = t->valid_hooks;
1154 memcpy(info.hook_entry, private->hook_entry,
1155 sizeof(info.hook_entry));
1156 memcpy(info.underflow, private->underflow,
1157 sizeof(info.underflow));
1158 info.num_entries = private->number;
1159 info.size = private->size;
1160 strcpy(info.name, name);
1162 if (copy_to_user(user, &info, *len) != 0)
1170 ret = t ? PTR_ERR(t) : -ENOENT;
1171 #ifdef CONFIG_COMPAT
1173 xt_compat_unlock(AF_INET6);
/*
 * NOTE(review): garbled/truncated extract of get_entries() — return
 * type, ret/t declarations, -EINVAL/-EFAULT returns, module_put /
 * xt_table_unlock and the final return are missing. Implements the
 * IP6T_SO_GET_ENTRIES getsockopt: validates the requested size against
 * the live table, then dumps the ruleset via copy_entries_to_user().
 */
1179 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1182 struct ip6t_get_entries get;
1185 if (*len < sizeof(get)) {
1186 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1189 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1191 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1192 duprintf("get_entries: %u != %zu\n",
1193 *len, sizeof(get) + get.size);
1197 t = xt_find_table_lock(net, AF_INET6, get.name);
1198 if (t && !IS_ERR(t)) {
1199 struct xt_table_info *private = t->private;
1200 duprintf("t->private->number = %u\n", private->number);
1201 if (get.size == private->size)
1202 ret = copy_entries_to_user(private->size,
1203 t, uptr->entrytable);
1205 duprintf("get_entries: I've got %u not %u!\n",
1206 private->size, get.size);
1212 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * NOTE(review): garbled/truncated extract of __do_replace() — ret/t
 * declarations, the ENOMEM check after vmalloc_node, the
 * put_module/unlock error labels and several gotos are missing.
 * Core of table replacement: swap in the validated @newinfo via
 * xt_replace_table(), adjust the module refcount according to whether
 * the old/new rulesets exceed the built-in initial entries, snapshot
 * the old counters for userspace, then tear down and free the old
 * table.
 */
1218 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1219 struct xt_table_info *newinfo, unsigned int num_counters,
1220 void __user *counters_ptr)
1224 struct xt_table_info *oldinfo;
1225 struct xt_counters *counters;
1226 const void *loc_cpu_old_entry;
1229 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1236 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1237 "ip6table_%s", name);
1238 if (!t || IS_ERR(t)) {
1239 ret = t ? PTR_ERR(t) : -ENOENT;
1240 goto free_newinfo_counters_untrans;
1244 if (valid_hooks != t->valid_hooks) {
1245 duprintf("Valid hook crap: %08X vs %08X\n",
1246 valid_hooks, t->valid_hooks);
1251 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1255 /* Update module usage count based on number of rules */
1256 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1257 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1258 if ((oldinfo->number > oldinfo->initial_entries) ||
1259 (newinfo->number <= oldinfo->initial_entries))
1261 if ((oldinfo->number > oldinfo->initial_entries) &&
1262 (newinfo->number <= oldinfo->initial_entries))
1265 /* Get the old counters. */
1266 get_counters(oldinfo, counters);
1267 /* Decrease module usage counts and free resource */
1268 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1269 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1271 xt_free_table_info(oldinfo);
1272 if (copy_to_user(counters_ptr, counters,
1273 sizeof(struct xt_counters) * num_counters) != 0)
1282 free_newinfo_counters_untrans:
/*
 * NOTE(review): garbled/truncated extract of do_replace() — return
 * type, ret declaration, -EFAULT/-ENOMEM paths, the success return
 * and the "free_newinfo:" label are missing. Implements
 * IP6T_SO_SET_REPLACE: copy in the header, overflow-check the counter
 * count, copy in the rule blob, validate/translate it, then hand off
 * to __do_replace(); on failure the translated entries are cleaned up
 * and the table info freed.
 */
1289 do_replace(struct net *net, void __user *user, unsigned int len)
1292 struct ip6t_replace tmp;
1293 struct xt_table_info *newinfo;
1294 void *loc_cpu_entry;
1296 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1299 /* overflow check */
1300 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1303 newinfo = xt_alloc_table_info(tmp.size);
1307 /* choose the copy that is on our node/cpu */
1308 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1309 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1315 ret = translate_table(tmp.name, tmp.valid_hooks,
1316 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1317 tmp.hook_entry, tmp.underflow);
1321 duprintf("ip_tables: Translated table\n");
1323 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1324 tmp.num_counters, tmp.counters);
1326 goto free_newinfo_untrans;
1329 free_newinfo_untrans:
1330 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1332 xt_free_table_info(newinfo);
1336 /* We're lazy, and add to the first CPU; overflow works its fey magic
1337 * and everything is OK. */
1339 add_counter_to_entry(struct ip6t_entry *e,
1340 const struct xt_counters addme[],
1344 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1346 (long unsigned int)e->counters.pcnt,
1347 (long unsigned int)e->counters.bcnt,
1348 (long unsigned int)addme[*i].pcnt,
1349 (long unsigned int)addme[*i].bcnt);
1352 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/*
 * NOTE(review): garbled/truncated extract of do_add_counters() —
 * return type, the compat flag parameter, ret/size/name/ptmp/t/i
 * declarations, the "if (compat)" branches, error labels ("free:",
 * "unlock_up_free:") and the final return are missing. Implements
 * IP6T_SO_SET_ADD_COUNTERS: reads a (possibly 32-bit compat) header,
 * validates the counter count against the live table under the write
 * lock, then folds the deltas into this CPU's rule copy via
 * add_counter_to_entry().
 */
1359 do_add_counters(struct net *net, void __user *user, unsigned int len,
1363 struct xt_counters_info tmp;
1364 struct xt_counters *paddc;
1365 unsigned int num_counters;
1370 const struct xt_table_info *private;
1372 const void *loc_cpu_entry;
1373 #ifdef CONFIG_COMPAT
1374 struct compat_xt_counters_info compat_tmp;
1378 size = sizeof(struct compat_xt_counters_info);
1383 size = sizeof(struct xt_counters_info);
1386 if (copy_from_user(ptmp, user, size) != 0)
1389 #ifdef CONFIG_COMPAT
1391 num_counters = compat_tmp.num_counters;
1392 name = compat_tmp.name;
1396 num_counters = tmp.num_counters;
1400 if (len != size + num_counters * sizeof(struct xt_counters))
1403 paddc = vmalloc_node(len - size, numa_node_id());
1407 if (copy_from_user(paddc, user + size, len - size) != 0) {
1412 t = xt_find_table_lock(net, AF_INET6, name);
1413 if (!t || IS_ERR(t)) {
1414 ret = t ? PTR_ERR(t) : -ENOENT;
1418 write_lock_bh(&t->lock);
1419 private = t->private;
1420 if (private->number != num_counters) {
1422 goto unlock_up_free;
1426 /* Choose the copy that is on our node */
1427 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1428 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1430 add_counter_to_entry,
1434 write_unlock_bh(&t->lock);
/*
 * NOTE(review): garbled/truncated extract of struct compat_ip6t_replace
 * — the valid_hooks/num_entries/size/num_counters members and the
 * closing "};" are missing. 32-bit layout of struct ip6t_replace as
 * passed by compat userspace; pointers become compat_uptr_t.
 */
1443 #ifdef CONFIG_COMPAT
1444 struct compat_ip6t_replace {
1445 char name[IP6T_TABLE_MAXNAMELEN];
1449 u32 hook_entry[NF_INET_NUMHOOKS];
1450 u32 underflow[NF_INET_NUMHOOKS];
1452 compat_uptr_t counters; /* struct ip6t_counters * */
1453 struct compat_ip6t_entry entries[0];
/*
 * NOTE(review): garbled/truncated extract of compat_copy_entry_to_user()
 * — return type, the unsigned int *i parameter line, origsize
 * initialization, error returns and the final return are missing.
 * Serializes one native entry into 32-bit compat layout at *dstptr:
 * header, counters, each match and the target via the xt compat
 * helpers, then fixes up target_offset/next_offset for the shrunken
 * layout.
 */
1457 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1458 unsigned int *size, struct xt_counters *counters,
1461 struct ip6t_entry_target *t;
1462 struct compat_ip6t_entry __user *ce;
1463 u_int16_t target_offset, next_offset;
1464 compat_uint_t origsize;
1469 ce = (struct compat_ip6t_entry __user *)*dstptr;
1470 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1473 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1476 *dstptr += sizeof(struct compat_ip6t_entry);
1477 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1479 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1480 target_offset = e->target_offset - (origsize - *size);
1483 t = ip6t_get_target(e);
1484 ret = xt_compat_target_to_user(t, dstptr, size);
1488 next_offset = e->next_offset - (origsize - *size);
1489 if (put_user(target_offset, &ce->target_offset))
1491 if (put_user(next_offset, &ce->next_offset))
/*
 * NOTE(review): garbled/truncated extract of compat_find_calc_match()
 * — return type, the name parameter line, the duprintf's name
 * argument, the (*i)++ and final return are missing. Compat-load
 * counterpart of find_check_match(): looks up and binds the match
 * extension (auto-loading "ip6t_<name>") and accumulates its
 * native-vs-compat size delta into *size.
 */
1501 compat_find_calc_match(struct ip6t_entry_match *m,
1503 const struct ip6t_ip6 *ipv6,
1504 unsigned int hookmask,
1505 int *size, unsigned int *i)
1507 struct xt_match *match;
1509 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1510 m->u.user.revision),
1511 "ip6t_%s", m->u.user.name);
1512 if (IS_ERR(match) || !match) {
1513 duprintf("compat_check_calc_match: `%s' not found\n",
1515 return match ? PTR_ERR(match) : -ENOENT;
1517 m->u.kernel.match = match;
1518 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken when the match was looked up.  When a
 * counter is supplied, stop after releasing *i matches (error-unwind of a
 * partially processed entry); a NULL counter releases every match.
 */
1525 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1527 if (i && (*i)-- == 0)
1530 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match,
 * then the target.  The optional counter *i limits how many entries are
 * released, mirroring compat_release_match()'s unwind convention.
 */
1535 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1537 struct ip6t_entry_target *t;
1539 if (i && (*i)-- == 0)
1542 /* Cleanup all matches */
1543 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1544 t = compat_ip6t_get_target(e);
1545 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry: alignment, bounds against [base, limit),
 * minimum size, then resolve its matches and target (taking module refs)
 * while computing the native-vs-compat size delta, which is registered
 * with xt_compat_add_offset() for later offset translation.  Also records
 * hook entry/underflow positions that land exactly on this entry.
 * On error, unwinds references via the release_matches path.
 */
1550 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1551 struct xt_table_info *newinfo,
1553 unsigned char *base,
1554 unsigned char *limit,
1555 unsigned int *hook_entries,
1556 unsigned int *underflows,
1560 struct ip6t_entry_target *t;
1561 struct xt_target *target;
1562 unsigned int entry_offset;
1566 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1567 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1568 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1569 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Entry must at least hold its own header plus a target header. */
1573 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1574 sizeof(struct compat_xt_entry_target)) {
1575 duprintf("checking: element %p size %u\n",
1580 /* For purposes of check_entry casting the compat entry is fine */
1581 ret = check_entry((struct ip6t_entry *)e, name);
1585 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1586 entry_offset = (void *)e - (void *)base;
1588 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1589 &e->ipv6, e->comefrom, &off, &j);
1591 goto release_matches;
1593 t = compat_ip6t_get_target(e);
1594 target = try_then_request_module(xt_find_target(AF_INET6,
1596 t->u.user.revision),
1597 "ip6t_%s", t->u.user.name);
1598 if (IS_ERR(target) || !target) {
1599 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1601 ret = target ? PTR_ERR(target) : -ENOENT;
1602 goto release_matches;
1604 t->u.kernel.target = target;
1606 off += xt_compat_target_offset(target);
/* Remember this entry's size delta so offsets can be translated later. */
1608 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1612 /* Check hooks & underflows */
1613 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1614 if ((unsigned char *)e - base == hook_entries[h])
1615 newinfo->hook_entry[h] = hook_entries[h];
1616 if ((unsigned char *)e - base == underflows[h])
1617 newinfo->underflow[h] = underflows[h];
1620 /* Clear counters and comefrom */
1621 memset(&e->counters, 0, sizeof(e->counters));
1628 module_put(t->u.kernel.target->me);
/* NOTE(review): matches were gathered with COMPAT_IP6T_MATCH_ITERATE
 * above, but released here with the native IP6T_MATCH_ITERATE on a
 * struct compat_ip6t_entry — confirm both iterators walk identical
 * offsets for the compat layout. */
1630 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Expand one validated compat entry into its native form at *dstptr:
 * copy the header and counters, convert matches and the target through
 * the xt compat helpers, rebase target_offset/next_offset by the size
 * growth, and shift any hook_entry/underflow positions that lie beyond
 * this entry by the same delta.
 */
1635 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1636 unsigned int *size, const char *name,
1637 struct xt_table_info *newinfo, unsigned char *base)
1639 struct ip6t_entry_target *t;
1640 struct xt_target *target;
1641 struct ip6t_entry *de;
1642 unsigned int origsize;
1647 de = (struct ip6t_entry *)*dstptr;
1648 memcpy(de, e, sizeof(struct ip6t_entry));
1649 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1651 *dstptr += sizeof(struct ip6t_entry);
1652 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1654 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1658 de->target_offset = e->target_offset - (origsize - *size);
1659 t = compat_ip6t_get_target(e);
1660 target = t->u.kernel.target;
1661 xt_compat_target_from_user(t, dstptr, size);
1663 de->next_offset = e->next_offset - (origsize - *size);
/* Hook offsets past this entry move by the same amount it grew. */
1664 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1665 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1666 newinfo->hook_entry[h] -= origsize - *size;
1667 if ((unsigned char *)de - base < newinfo->underflow[h])
1668 newinfo->underflow[h] -= origsize - *size;
/*
 * Second-pass check of an already-expanded (native layout) entry: run
 * each match's checkentry hook via check_match(), then check_target().
 * On failure, unwind the j matches already checked with cleanup_match().
 */
1673 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1678 struct xt_mtchk_param mtpar;
1682 mtpar.entryinfo = &e->ipv6;
1683 mtpar.hook_mask = e->comefrom;
1684 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1686 goto cleanup_matches;
1688 ret = check_target(e, name);
1690 goto cleanup_matches;
1696 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Convert a whole compat ruleset into native form.  Pass 1 (under the
 * per-family compat lock) validates sizes/hooks and records per-entry
 * size offsets; pass 2 copies entries into a freshly allocated
 * xt_table_info at the grown size, then marks chains and runs the final
 * checkentry pass.  All hook slots start at 0xFFFFFFFF so unassigned
 * valid hooks can be detected.  Errors unwind module refs and free both
 * table_info allocations.
 */
1701 translate_compat_table(const char *name,
1702 unsigned int valid_hooks,
1703 struct xt_table_info **pinfo,
1705 unsigned int total_size,
1706 unsigned int number,
1707 unsigned int *hook_entries,
1708 unsigned int *underflows)
1711 struct xt_table_info *newinfo, *info;
1712 void *pos, *entry0, *entry1;
1719 info->number = number;
1721 /* Init all hooks to impossible value. */
1722 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1723 info->hook_entry[i] = 0xFFFFFFFF;
1724 info->underflow[i] = 0xFFFFFFFF;
1727 duprintf("translate_compat_table: size %u\n", info->size);
1729 xt_compat_lock(AF_INET6);
1730 /* Walk through entries, checking offsets. */
1731 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1732 check_compat_entry_size_and_hooks,
1733 info, &size, entry0,
1734 entry0 + total_size,
1735 hook_entries, underflows, &j, name);
1741 duprintf("translate_compat_table: %u not %u entries\n",
1746 /* Check hooks all assigned */
1747 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1748 /* Only hooks which are valid */
1749 if (!(valid_hooks & (1 << i)))
1751 if (info->hook_entry[i] == 0xFFFFFFFF) {
1752 duprintf("Invalid hook entry %u %u\n",
1753 i, hook_entries[i]);
1756 if (info->underflow[i] == 0xFFFFFFFF) {
1757 duprintf("Invalid underflow %u %u\n",
/* Allocate the native-size table and seed it from pass 1's results. */
1764 newinfo = xt_alloc_table_info(size);
1768 newinfo->number = number;
1769 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1770 newinfo->hook_entry[i] = info->hook_entry[i];
1771 newinfo->underflow[i] = info->underflow[i];
1773 entry1 = newinfo->entries[raw_smp_processor_id()];
1776 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1777 compat_copy_entry_from_user,
1778 &pos, &size, name, newinfo, entry1);
1779 xt_compat_flush_offsets(AF_INET6);
1780 xt_compat_unlock(AF_INET6);
1785 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1789 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Mixed unwind: compat refs for entries past i, native cleanup below. */
1793 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1794 compat_release_entry, &j);
1795 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1796 xt_free_table_info(newinfo);
1800 /* And one copy for every other CPU */
1801 for_each_possible_cpu(i)
1802 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1803 memcpy(newinfo->entries[i], entry1, newinfo->size);
1807 xt_free_table_info(info);
1811 xt_free_table_info(newinfo);
1813 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1816 xt_compat_flush_offsets(AF_INET6);
1817 xt_compat_unlock(AF_INET6);
/*
 * Compat setsockopt(IP6T_SO_SET_REPLACE): copy the compat replace header
 * and blob from userspace (with overflow checks on size/num_counters),
 * translate it to native form, and install it via __do_replace().  On a
 * failed install the translated entries are cleaned up and freed.
 */
1822 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1825 struct compat_ip6t_replace tmp;
1826 struct xt_table_info *newinfo;
1827 void *loc_cpu_entry;
1829 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1832 /* overflow check */
1833 if (tmp.size >= INT_MAX / num_possible_cpus())
1835 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1838 newinfo = xt_alloc_table_info(tmp.size)
1842 /* choose the copy that is on our node/cpu */
1843 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1844 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1850 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1851 &newinfo, &loc_cpu_entry, tmp.size,
1852 tmp.num_entries, tmp.hook_entry,
1857 duprintf("compat_do_replace: Translated table\n");
1859 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1860 tmp.num_counters, compat_ptr(tmp.counters));
1862 goto free_newinfo_untrans;
1865 free_newinfo_untrans:
1866 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1868 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to the compat replace path and ADD_COUNTERS to the shared
 * do_add_counters() with compat=1.
 */
1873 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1878 if (!capable(CAP_NET_ADMIN))
1882 case IP6T_SO_SET_REPLACE:
1883 ret = compat_do_replace(sock_net(sk), user, len);
1886 case IP6T_SO_SET_ADD_COUNTERS:
1887 ret = do_add_counters(sock_net(sk), user, len, 1);
1891 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userland's view of struct ip6t_get_entries. */
1898 struct compat_ip6t_get_entries {
1899 char name[IP6T_TABLE_MAXNAMELEN];
1901 struct compat_ip6t_entry entrytable[0];
/*
 * Dump the table's entries to a compat userspace buffer: snapshot the
 * counters, then walk this CPU's copy of the entries converting each via
 * compat_copy_entry_to_user().
 */
1905 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1906 void __user *userptr)
1908 struct xt_counters *counters;
1909 const struct xt_table_info *private = table->private;
1913 const void *loc_cpu_entry;
1916 counters = alloc_counters(table);
1917 if (IS_ERR(counters))
1918 return PTR_ERR(counters);
1920 /* choose the copy that is on our node/cpu, ...
1921 * This choice is lazy (because current thread is
1922 * allowed to migrate to another cpu)
1924 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1927 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1928 compat_copy_entry_to_user,
1929 &pos, &size, counters, &i);
/*
 * Compat getsockopt(IP6T_SO_GET_ENTRIES): validate the user-supplied
 * length against the header plus get.size, look up the table, and (under
 * the compat lock, since compat_table_info() registers offsets that must
 * be flushed) dump the entries if the compat size matches.
 */
1936 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1940 struct compat_ip6t_get_entries get;
1943 if (*len < sizeof(get)) {
1944 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1948 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1951 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1952 duprintf("compat_get_entries: %u != %zu\n",
1953 *len, sizeof(get) + get.size);
1957 xt_compat_lock(AF_INET6);
1958 t = xt_find_table_lock(net, AF_INET6, get.name);
1959 if (t && !IS_ERR(t)) {
1960 const struct xt_table_info *private = t->private;
1961 struct xt_table_info info;
1962 duprintf("t->private->number = %u\n", private->number);
1963 ret = compat_table_info(private, &info);
1964 if (!ret && get.size == info.size) {
1965 ret = compat_copy_entries_to_user(private->size,
1966 t, uptr->entrytable);
1968 duprintf("compat_get_entries: I've got %u not %u!\n",
1969 private->size, get.size);
1972 xt_compat_flush_offsets(AF_INET6);
1976 ret = t ? PTR_ERR(t) : -ENOENT;
1978 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat get handler falls through to this for
 * commands with no compat-specific handling. */
1982 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: CAP_NET_ADMIN required; INFO uses the
 * shared get_info() with compat=1, ENTRIES uses the compat dump, and
 * anything else is delegated to the native do_ip6t_get_ctl().
 */
1985 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1989 if (!capable(CAP_NET_ADMIN))
1993 case IP6T_SO_GET_INFO:
1994 ret = get_info(sock_net(sk), user, len, 1);
1996 case IP6T_SO_GET_ENTRIES:
1997 ret = compat_get_entries(sock_net(sk), user, len);
2000 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE installs
 * a new ruleset, ADD_COUNTERS updates counters (compat=0).
 */
2007 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2011 if (!capable(CAP_NET_ADMIN))
2015 case IP6T_SO_SET_REPLACE:
2016 ret = do_replace(sock_net(sk), user, len);
2019 case IP6T_SO_SET_ADD_COUNTERS:
2020 ret = do_add_counters(sock_net(sk), user, len, 0);
2024 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: CAP_NET_ADMIN required.  INFO/ENTRIES
 * query table state; REVISION_MATCH/REVISION_TARGET probe whether a given
 * match or target revision exists, auto-loading "ip6t_<name>" if needed.
 */
2032 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2036 if (!capable(CAP_NET_ADMIN))
2040 case IP6T_SO_GET_INFO:
2041 ret = get_info(sock_net(sk), user, len, 0);
2044 case IP6T_SO_GET_ENTRIES:
2045 ret = get_entries(sock_net(sk), user, len);
2048 case IP6T_SO_GET_REVISION_MATCH:
2049 case IP6T_SO_GET_REVISION_TARGET: {
2050 struct ip6t_get_revision rev;
2053 if (*len != sizeof(rev)) {
2057 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2062 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2067 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2070 "ip6t_%s", rev.name);
2075 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table: allocate per-CPU entry storage, copy
 * the initial ruleset in, translate/validate it, and hand it to
 * xt_register_table().  Returns the registered xt_table or an ERR_PTR;
 * newinfo is freed on any failure path.
 */
2082 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2083 const struct ip6t_replace *repl)
2086 struct xt_table_info *newinfo;
2087 struct xt_table_info bootstrap
2088 = { 0, 0, 0, { 0 }, { 0 }, { } };
2089 void *loc_cpu_entry;
2090 struct xt_table *new_table;
2092 newinfo = xt_alloc_table_info(repl->size);
2098 /* choose the copy on our node/cpu, but dont care about preemption */
2099 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2100 memcpy(loc_cpu_entry, repl->entries, repl->size);
2102 ret = translate_table(table->name, table->valid_hooks,
2103 newinfo, loc_cpu_entry, repl->size,
2110 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2111 if (IS_ERR(new_table)) {
2112 ret = PTR_ERR(new_table);
2118 xt_free_table_info(newinfo);
2120 return ERR_PTR(ret);
/*
 * Tear down a registered table: unhook it from x_tables, run
 * cleanup_entry on every rule (dropping match/target module refs), and
 * free the table_info.  table->me is cached before unregistering because
 * the table structure may not be touched afterwards.
 */
2123 void ip6t_unregister_table(struct xt_table *table)
2125 struct xt_table_info *private;
2126 void *loc_cpu_entry;
2127 struct module *table_owner = table->me;
2129 private = xt_unregister_table(table);
2131 /* Decrease module usage counts and free resources */
2132 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2133 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* Extra table-owner ref is only held when rules beyond the built-in
 * initial entries were loaded. */
2134 if (private->number > private->initial_entries)
2135 module_put(table_owner);
2136 xt_free_table_info(private);
2139 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* NOTE(review): an invert flag is also applied to the result in the
 * (not shown) remainder of the expression — confirm against callers. */
2141 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2142 u_int8_t type, u_int8_t code,
2145 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match hook for the built-in icmp6 match: only applies to non-fragment
 * packets; if the ICMPv6 header cannot be linearly read the packet is
 * hot-dropped (we were asked to inspect it and cannot).  Otherwise the
 * type/code range check runs, honoring IP6T_ICMP_INV.
 */
2150 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2152 const struct icmp6hdr *ic;
2153 struct icmp6hdr _icmph;
2154 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2156 /* Must not be a fragment. */
2157 if (par->fragoff != 0)
2160 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2162 /* We've been asked to examine this packet, and we
2163 * can't. Hence, no choice but to drop.
2165 duprintf("Dropping evil ICMP tinygram.\n");
2166 *par->hotdrop = true;
2170 return icmp6_type_code_match(icmpinfo->type,
2173 ic->icmp6_type, ic->icmp6_code,
2174 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2177 /* Called when user tries to insert an entry of this type. */
/* Reject rules carrying invert flags other than IP6T_ICMP_INV. */
2178 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2180 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2182 /* Must specify no unknown invflags */
2183 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2186 /* The built-in targets: standard (NULL) and error. */
/* Standard target: its targetsize is a plain verdict int; compat
 * conversion uses the generic xt standard-target helpers. */
2187 static struct xt_target ip6t_standard_target __read_mostly = {
2188 .name = IP6T_STANDARD_TARGET,
2189 .targetsize = sizeof(int),
2191 #ifdef CONFIG_COMPAT
2192 .compatsize = sizeof(compat_int_t),
2193 .compat_from_user = compat_standard_from_user,
2194 .compat_to_user = compat_standard_to_user,
/* Error target: placed at the end of built-in chains; its handler
 * (ip6t_error) fires if a packet ever reaches it. */
2198 static struct xt_target ip6t_error_target __read_mostly = {
2199 .name = IP6T_ERROR_TARGET,
2200 .target = ip6t_error,
2201 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/*
 * setsockopt/getsockopt registration: wires the ip6tables control range
 * to the native handlers, with compat variants when CONFIG_COMPAT.
 */
2205 static struct nf_sockopt_ops ip6t_sockopts = {
2207 .set_optmin = IP6T_BASE_CTL,
2208 .set_optmax = IP6T_SO_SET_MAX+1,
2209 .set = do_ip6t_set_ctl,
2210 #ifdef CONFIG_COMPAT
2211 .compat_set = compat_do_ip6t_set_ctl,
2213 .get_optmin = IP6T_BASE_CTL,
2214 .get_optmax = IP6T_SO_GET_MAX+1,
2215 .get = do_ip6t_get_ctl,
2216 #ifdef CONFIG_COMPAT
2217 .compat_get = compat_do_ip6t_get_ctl,
2219 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration, restricted to IPPROTO_ICMPV6. */
2222 static struct xt_match icmp6_matchstruct __read_mostly = {
2224 .match = icmp6_match,
2225 .matchsize = sizeof(struct ip6t_icmp),
2226 .checkentry = icmp6_checkentry,
2227 .proto = IPPROTO_ICMPV6,
/* Per-netns setup: register the AF_INET6 x_tables proc entries. */
2231 static int __net_init ip6_tables_net_init(struct net *net)
2233 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: remove the AF_INET6 x_tables proc entries. */
2236 static void __net_exit ip6_tables_net_exit(struct net *net)
2238 xt_proto_fini(net, AF_INET6);
/* Network-namespace lifecycle hooks for this module. */
2241 static struct pernet_operations ip6_tables_net_ops = {
2242 .init = ip6_tables_net_init,
2243 .exit = ip6_tables_net_exit,
/*
 * Module init: register the pernet subsystem, the two built-in targets,
 * the icmp6 match, and finally the sockopt interface.  The tail labels
 * unwind each registration in reverse order on failure.
 */
2246 static int __init ip6_tables_init(void)
2250 ret = register_pernet_subsys(&ip6_tables_net_ops);
2254 /* Noone else will be downing sem now, so we won't sleep */
2255 ret = xt_register_target(&ip6t_standard_target);
2258 ret = xt_register_target(&ip6t_error_target);
2261 ret = xt_register_match(&icmp6_matchstruct);
2265 /* Register setsockopt */
2266 ret = nf_register_sockopt(&ip6t_sockopts);
2270 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, reverse order of registration. */
2274 xt_unregister_match(&icmp6_matchstruct);
2276 xt_unregister_target(&ip6t_error_target);
2278 xt_unregister_target(&ip6t_standard_target);
2280 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: undo everything ip6_tables_init() did, reverse order. */
2285 static void __exit ip6_tables_fini(void)
2287 nf_unregister_sockopt(&ip6t_sockopts);
2289 xt_unregister_match(&icmp6_matchstruct);
2290 xt_unregister_target(&ip6t_error_target);
2291 xt_unregister_target(&ip6t_standard_target);
2293 unregister_pernet_subsys(&ip6_tables_net_ops);
2297 * find the offset to specified header or the protocol number of last header
2298 * if target < 0. "last header" is transport protocol header, ESP, or
2301 * If target header is found, its offset is set in *offset and return protocol
2302 * number. Otherwise, return -1.
2304 * If the first fragment doesn't contain the final protocol header or
2305 * NEXTHDR_NONE it is considered invalid.
2307 * Note that non-1st fragment is special case that "the protocol number
2308 * of last header" is "next header" field in Fragment header. In this case,
2309 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2313 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2314 int target, unsigned short *fragoff)
2316 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2317 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2318 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the target header is reached
 * or a non-extension / NEXTHDR_NONE header ends the chain. */
2323 while (nexthdr != target) {
2324 struct ipv6_opt_hdr _hdr, *hp;
2325 unsigned int hdrlen;
2327 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2333 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2336 if (nexthdr == NEXTHDR_FRAGMENT) {
2337 unsigned short _frag_off;
2339 fp = skb_header_pointer(skb,
2340 start+offsetof(struct frag_hdr,
/* Mask off the reserved/M bits; keep only the fragment offset. */
2347 _frag_off = ntohs(*fp) & ~0x7;
2350 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2351 hp->nexthdr == NEXTHDR_NONE)) {
2353 *fragoff = _frag_off;
/* AH length is counted in 32-bit words, unlike other ext headers. */
2359 } else if (nexthdr == NEXTHDR_AUTH)
2360 hdrlen = (hp->hdrlen + 2) << 2;
2362 hdrlen = ipv6_optlen(hp);
2364 nexthdr = hp->nexthdr;
/* Public entry points used by the per-table modules (filter/mangle/raw)
 * and other IPv6 netfilter code. */
2373 EXPORT_SYMBOL(ip6t_register_table);
2374 EXPORT_SYMBOL(ip6t_unregister_table);
2375 EXPORT_SYMBOL(ip6t_do_table);
2376 EXPORT_SYMBOL(ip6t_ext_hdr);
2377 EXPORT_SYMBOL(ipv6_find_hdr);
2379 module_init(ip6_tables_init);
2380 module_exit(ip6_tables_fini);