2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
/* Module metadata: this is the IPv6 packet-filter (ip6_tables) core. */
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/* Compile-time debugging switches; all normally disabled. */
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf()/duprintf() expand to printk() only when the matching debug
 * switch above is enabled; otherwise they expand to nothing.
 * NOTE(review): the #else/#endif lines of these conditionals were lost in
 * extraction (the embedded listing numbers skip) — restore from pristine
 * source before building. */
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
/* IP_NF_ASSERT logs function/file/line when an assertion fails under
 * CONFIG_NETFILTER_DEBUG; interior macro lines are missing here. */
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
/* NOTE(review): this extract is missing source lines (the embedded listing
 * numbers skip), so the return type, braces and several statements are
 * absent.  Comments below describe only what the visible lines show. */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR a test result with the corresponding invert flag bit. */
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Masked source/destination address comparison, honouring invert bits. */
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
/* NOTE(review): the dprintf lines below were commented out upstream; the
 * opening comment delimiter was lost in extraction — only the closing
 * delimiter at the end of the last dprintf line survives.  They also
 * reference IPv4-era names (ip, ipinfo) that do not exist here. */
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
123 /* Look for ifname matches; this should unroll nicely. */
/* Compare the input interface name one unsigned long at a time under the
 * per-rule wildcard mask. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
/* Same unrolled comparison for the output interface. */
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
155 unsigned short _frag_off;
/* Walk the extension-header chain to locate the upper-layer protocol
 * and the fragment offset; results feed *protoff / *fragoff. */
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
177 /* We need match for the '-p all', too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
185 /* should be ip6 safe */
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
203 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
206 printk("ip6_tables: error: `%s'\n",
207 (const char *)par->targinfo);
212 /* Performance critical - called for every packet */
214 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215 struct xt_match_param *par)
217 par->match = m->u.kernel.match;
218 par->matchinfo = m->data;
220 /* Stop iteration if it doesn't match */
221 if (!m->u.kernel.match->match(skb, par))
/* Return the rule entry located at byte offset 'offset' from the start of
 * a table blob.  (Only the braces were missing from the extracted listing.) */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
233 /* All zeroes == unconditional rule. */
234 /* Mildly perf critical (only if packet tracing is on) */
236 unconditional(const struct ip6t_ip6 *ipv6)
240 for (i = 0; i < sizeof(*ipv6); i++)
241 if (((char *)ipv6)[i])
244 return (i == sizeof(*ipv6));
247 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
248 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
249 /* This cries for unification! */
/* Human-readable names for the netfilter hook points, indexed by hook
 * number; used in TRACE log lines. */
250 static const char *const hooknames[] = {
251 [NF_INET_PRE_ROUTING] = "PREROUTING",
252 [NF_INET_LOCAL_IN] = "INPUT",
253 [NF_INET_FORWARD] = "FORWARD",
254 [NF_INET_LOCAL_OUT] = "OUTPUT",
255 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Kinds of comment a TRACE line can carry about the matched rule. */
258 enum nf_ip_trace_comments {
259 NF_IP6_TRACE_COMMENT_RULE,
260 NF_IP6_TRACE_COMMENT_RETURN,
261 NF_IP6_TRACE_COMMENT_POLICY,
264 static const char *const comments[] = {
265 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
266 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
267 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Logging parameters for TRACE output.
 * NOTE(review): interior initializer fields (and closing braces) were lost
 * in extraction — the embedded listing numbers skip. */
270 static struct nf_loginfo trace_loginfo = {
271 .type = NF_LOG_TYPE_LOG,
275 .logflags = NF_LOG_MASK,
280 /* Mildly perf critical (only if packet tracing is on) */
/* Iterator callback used by trace_packet(): walking rules from the chain
 * head, track the current chain name (*chainname), rule number (*rulenum)
 * and comment type (*comment) until entry e is reached.
 * NOTE(review): the return type, braces and some statements were lost in
 * extraction; comments describe only the visible code. */
282 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
283 char *hookname, char **chainname,
284 char **comment, unsigned int *rulenum)
286 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
288 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
289 /* Head of user chain: ERROR target with chainname */
290 *chainname = t->target.data;
295 if (s->target_offset == sizeof(struct ip6t_entry)
296 && strcmp(t->target.u.kernel.target->name,
297 IP6T_STANDARD_TARGET) == 0
299 && unconditional(&s->ipv6)) {
300 /* Tail of chains: STANDARD target (return/policy) */
/* Still in the hook's own chain => policy; otherwise a user-chain return. */
301 *comment = *chainname == hookname
302 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
303 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced packet
 * (skb->nf_trace set).  NOTE(review): the hook parameter line, local
 * table_base declaration and braces were lost in extraction. */
312 static void trace_packet(struct sk_buff *skb,
314 const struct net_device *in,
315 const struct net_device *out,
316 const char *tablename,
317 struct xt_table_info *private,
318 struct ip6t_entry *e)
321 const struct ip6t_entry *root;
322 char *hookname, *chainname, *comment;
323 unsigned int rulenum = 0;
/* Use this CPU's copy of the table; caller already holds the table lock. */
325 table_base = (void *)private->entries[smp_processor_id()];
326 root = get_entry(table_base, private->hook_entry[hook]);
328 hookname = chainname = (char *)hooknames[hook];
329 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
/* Walk from the hook's chain head up to e to recover chain/rule number. */
331 IP6T_ENTRY_ITERATE(root,
332 private->size - private->hook_entry[hook],
333 get_chainname_rulenum,
334 e, hookname, &chainname, &comment, &rulenum);
336 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
337 "TRACE: %s:%s:%s:%u ",
338 tablename, chainname, comment, rulenum);
342 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main per-packet rule-traversal engine: run skb through 'table' at hook
 * 'hook' and return a netfilter verdict.
 * NOTE(review): this extract has substantial gaps (the embedded listing
 * numbers skip) — the hook parameter, several locals, braces, verdict
 * plumbing and the tail return paths are missing.  Comments below describe
 * only the visible code; do not treat this listing as buildable. */
344 ip6t_do_table(struct sk_buff *skb,
346 const struct net_device *in,
347 const struct net_device *out,
348 struct xt_table *table)
350 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
351 bool hotdrop = false;
352 /* Initializing verdict to NF_DROP keeps gcc happy. */
353 unsigned int verdict = NF_DROP;
354 const char *indev, *outdev;
356 struct ip6t_entry *e, *back;
357 struct xt_table_info *private;
358 struct xt_match_param mtpar;
359 struct xt_target_param tgpar;
362 indev = in ? in->name : nulldevname;
363 outdev = out ? out->name : nulldevname;
364 /* We handle fragments by dealing with the first fragment as
365 * if it was a normal packet. All other fragments are treated
366 * normally, except that they will NEVER match rules that ask
367 * things we don't know, ie. tcp syn flag or ports). If the
368 * rule is also a fragment-specific rule, non-fragments won't
370 mtpar.hotdrop = &hotdrop;
371 mtpar.in = tgpar.in = in;
372 mtpar.out = tgpar.out = out;
373 tgpar.hooknum = hook;
/* The per-CPU table copy is protected by the table read lock. */
375 read_lock_bh(&table->lock);
376 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
377 private = table->private;
378 table_base = (void *)private->entries[smp_processor_id()];
379 e = get_entry(table_base, private->hook_entry[hook]);
381 /* For return from builtin chain */
382 back = get_entry(table_base, private->underflow[hook]);
/* Per-rule loop: IP header match, then match extensions, then target. */
387 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
388 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
389 struct ip6t_entry_target *t;
391 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
394 ADD_COUNTER(e->counters,
395 ntohs(ipv6_hdr(skb)->payload_len) +
396 sizeof(struct ipv6hdr), 1);
398 t = ip6t_get_target(e);
399 IP_NF_ASSERT(t->u.kernel.target);
401 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
402 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
403 /* The packet is traced: log it */
404 if (unlikely(skb->nf_trace))
405 trace_packet(skb, hook, in, out,
406 table->name, private, e);
408 /* Standard target? */
409 if (!t->u.kernel.target->target) {
412 v = ((struct ip6t_standard_target *)t)->verdict;
414 /* Pop from stack? */
415 if (v != IP6T_RETURN) {
/* Negative standard verdicts encode the final verdict as -(v)-1. */
416 verdict = (unsigned)(-v) - 1;
420 back = get_entry(table_base,
424 if (table_base + v != (void *)e + e->next_offset
425 && !(e->ipv6.flags & IP6T_F_GOTO)) {
426 /* Save old back ptr in next entry */
427 struct ip6t_entry *next
428 = (void *)e + e->next_offset;
430 = (void *)back - table_base;
431 /* set back pointer to next entry */
435 e = get_entry(table_base, v);
437 /* Targets which reenter must return
439 tgpar.target = t->u.kernel.target;
440 tgpar.targinfo = t->data;
442 #ifdef CONFIG_NETFILTER_DEBUG
443 ((struct ip6t_entry *)table_base)->comefrom
446 verdict = t->u.kernel.target->target(skb,
449 #ifdef CONFIG_NETFILTER_DEBUG
450 if (((struct ip6t_entry *)table_base)->comefrom
452 && verdict == IP6T_CONTINUE) {
453 printk("Target %s reentered!\n",
454 t->u.kernel.target->name);
457 ((struct ip6t_entry *)table_base)->comefrom
460 if (verdict == IP6T_CONTINUE)
461 e = (void *)e + e->next_offset;
469 e = (void *)e + e->next_offset;
473 #ifdef CONFIG_NETFILTER_DEBUG
474 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
476 read_unlock_bh(&table->lock);
478 #ifdef DEBUG_ALLOW_ALL
487 /* Figures out from what hook each rule can be called: returns 0 if
488 there are loops. Puts hook bitmask in comefrom. */
/* NOTE(review): this extract has gaps — the return type, several braces,
 * returns and duprintf arguments are missing.  Comments describe only the
 * visible logic: an iterative (non-recursive) depth-first walk that abuses
 * e->counters.pcnt as a back-pointer stack and bit NF_INET_NUMHOOKS of
 * e->comefrom as an "on current path" marker for loop detection. */
490 mark_source_chains(struct xt_table_info *newinfo,
491 unsigned int valid_hooks, void *entry0)
495 /* No recursion; use packet counter to save back ptrs (reset
496 to 0 as we leave), and comefrom to save source hook bitmask */
497 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
498 unsigned int pos = newinfo->hook_entry[hook];
499 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
501 if (!(valid_hooks & (1 << hook)))
504 /* Set initial back pointer. */
505 e->counters.pcnt = pos;
508 struct ip6t_standard_target *t
509 = (void *)ip6t_get_target(e);
510 int visited = e->comefrom & (1 << hook);
/* Revisiting an entry already on the current path => a rule loop. */
512 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
513 printk("iptables: loop hook %u pos %u %08X.\n",
514 hook, pos, e->comefrom);
517 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
519 /* Unconditional return/END. */
520 if ((e->target_offset == sizeof(struct ip6t_entry)
521 && (strcmp(t->target.u.user.name,
522 IP6T_STANDARD_TARGET) == 0)
524 && unconditional(&e->ipv6)) || visited) {
525 unsigned int oldpos, size;
526 if (t->verdict < -NF_MAX_VERDICT - 1) {
598 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
600 struct xt_mtdtor_param par;
602 if (i && (*i)-- == 0)
605 par.match = m->u.kernel.match;
606 par.matchinfo = m->data;
607 if (par.match->destroy != NULL)
608 par.match->destroy(&par);
609 module_put(par.match->me);
614 check_entry(struct ip6t_entry *e, const char *name)
616 struct ip6t_entry_target *t;
618 if (!ip6_checkentry(&e->ipv6)) {
619 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
623 if (e->target_offset + sizeof(struct ip6t_entry_target) >
627 t = ip6t_get_target(e);
628 if (e->target_offset + t->u.target_size > e->next_offset)
/* Validate one already-looked-up match extension via xt_check_match().
 * NOTE(review): the rest of the parameter list (original line 635), the
 * braces, the error path and the return were lost in extraction. */
634 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
637 const struct ip6t_ip6 *ipv6 = par->entryinfo;
640 par->match = m->u.kernel.match;
641 par->matchinfo = m->data;
/* Delegate size/proto/hook validation to the x_tables core. */
643 ret = xt_check_match(par, NFPROTO_IPV6, m->u.match_size - sizeof(*m),
644 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
646 duprintf("ip_tables: check failed for `%s'.\n",
/* Look up a match extension by user-supplied name/revision (loading the
 * ip6t_<name> module on demand), attach it to the rule, then validate it
 * with check_match().  NOTE(review): return type, remaining parameters and
 * several statements were lost in extraction. */
655 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 struct xt_match *match;
661 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
663 "ip6t_%s", m->u.user.name);
664 if (IS_ERR(match) || !match) {
665 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
666 return match ? PTR_ERR(match) : -ENOENT;
668 m->u.kernel.match = match;
670 ret = check_match(m, par, i);
/* Error path: drop the module reference taken by the lookup above. */
676 module_put(m->u.kernel.match->me);
/* Validate the rule's (already-looked-up) target via xt_check_target().
 * NOTE(review): braces, the error return and final return were lost in
 * extraction. */
680 static int check_target(struct ip6t_entry *e, const char *name)
682 struct ip6t_entry_target *t;
683 struct xt_target *target;
686 t = ip6t_get_target(e);
687 target = t->u.kernel.target;
/* Size, hook-mask and protocol validation is done by the x_tables core. */
688 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
689 name, e->comefrom, e->ipv6.proto,
690 e->ipv6.invflags & IP6T_INV_PROTO, e, t->data);
692 duprintf("ip_tables: check failed for `%s'.\n",
693 t->u.kernel.target->name);
/* Full per-rule validation: structural checks, then look up and check all
 * match extensions, then look up and check the target.  On any failure the
 * already-acquired match/target module references are released.
 * NOTE(review): return type, trailing parameters, j initialisation and
 * several statements/labels were lost in extraction. */
700 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
703 struct ip6t_entry_target *t;
704 struct xt_target *target;
707 struct xt_mtchk_param mtpar;
709 ret = check_entry(e, name);
715 mtpar.entryinfo = &e->ipv6;
716 mtpar.hook_mask = e->comefrom;
717 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
719 goto cleanup_matches;
721 t = ip6t_get_target(e);
/* Demand-load the ip6t_<name> target module if needed. */
722 target = try_then_request_module(xt_find_target(AF_INET6,
725 "ip6t_%s", t->u.user.name);
726 if (IS_ERR(target) || !target) {
727 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
728 ret = target ? PTR_ERR(target) : -ENOENT;
729 goto cleanup_matches;
731 t->u.kernel.target = target;
733 ret = check_target(e, name);
/* Error unwind: release target module, then the j checked matches. */
740 module_put(t->u.kernel.target->me);
742 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* First-pass structural walk callback: verify entry alignment and bounds,
 * record which entries sit exactly at the userspace-declared hook entry
 * and underflow offsets, and zero the entry's counters.
 * NOTE(review): return type, 'base' parameter line, braces and return
 * statements were lost in extraction. */
747 check_entry_size_and_hooks(struct ip6t_entry *e,
748 struct xt_table_info *newinfo,
750 unsigned char *limit,
751 const unsigned int *hook_entries,
752 const unsigned int *underflows,
/* Reject misaligned entries or ones whose header runs past the blob. */
757 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
758 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
759 duprintf("Bad offset %p\n", e);
764 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
765 duprintf("checking: element %p size %u\n",
770 /* Check hooks & underflows */
771 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
772 if ((unsigned char *)e - base == hook_entries[h])
773 newinfo->hook_entry[h] = hook_entries[h];
774 if ((unsigned char *)e - base == underflows[h])
775 newinfo->underflow[h] = underflows[h];
778 /* FIXME: underflows must be unconditional, standard verdicts
779 < 0 (not IP6T_RETURN). --RR */
781 /* Clear counters and comefrom */
782 e->counters = ((struct xt_counters) { 0, 0 });
/* Release all resources held by one rule: its match extensions, its
 * target's private data (via the target's destructor), and the target
 * module reference.  Iterator callback; i limits how many entries to clean.
 * NOTE(review): return type, braces and returns were lost in extraction;
 * note this uses the old two-argument target->destroy() calling style. */
790 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
792 struct ip6t_entry_target *t;
794 if (i && (*i)-- == 0)
797 /* Cleanup all matches */
798 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
799 t = ip6t_get_target(e);
800 if (t->u.kernel.target->destroy)
801 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
802 module_put(t->u.kernel.target->me);
806 /* Checks and translates the user-supplied table segment (held in
/* Validate a userspace-supplied table blob end-to-end: offsets, hook
 * entry points, loop freedom (mark_source_chains), per-rule extension
 * checks — then replicate the verified blob to every other CPU's copy.
 * NOTE(review): return type, some parameters, braces, returns and the
 * error-unwind tail were lost in extraction. */
809 translate_table(const char *name,
810 unsigned int valid_hooks,
811 struct xt_table_info *newinfo,
815 const unsigned int *hook_entries,
816 const unsigned int *underflows)
821 newinfo->size = size;
822 newinfo->number = number;
824 /* Init all hooks to impossible value. */
825 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
826 newinfo->hook_entry[i] = 0xFFFFFFFF;
827 newinfo->underflow[i] = 0xFFFFFFFF;
830 duprintf("translate_table: size %u\n", newinfo->size);
832 /* Walk through entries, checking offsets. */
833 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
834 check_entry_size_and_hooks,
838 hook_entries, underflows, &i);
843 duprintf("translate_table: %u not %u entries\n",
848 /* Check hooks all assigned */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
850 /* Only hooks which are valid */
851 if (!(valid_hooks & (1 << i)))
853 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
854 duprintf("Invalid hook entry %u %u\n",
858 if (newinfo->underflow[i] == 0xFFFFFFFF) {
859 duprintf("Invalid underflow %u %u\n",
865 if (!mark_source_chains(newinfo, valid_hooks, entry0))
868 /* Finally, each sanity check must pass */
870 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
871 find_check_entry, name, size, &i);
/* On failure, clean up the i entries that already passed checks. */
874 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
879 /* And one copy for every other CPU */
880 for_each_possible_cpu(i) {
881 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
882 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* get_counters() callback: accumulate one rule's byte/packet counts into
 * total[*i].  NOTE(review): return type, the index parameter line, braces
 * and the (*i)++ / return were lost in extraction. */
890 add_entry_to_counter(const struct ip6t_entry *e,
891 struct xt_counters total[],
894 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters() callback for the first CPU: overwrite (rather than add
 * to) total[*i] with this rule's counts, avoiding a prior memset.
 * NOTE(review): return type, index parameter, braces and increment/return
 * were lost in extraction. */
901 set_entry_to_counter(const struct ip6t_entry *e,
902 struct ip6t_counters total[],
905 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Aggregate the per-CPU counter copies of table t into the flat counters[]
 * array: SET from the current CPU's copy first, then ADD every other CPU.
 * NOTE(review): return type, locals, braces and iterator arguments were
 * lost in extraction. */
912 get_counters(const struct xt_table_info *t,
913 struct xt_counters counters[])
919 /* Instead of clearing (by a previous call to memset())
920 * the counters and using adds, we set the counters
921 * with data used by 'current' CPU
922 * We dont care about preemption here.
924 curcpu = raw_smp_processor_id();
927 IP6T_ENTRY_ITERATE(t->entries[curcpu],
929 set_entry_to_counter,
933 for_each_possible_cpu(cpu) {
937 IP6T_ENTRY_ITERATE(t->entries[cpu],
939 add_entry_to_counter,
/* Allocate and fill a snapshot of the table's counters for copying to
 * userspace; caller must vfree() the result.  Returns ERR_PTR on OOM.
 * The write lock gives an atomic snapshot versus packet-path updates.
 * NOTE(review): braces and the final return were lost in extraction. */
945 static struct xt_counters *alloc_counters(struct xt_table *table)
947 unsigned int countersize;
948 struct xt_counters *counters;
949 const struct xt_table_info *private = table->private;
951 /* We need atomic snapshot of counters: rest doesn't change
952 (other than comefrom, which userspace doesn't care
954 countersize = sizeof(struct xt_counters) * private->number;
955 counters = vmalloc_node(countersize, numa_node_id());
957 if (counters == NULL)
958 return ERR_PTR(-ENOMEM);
960 /* First, sum counters... */
961 write_lock_bh(&table->lock);
962 get_counters(private, counters);
963 write_unlock_bh(&table->lock);
/* Copy the whole ruleset blob to userspace, then patch each entry in the
 * user buffer: write the aggregated counters and replace kernel pointers
 * with the match/target names userspace expects.
 * NOTE(review): return type, several locals/braces and error-path lines
 * were lost in extraction; comments describe the visible code only. */
969 copy_entries_to_user(unsigned int total_size,
970 struct xt_table *table,
971 void __user *userptr)
973 unsigned int off, num;
974 struct ip6t_entry *e;
975 struct xt_counters *counters;
976 const struct xt_table_info *private = table->private;
978 const void *loc_cpu_entry;
980 counters = alloc_counters(table);
981 if (IS_ERR(counters))
982 return PTR_ERR(counters);
984 /* choose the copy that is on our node/cpu, ...
985 * This choice is lazy (because current thread is
986 * allowed to migrate to another cpu)
988 loc_cpu_entry = private->entries[raw_smp_processor_id()];
989 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
994 /* FIXME: use iterator macros --RR */
995 /* ... then go back and fix counters and names */
996 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
998 const struct ip6t_entry_match *m;
999 const struct ip6t_entry_target *t;
1001 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Overwrite the counters field of entry #num in the user buffer. */
1002 if (copy_to_user(userptr + off
1003 + offsetof(struct ip6t_entry, counters),
1005 sizeof(counters[num])) != 0) {
/* Walk this entry's matches and write each match's kernel name back. */
1010 for (i = sizeof(struct ip6t_entry);
1011 i < e->target_offset;
1012 i += m->u.match_size) {
1015 if (copy_to_user(userptr + off + i
1016 + offsetof(struct ip6t_entry_match,
1018 m->u.kernel.match->name,
1019 strlen(m->u.kernel.match->name)+1)
/* Likewise write the target's kernel name into the user buffer. */
1026 t = ip6t_get_target(e);
1027 if (copy_to_user(userptr + off + e->target_offset
1028 + offsetof(struct ip6t_entry_target,
1030 t->u.kernel.target->name,
1031 strlen(t->u.kernel.target->name)+1) != 0) {
1042 #ifdef CONFIG_COMPAT
1043 static void compat_standard_from_user(void *dst, void *src)
1045 int v = *(compat_int_t *)src;
1048 v += xt_compat_calc_jump(AF_INET6, v);
1049 memcpy(dst, &v, sizeof(v));
1052 static int compat_standard_to_user(void __user *dst, void *src)
1054 compat_int_t cv = *(int *)src;
1057 cv -= xt_compat_calc_jump(AF_INET6, cv);
1058 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1062 compat_calc_match(struct ip6t_entry_match *m, int *size)
1064 *size += xt_compat_match_offset(m->u.kernel.match);
/* Compute how much smaller one native entry becomes in compat layout:
 * entry-header delta plus per-match and target deltas.  Shrinks
 * newinfo->size accordingly, registers the per-entry offset with the
 * x_tables compat layer, and adjusts any hook entry/underflow offsets that
 * lie beyond this entry.  NOTE(review): locals, braces and the final
 * return were lost in extraction. */
1068 static int compat_calc_entry(struct ip6t_entry *e,
1069 const struct xt_table_info *info,
1070 void *base, struct xt_table_info *newinfo)
1072 struct ip6t_entry_target *t;
1073 unsigned int entry_offset;
1076 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1077 entry_offset = (void *)e - base;
1078 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1079 t = ip6t_get_target(e);
1080 off += xt_compat_target_offset(t->u.kernel.target);
1081 newinfo->size -= off;
1082 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Hook offsets located after this entry shift down by the same delta. */
1086 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1087 if (info->hook_entry[i] &&
1088 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1089 newinfo->hook_entry[i] -= off;
1090 if (info->underflow[i] &&
1091 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1092 newinfo->underflow[i] -= off;
/* Build a compat-layout xt_table_info description of 'info' into 'newinfo'
 * by walking every entry with compat_calc_entry().
 * NOTE(review): braces, an error return and the iterator's final argument
 * line were lost in extraction. */
1097 static int compat_table_info(const struct xt_table_info *info,
1098 struct xt_table_info *newinfo)
1100 void *loc_cpu_entry;
1102 if (!newinfo || !info)
1105 /* we dont care about newinfo->entries[] */
1106 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1107 newinfo->initial_entries = 0;
1108 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1109 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1110 compat_calc_entry, info, loc_cpu_entry,
/* IP6T_SO_GET_INFO handler: copy table metadata (hook offsets, entry
 * count, size) to userspace; under compat, sizes/offsets are translated
 * first via compat_table_info().
 * NOTE(review): locals, braces, several returns and the compat guard
 * bodies were lost in extraction. */
1115 static int get_info(struct net *net, void __user *user, int *len, int compat)
1117 char name[IP6T_TABLE_MAXNAMELEN];
1121 if (*len != sizeof(struct ip6t_getinfo)) {
1122 duprintf("length %u != %zu\n", *len,
1123 sizeof(struct ip6t_getinfo));
1127 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Defensive: force NUL-termination of the user-supplied table name. */
1130 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1131 #ifdef CONFIG_COMPAT
1133 xt_compat_lock(AF_INET6);
/* Demand-load the ip6table_<name> module if the table is absent. */
1135 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1136 "ip6table_%s", name);
1137 if (t && !IS_ERR(t)) {
1138 struct ip6t_getinfo info;
1139 const struct xt_table_info *private = t->private;
1141 #ifdef CONFIG_COMPAT
1143 struct xt_table_info tmp;
1144 ret = compat_table_info(private, &tmp);
1145 xt_compat_flush_offsets(AF_INET6);
1149 info.valid_hooks = t->valid_hooks;
1150 memcpy(info.hook_entry, private->hook_entry,
1151 sizeof(info.hook_entry));
1152 memcpy(info.underflow, private->underflow,
1153 sizeof(info.underflow));
1154 info.num_entries = private->number;
1155 info.size = private->size;
1156 strcpy(info.name, name);
1158 if (copy_to_user(user, &info, *len) != 0)
1166 ret = t ? PTR_ERR(t) : -ENOENT;
1167 #ifdef CONFIG_COMPAT
1169 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the userspace-declared size
 * against the live table and copy the full ruleset out.
 * NOTE(review): return type, locals, braces, returns and the unlock path
 * were lost in extraction. */
1175 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1178 struct ip6t_get_entries get;
1181 if (*len < sizeof(get)) {
1182 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1185 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1187 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1188 duprintf("get_entries: %u != %zu\n",
1189 *len, sizeof(get) + get.size);
1193 t = xt_find_table_lock(net, AF_INET6, get.name);
1194 if (t && !IS_ERR(t)) {
1195 struct xt_table_info *private = t->private;
1196 duprintf("t->private->number = %u\n", private->number);
/* Only copy when the caller's size matches the live table exactly. */
1197 if (get.size == private->size)
1198 ret = copy_entries_to_user(private->size,
1199 t, uptr->entrytable);
1201 duprintf("get_entries: I've got %u not %u!\n",
1202 private->size, get.size);
1208 ret = t ? PTR_ERR(t) : -ENOENT;
/* Shared native/compat tail of table replacement: swap in the already
 * translated newinfo under the x_tables lock, fix module refcounts, read
 * the old counters out to userspace, and free the old table.
 * NOTE(review): return type, several locals, braces, module_put/__module_get
 * lines and the error-unwind tail were lost in extraction. */
1214 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1215 struct xt_table_info *newinfo, unsigned int num_counters,
1216 void __user *counters_ptr)
1220 struct xt_table_info *oldinfo;
1221 struct xt_counters *counters;
1222 const void *loc_cpu_old_entry;
1225 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1232 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1233 "ip6table_%s", name);
1234 if (!t || IS_ERR(t)) {
1235 ret = t ? PTR_ERR(t) : -ENOENT;
1236 goto free_newinfo_counters_untrans;
/* The replacement must cover exactly the hooks this table registered. */
1240 if (valid_hooks != t->valid_hooks) {
1241 duprintf("Valid hook crap: %08X vs %08X\n",
1242 valid_hooks, t->valid_hooks);
1247 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1251 /* Update module usage count based on number of rules */
1252 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1253 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1254 if ((oldinfo->number > oldinfo->initial_entries) ||
1255 (newinfo->number <= oldinfo->initial_entries))
1257 if ((oldinfo->number > oldinfo->initial_entries) &&
1258 (newinfo->number <= oldinfo->initial_entries))
1261 /* Get the old counters. */
1262 get_counters(oldinfo, counters);
1263 /* Decrease module usage counts and free resource */
1264 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1265 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1267 xt_free_table_info(oldinfo);
1268 if (copy_to_user(counters_ptr, counters,
1269 sizeof(struct xt_counters) * num_counters) != 0)
1278 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler (native layout): copy the replace header
 * and ruleset blob from userspace, validate/translate it, then hand off
 * to __do_replace().  On any failure after translation the new table's
 * entries are cleaned up and the table info freed.
 * NOTE(review): return type, braces and several returns/guards were lost
 * in extraction. */
1285 do_replace(struct net *net, void __user *user, unsigned int len)
1288 struct ip6t_replace tmp;
1289 struct xt_table_info *newinfo;
1290 void *loc_cpu_entry;
1292 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1295 /* overflow check */
1296 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1299 newinfo = xt_alloc_table_info(tmp.size);
1303 /* choose the copy that is on our node/cpu */
1304 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1305 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1311 ret = translate_table(tmp.name, tmp.valid_hooks,
1312 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1313 tmp.hook_entry, tmp.underflow);
1317 duprintf("ip_tables: Translated table\n");
1319 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1320 tmp.num_counters, tmp.counters);
1322 goto free_newinfo_untrans;
1325 free_newinfo_untrans:
1326 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1328 xt_free_table_info(newinfo);
1332 /* We're lazy, and add to the first CPU; overflow works its fey magic
1333 * and everything is OK. */
/* IP6T_ENTRY_ITERATE callback for SO_SET_ADD_COUNTERS: add the userspace
 * supplied deltas addme[*i] onto this rule's counters.
 * NOTE(review): return type, the index parameter, braces and the
 * increment/return were lost in extraction. */
1335 add_counter_to_entry(struct ip6t_entry *e,
1336 const struct xt_counters addme[],
1340 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1342 (long unsigned int)e->counters.pcnt,
1343 (long unsigned int)e->counters.bcnt,
1344 (long unsigned int)addme[*i].pcnt,
1345 (long unsigned int)addme[*i].bcnt);
1348 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* IP6T_SO_SET_ADD_COUNTERS handler: read a (native or compat) counters
 * header plus delta array from userspace and add the deltas onto the
 * table's rules under the write lock.
 * NOTE(review): return type, several locals/guards, braces, returns and
 * the unwind labels were lost in extraction. */
1355 do_add_counters(struct net *net, void __user *user, unsigned int len,
1359 struct xt_counters_info tmp;
1360 struct xt_counters *paddc;
1361 unsigned int num_counters;
1366 const struct xt_table_info *private;
1368 const void *loc_cpu_entry;
1369 #ifdef CONFIG_COMPAT
1370 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat (32-bit) and native callers. */
1374 size = sizeof(struct compat_xt_counters_info);
1379 size = sizeof(struct xt_counters_info);
1382 if (copy_from_user(ptmp, user, size) != 0)
1385 #ifdef CONFIG_COMPAT
1387 num_counters = compat_tmp.num_counters;
1388 name = compat_tmp.name;
1392 num_counters = tmp.num_counters;
/* Total length must exactly equal header + declared counter array. */
1396 if (len != size + num_counters * sizeof(struct xt_counters))
1399 paddc = vmalloc_node(len - size, numa_node_id());
1403 if (copy_from_user(paddc, user + size, len - size) != 0) {
1408 t = xt_find_table_lock(net, AF_INET6, name);
1409 if (!t || IS_ERR(t)) {
1410 ret = t ? PTR_ERR(t) : -ENOENT;
1414 write_lock_bh(&t->lock);
1415 private = t->private;
1416 if (private->number != num_counters) {
1418 goto unlock_up_free;
1422 /* Choose the copy that is on our node */
1423 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1424 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1426 add_counter_to_entry,
1430 write_unlock_bh(&t->lock);
1439 #ifdef CONFIG_COMPAT
/* 32-bit layout of struct ip6t_replace as passed by compat userspace.
 * NOTE(review): several interior fields (valid_hooks, num_entries, size,
 * num_counters per the upstream layout — unverified here) and the closing
 * brace were lost in extraction. */
1440 struct compat_ip6t_replace {
1441 char name[IP6T_TABLE_MAXNAMELEN];
1445 u32 hook_entry[NF_INET_NUMHOOKS];
1446 u32 underflow[NF_INET_NUMHOOKS];
1448 compat_uptr_t counters; /* struct ip6t_counters * */
1449 struct compat_ip6t_entry entries[0];
/* Serialize one native entry into the compat (32-bit) user buffer at
 * *dstptr: entry header, counters, matches and target are written in turn
 * while *dstptr advances and *size shrinks by the native-vs-compat deltas;
 * the compat target_offset/next_offset are then patched in place.
 * NOTE(review): return type, trailing parameters, braces, error labels
 * and returns were lost in extraction. */
1453 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1454 unsigned int *size, struct xt_counters *counters,
1457 struct ip6t_entry_target *t;
1458 struct compat_ip6t_entry __user *ce;
1459 u_int16_t target_offset, next_offset;
1460 compat_uint_t origsize;
1465 ce = (struct compat_ip6t_entry __user *)*dstptr;
1466 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1469 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1472 *dstptr += sizeof(struct compat_ip6t_entry);
1473 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1475 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Offsets shrink by however much the layout has contracted so far. */
1476 target_offset = e->target_offset - (origsize - *size);
1479 t = ip6t_get_target(e);
1480 ret = xt_compat_target_to_user(t, dstptr, size);
1484 next_offset = e->next_offset - (origsize - *size);
1485 if (put_user(target_offset, &ce->target_offset))
1487 if (put_user(next_offset, &ce->next_offset))
/* Compat first pass for one match: look the extension up by name/revision
 * (demand-loading its module), attach it, and accumulate the native-vs-
 * compat size delta into *size.
 * NOTE(review): return type, the name parameter line, braces and the
 * increment/return tail were lost in extraction. */
1497 compat_find_calc_match(struct ip6t_entry_match *m,
1499 const struct ip6t_ip6 *ipv6,
1500 unsigned int hookmask,
1501 int *size, unsigned int *i)
1503 struct xt_match *match;
1505 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1506 m->u.user.revision),
1507 "ip6t_%s", m->u.user.name);
1508 if (IS_ERR(match) || !match) {
1509 duprintf("compat_check_calc_match: `%s' not found\n",
1511 return match ? PTR_ERR(match) : -ENOENT;
1513 m->u.kernel.match = match;
1514 *size += xt_compat_match_offset(match);
/* Drop the module reference pinned on a match during compat checking.
 * When i is non-NULL it is a countdown: releasing stops once *i
 * reaches zero, so only the matches processed so far are unwound. */
1521 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1523 if (i && (*i)-- == 0)
1526 module_put(m->u.kernel.match->me);
/* Undo the module pinning done while checking one compat entry:
 * release every match reference, then the target's. The optional *i
 * countdown limits cleanup to the entries processed before a failure. */
1531 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1533 struct ip6t_entry_target *t;
1535 if (i && (*i)-- == 0)
1538 /* Cleanup all matches */
1539 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1540 t = compat_ip6t_get_target(e);
1541 module_put(t->u.kernel.target->me);
/* First pass over one compat entry during table translation: validate
 * alignment/size against [base, limit), resolve and pin its matches
 * and target (computing the total compat->native size delta in `off`),
 * record the delta for this entry's offset in the xt compat offset
 * table, and note matching hook_entry/underflow positions in newinfo.
 * On failure the already-pinned matches/target are released. */
1546 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1547 struct xt_table_info *newinfo,
1549 unsigned char *base,
1550 unsigned char *limit,
1551 unsigned int *hook_entries,
1552 unsigned int *underflows,
1556 struct ip6t_entry_target *t;
1557 struct xt_target *target;
1558 unsigned int entry_offset;
1562 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1563 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1564 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1565 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* entry must be large enough for its header plus a minimal target */
1569 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1570 sizeof(struct compat_xt_entry_target)) {
1571 duprintf("checking: element %p size %u\n",
1576 /* For purposes of check_entry casting the compat entry is fine */
1577 ret = check_entry((struct ip6t_entry *)e, name);
1581 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1582 entry_offset = (void *)e - (void *)base;
1584 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1585 &e->ipv6, e->comefrom, &off, &j);
1587 goto release_matches;
1589 t = compat_ip6t_get_target(e);
1590 target = try_then_request_module(xt_find_target(AF_INET6,
1592 t->u.user.revision),
1593 "ip6t_%s", t->u.user.name);
1594 if (IS_ERR(target) || !target) {
1595 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1597 ret = target ? PTR_ERR(target) : -ENOENT;
1598 goto release_matches;
1600 t->u.kernel.target = target;
1602 off += xt_compat_target_offset(target);
/* remember per-entry size delta for later offset adjustment */
1604 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1608 /* Check hooks & underflows */
1609 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1610 if ((unsigned char *)e - base == hook_entries[h])
1611 newinfo->hook_entry[h] = hook_entries[h];
1612 if ((unsigned char *)e - base == underflows[h])
1613 newinfo->underflow[h] = underflows[h];
1616 /* Clear counters and comefrom */
1617 memset(&e->counters, 0, sizeof(e->counters));
1624 module_put(t->u.kernel.target->me);
/* NOTE(review): e has compat layout, yet this error path walks it with
 * the native-layout IP6T_MATCH_ITERATE; it looks like it should be
 * COMPAT_IP6T_MATCH_ITERATE (as compat_release_entry uses) — verify
 * against the x_tables compat helpers / upstream history. */
1626 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/* Second pass of translation: expand one checked compat entry into
 * native layout at *dstptr. Copies the header and counters, converts
 * each match and the target via the xt compat "from_user" helpers,
 * rewrites target_offset/next_offset for the growth, and shifts any
 * hook_entry/underflow positions that lie beyond this entry. */
1631 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1632 unsigned int *size, const char *name,
1633 struct xt_table_info *newinfo, unsigned char *base)
1635 struct ip6t_entry_target *t;
1636 struct xt_target *target;
1637 struct ip6t_entry *de;
1638 unsigned int origsize;
1643 de = (struct ip6t_entry *)*dstptr;
1644 memcpy(de, e, sizeof(struct ip6t_entry));
1645 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1647 *dstptr += sizeof(struct ip6t_entry);
1648 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1650 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth here: offsets move outward */
1654 de->target_offset = e->target_offset - (origsize - *size);
1655 t = compat_ip6t_get_target(e);
1656 target = t->u.kernel.target;
1657 xt_compat_target_from_user(t, dstptr, size);
1659 de->next_offset = e->next_offset - (origsize - *size);
1660 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1661 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1662 newinfo->hook_entry[h] -= origsize - *size;
1663 if ((unsigned char *)de - base < newinfo->underflow[h])
1664 newinfo->underflow[h] -= origsize - *size;
/* Final validation of one already-translated (native layout) entry:
 * run every match's checkentry hook, then the target's. On failure
 * the first j matches are cleaned up before returning. */
1669 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1674 struct xt_mtchk_param mtpar;
1678 mtpar.entryinfo = &e->ipv6;
1679 mtpar.hook_mask = e->comefrom;
1680 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1682 goto cleanup_matches;
1684 ret = check_target(e, name);
1686 goto cleanup_matches;
/* unwind only the matches that passed check_match (counted in j) */
1692 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* Translate a full 32-bit ruleset into native layout. Flow:
 *   1) under xt_compat_lock, walk the compat blob validating each
 *      entry and accumulating per-entry size deltas;
 *   2) verify entry count and that all valid hooks got entry/underflow;
 *   3) allocate the (larger) native table and expand every entry into
 *      it;
 *   4) run mark_source_chains and per-entry final checks;
 *   5) replicate the translated copy to every other possible CPU.
 * On any failure the pinned match/target modules are released and all
 * allocations freed. */
1697 translate_compat_table(const char *name,
1698 unsigned int valid_hooks,
1699 struct xt_table_info **pinfo,
1701 unsigned int total_size,
1702 unsigned int number,
1703 unsigned int *hook_entries,
1704 unsigned int *underflows)
1707 struct xt_table_info *newinfo, *info;
1708 void *pos, *entry0, *entry1;
1715 info->number = number;
1717 /* Init all hooks to impossible value. */
1718 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1719 info->hook_entry[i] = 0xFFFFFFFF;
1720 info->underflow[i] = 0xFFFFFFFF;
1723 duprintf("translate_compat_table: size %u\n", info->size);
1725 xt_compat_lock(AF_INET6);
1726 /* Walk through entries, checking offsets. */
1727 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1728 check_compat_entry_size_and_hooks,
1729 info, &size, entry0,
1730 entry0 + total_size,
1731 hook_entries, underflows, &j, name);
1737 duprintf("translate_compat_table: %u not %u entries\n",
1742 /* Check hooks all assigned */
1743 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1744 /* Only hooks which are valid */
1745 if (!(valid_hooks & (1 << i)))
1747 if (info->hook_entry[i] == 0xFFFFFFFF) {
1748 duprintf("Invalid hook entry %u %u\n",
1749 i, hook_entries[i]);
1752 if (info->underflow[i] == 0xFFFFFFFF) {
1753 duprintf("Invalid underflow %u %u\n",
/* `size` now includes all compat->native growth computed in pass 1 */
1760 newinfo = xt_alloc_table_info(size);
1764 newinfo->number = number;
1765 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1766 newinfo->hook_entry[i] = info->hook_entry[i];
1767 newinfo->underflow[i] = info->underflow[i];
1769 entry1 = newinfo->entries[raw_smp_processor_id()];
1772 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1773 compat_copy_entry_from_user,
1774 &pos, &size, name, newinfo, entry1);
/* offsets table no longer needed once expansion is done */
1775 xt_compat_flush_offsets(AF_INET6);
1776 xt_compat_unlock(AF_INET6);
1781 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1785 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* partial failure: release refs of unchecked compat entries (j) and
 * clean up the i entries that passed compat_check_entry */
1789 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1790 compat_release_entry, &j);
1791 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1792 xt_free_table_info(newinfo);
1796 /* And one copy for every other CPU */
1797 for_each_possible_cpu(i)
1798 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1799 memcpy(newinfo->entries[i], entry1, newinfo->size);
1803 xt_free_table_info(info);
1807 xt_free_table_info(newinfo);
1809 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1812 xt_compat_flush_offsets(AF_INET6);
1813 xt_compat_unlock(AF_INET6);
/* IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header and blob, translate to native layout, then hand off
 * to the common __do_replace. The translated table is cleaned up if
 * __do_replace rejects it. */
1818 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1821 struct compat_ip6t_replace tmp;
1822 struct xt_table_info *newinfo;
1823 void *loc_cpu_entry;
1825 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1828 /* overflow check */
/* per-CPU duplication multiplies size; counters allocation multiplies
 * num_counters — bound both before allocating */
1829 if (tmp.size >= INT_MAX / num_possible_cpus())
1831 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1834 newinfo = xt_alloc_table_info(tmp.size);
1838 /* choose the copy that is on our node/cpu */
1839 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1840 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1846 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1847 &newinfo, &loc_cpu_entry, tmp.size,
1848 tmp.num_entries, tmp.hook_entry,
1853 duprintf("compat_do_replace: Translated table\n");
1855 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1856 tmp.num_counters, compat_ptr(tmp.counters));
1858 goto free_newinfo_untrans;
1861 free_newinfo_untrans:
1862 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1864 xt_free_table_info(newinfo);
/* setsockopt() entry point for 32-bit userspace; requires
 * CAP_NET_ADMIN and dispatches to the compat-aware handlers. */
1869 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1874 if (!capable(CAP_NET_ADMIN))
1878 case IP6T_SO_SET_REPLACE:
1879 ret = compat_do_replace(sock_net(sk), user, len);
1882 case IP6T_SO_SET_ADD_COUNTERS:
/* last arg 1 = caller is a compat (32-bit) task */
1883 ret = do_add_counters(sock_net(sk), user, len, 1);
1887 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace's view of struct ip6t_get_entries: table name plus
 * a flexible array of compat-layout rules filled in on GET_ENTRIES. */
1894 struct compat_ip6t_get_entries {
1895 char name[IP6T_TABLE_MAXNAMELEN];
1897 struct compat_ip6t_entry entrytable[0];
/* Dump a table's rules to a 32-bit caller: snapshot the aggregated
 * per-rule counters, then walk the local CPU's copy emitting each
 * entry in compat layout via compat_copy_entry_to_user. */
1901 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1902 void __user *userptr)
1904 struct xt_counters *counters;
1905 const struct xt_table_info *private = table->private;
1909 const void *loc_cpu_entry;
1912 counters = alloc_counters(table);
1913 if (IS_ERR(counters))
1914 return PTR_ERR(counters);
1916 /* choose the copy that is on our node/cpu, ...
1917 * This choice is lazy (because current thread is
1918 * allowed to migrate to another cpu)
1920 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1923 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1924 compat_copy_entry_to_user,
1925 &pos, &size, counters, &i);
/* IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the
 * user-supplied length against the table's compat-translated size
 * (computed by compat_table_info) before copying entries out.
 * Runs under xt_compat_lock so the compat offset table is stable. */
1932 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1936 struct compat_ip6t_get_entries get;
1939 if (*len < sizeof(get)) {
1940 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1944 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1947 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1948 duprintf("compat_get_entries: %u != %zu\n",
1949 *len, sizeof(get) + get.size);
1953 xt_compat_lock(AF_INET6);
1954 t = xt_find_table_lock(net, AF_INET6, get.name);
1955 if (t && !IS_ERR(t)) {
1956 const struct xt_table_info *private = t->private;
1957 struct xt_table_info info;
1958 duprintf("t->private->number = %u\n", private->number);
/* recompute the compat-layout size and insist the caller agreed on it */
1959 ret = compat_table_info(private, &info);
1960 if (!ret && get.size == info.size) {
1961 ret = compat_copy_entries_to_user(private->size,
1962 t, uptr->entrytable);
1964 duprintf("compat_get_entries: I've got %u not %u!\n",
1965 private->size, get.size);
1968 xt_compat_flush_offsets(AF_INET6);
1972 ret = t ? PTR_ERR(t) : -ENOENT;
1974 xt_compat_unlock(AF_INET6);
/* Forward declaration: the native getsockopt handler is defined below
 * but is the fallthrough for compat commands with no 32-bit quirks. */
1978 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* getsockopt() entry point for 32-bit userspace; requires
 * CAP_NET_ADMIN. Only GET_INFO/GET_ENTRIES need compat handling;
 * everything else falls through to the native handler. */
1981 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1985 if (!capable(CAP_NET_ADMIN))
1989 case IP6T_SO_GET_INFO:
1990 ret = get_info(sock_net(sk), user, len, 1);
1992 case IP6T_SO_GET_ENTRIES:
1993 ret = compat_get_entries(sock_net(sk), user, len);
1996 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt() entry point; requires CAP_NET_ADMIN and
 * dispatches REPLACE / ADD_COUNTERS to the common handlers. */
2003 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2007 if (!capable(CAP_NET_ADMIN))
2011 case IP6T_SO_SET_REPLACE:
2012 ret = do_replace(sock_net(sk), user, len);
2015 case IP6T_SO_SET_ADD_COUNTERS:
/* last arg 0 = native (non-compat) caller */
2016 ret = do_add_counters(sock_net(sk), user, len, 0);
2020 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt() entry point; requires CAP_NET_ADMIN.
 * Handles GET_INFO, GET_ENTRIES and match/target revision queries
 * (the latter may autoload the "ip6t_<name>" extension module). */
2028 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2032 if (!capable(CAP_NET_ADMIN))
2036 case IP6T_SO_GET_INFO:
2037 ret = get_info(sock_net(sk), user, len, 0);
2040 case IP6T_SO_GET_ENTRIES:
2041 ret = get_entries(sock_net(sk), user, len);
2044 case IP6T_SO_GET_REVISION_MATCH:
2045 case IP6T_SO_GET_REVISION_TARGET: {
2046 struct ip6t_get_revision rev;
2049 if (*len != sizeof(rev)) {
2053 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2058 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2063 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2066 "ip6t_%s", rev.name);
2071 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register an ip6tables table: allocate per-CPU table info, copy the
 * initial ruleset from @repl into the local CPU's slot, translate and
 * validate it, then register with the x_tables core. Returns the
 * registered xt_table or ERR_PTR on failure. */
2078 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2079 const struct ip6t_replace *repl)
2082 struct xt_table_info *newinfo;
/* zeroed placeholder xt_table_info handed to xt_register_table */
2083 struct xt_table_info bootstrap
2084 = { 0, 0, 0, { 0 }, { 0 }, { } };
2085 void *loc_cpu_entry;
2086 struct xt_table *new_table;
2088 newinfo = xt_alloc_table_info(repl->size);
2094 /* choose the copy on our node/cpu, but dont care about preemption */
2095 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2096 memcpy(loc_cpu_entry, repl->entries, repl->size);
2098 ret = translate_table(table->name, table->valid_hooks,
2099 newinfo, loc_cpu_entry, repl->size,
2106 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2107 if (IS_ERR(new_table)) {
2108 ret = PTR_ERR(new_table);
2114 xt_free_table_info(newinfo);
2116 return ERR_PTR(ret);
/* Unregister a table and free everything it owned: run cleanup_entry
 * over the local CPU's copy (dropping match/target module refs), drop
 * the extra table-module ref taken for user-added rules, and free the
 * per-CPU table info. */
2119 void ip6t_unregister_table(struct xt_table *table)
2121 struct xt_table_info *private;
2122 void *loc_cpu_entry;
/* table->me may be unreachable after xt_unregister_table; save it */
2123 struct module *table_owner = table->me;
2125 private = xt_unregister_table(table);
2127 /* Decrease module usage counts and free resources */
2128 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2129 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* the ref was only taken once rules beyond the built-ins existed */
2130 if (private->number > private->initial_entries)
2131 module_put(table_owner);
2132 xt_free_table_info(private);
2135 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2137 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2138 u_int8_t type, u_int8_t code,
/* type must equal test_type exactly; code is range-checked inclusive */
2141 return (type == test_type && code >= min_code && code <= max_code)
/* Match callback for the built-in "icmp6" match: read the ICMPv6
 * header at the transport offset and compare type/code against the
 * rule's configured range. Fragments are never matched; a truncated
 * header triggers hotdrop (packet cannot be safely evaluated). */
2146 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2148 const struct icmp6hdr *ic;
2149 struct icmp6hdr _icmph;
2150 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2152 /* Must not be a fragment. */
2153 if (par->fragoff != 0)
2156 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2158 /* We've been asked to examine this packet, and we
2159 * can't. Hence, no choice but to drop.
2161 duprintf("Dropping evil ICMP tinygram.\n");
2162 *par->hotdrop = true;
2166 return icmp6_type_code_match(icmpinfo->type,
2169 ic->icmp6_type, ic->icmp6_code,
/* normalize the invert flag to a strict bool before XOR-style use */
2170 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2173 /* Called when user tries to insert an entry of this type. */
2174 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2176 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2178 /* Must specify no unknown invflags */
/* reject the rule if any invert bit other than IP6T_ICMP_INV is set */
2179 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2182 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is a plain int verdict; compat
 * hooks translate the verdict between 32- and 64-bit layouts. */
2183 static struct xt_target ip6t_standard_target __read_mostly = {
2184 .name = IP6T_STANDARD_TARGET,
2185 .targetsize = sizeof(int),
2187 #ifdef CONFIG_COMPAT
2188 .compatsize = sizeof(compat_int_t),
2189 .compat_from_user = compat_standard_from_user,
2190 .compat_to_user = compat_standard_to_user,
/* Error target: terminates a chain that should never be reached;
 * its payload is the error-name string (hence the MAXNAMELEN size). */
2194 static struct xt_target ip6t_error_target __read_mostly = {
2195 .name = IP6T_ERROR_TARGET,
2196 .target = ip6t_error,
2197 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* sockopt registration: wires the IP6T_SO_* get/set command ranges to
 * the handlers above, with separate compat entry points for 32-bit
 * userspace on 64-bit kernels. */
2201 static struct nf_sockopt_ops ip6t_sockopts = {
2203 .set_optmin = IP6T_BASE_CTL,
2204 .set_optmax = IP6T_SO_SET_MAX+1,
2205 .set = do_ip6t_set_ctl,
2206 #ifdef CONFIG_COMPAT
2207 .compat_set = compat_do_ip6t_set_ctl,
2209 .get_optmin = IP6T_BASE_CTL,
2210 .get_optmax = IP6T_SO_GET_MAX+1,
2211 .get = do_ip6t_get_ctl,
2212 #ifdef CONFIG_COMPAT
2213 .compat_get = compat_do_ip6t_get_ctl,
2215 .owner = THIS_MODULE,
/* Registration record for the built-in "icmp6" match, restricted to
 * IPPROTO_ICMPV6 packets by the .proto field. */
2218 static struct xt_match icmp6_matchstruct __read_mostly = {
2220 .match = icmp6_match,
2221 .matchsize = sizeof(struct ip6t_icmp),
2222 .checkentry = icmp6_checkentry,
2223 .proto = IPPROTO_ICMPV6,
/* Per-netns init: set up the AF_INET6 x_tables state for this netns. */
2227 static int __net_init ip6_tables_net_init(struct net *net)
2229 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: release the AF_INET6 x_tables state. */
2232 static void __net_exit ip6_tables_net_exit(struct net *net)
2234 xt_proto_fini(net, AF_INET6);
/* pernet hooks so every network namespace gets its own table state */
2237 static struct pernet_operations ip6_tables_net_ops = {
2238 .init = ip6_tables_net_init,
2239 .exit = ip6_tables_net_exit,
/* Module init: register the pernet subsystem, the two built-in
 * targets, the icmp6 match, and finally the sockopt interface.
 * Each failure unwinds everything registered before it, in reverse
 * order. */
2242 static int __init ip6_tables_init(void)
2246 ret = register_pernet_subsys(&ip6_tables_net_ops);
2250 /* Noone else will be downing sem now, so we won't sleep */
2251 ret = xt_register_target(&ip6t_standard_target);
2254 ret = xt_register_target(&ip6t_error_target);
2257 ret = xt_register_match(&icmp6_matchstruct);
2261 /* Register setsockopt */
2262 ret = nf_register_sockopt(&ip6t_sockopts);
2266 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* error unwind path: reverse order of registration */
2270 xt_unregister_match(&icmp6_matchstruct);
2272 xt_unregister_target(&ip6t_error_target);
2274 xt_unregister_target(&ip6t_standard_target);
2276 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: tear down in strict reverse order of ip6_tables_init. */
2281 static void __exit ip6_tables_fini(void)
2283 nf_unregister_sockopt(&ip6t_sockopts);
2285 xt_unregister_match(&icmp6_matchstruct);
2286 xt_unregister_target(&ip6t_error_target);
2287 xt_unregister_target(&ip6t_standard_target);
2289 unregister_pernet_subsys(&ip6_tables_net_ops);
2293 * find the offset to specified header or the protocol number of last header
2294 * if target < 0. "last header" is transport protocol header, ESP, or
2297 * If target header is found, its offset is set in *offset and return protocol
2298 * number. Otherwise, return -1.
2300 * If the first fragment doesn't contain the final protocol header or
2301 * NEXTHDR_NONE it is considered invalid.
2303 * Note that a non-first fragment is a special case in which "the protocol
2304 * number of the last header" is the "next header" field in the Fragment
2305 * header. In this case, *offset is meaningless and the fragment offset is
2305 * stored in *fragoff if fragoff
/* Walk the IPv6 extension-header chain of @skb looking for header
 * type @target (see the block comment above for the target < 0 and
 * fragment semantics). Uses skb_header_pointer so it works on
 * non-linear skbs; returns the found protocol number with *offset
 * set, or a negative value on malformed/truncated chains. */
2309 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2310 int target, unsigned short *fragoff)
2312 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2313 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2314 unsigned int len = skb->len - start;
2319 while (nexthdr != target) {
2320 struct ipv6_opt_hdr _hdr, *hp;
2321 unsigned int hdrlen;
/* chain ends at a non-extension header or NEXTHDR_NONE */
2323 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2329 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2332 if (nexthdr == NEXTHDR_FRAGMENT) {
2333 unsigned short _frag_off;
2335 fp = skb_header_pointer(skb,
2336 start+offsetof(struct frag_hdr,
/* mask off the reserved/M-flag low bits of the frag-off field */
2343 _frag_off = ntohs(*fp) & ~0x7;
2346 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2347 hp->nexthdr == NEXTHDR_NONE)) {
2349 *fragoff = _frag_off;
/* AUTH header length is in 4-octet units, unlike other ext headers */
2355 } else if (nexthdr == NEXTHDR_AUTH)
2356 hdrlen = (hp->hdrlen + 2) << 2;
2358 hdrlen = ipv6_optlen(hp);
2360 nexthdr = hp->nexthdr;
/* Public API exported for table modules (filter/mangle/raw/security)
 * and for other netfilter code that parses IPv6 header chains. */
2369 EXPORT_SYMBOL(ip6t_register_table);
2370 EXPORT_SYMBOL(ip6t_unregister_table);
2371 EXPORT_SYMBOL(ip6t_do_table);
2372 EXPORT_SYMBOL(ip6t_ext_hdr);
2373 EXPORT_SYMBOL(ipv6_find_hdr);
2375 module_init(ip6_tables_init);
2376 module_exit(ip6_tables_fini);