2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
155 unsigned short _frag_off;
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
177 /* We need match for the '-p all', too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
185 /* should be ip6 safe */
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
203 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
206 printk("ip6_tables: error: `%s'\n",
207 (const char *)par->targinfo);
212 /* Performance critical - called for every packet */
214 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215 struct xt_match_param *par)
217 par->match = m->u.kernel.match;
218 par->matchinfo = m->data;
220 /* Stop iteration if it doesn't match */
221 if (!m->u.kernel.match->match(skb, par))
/* Return the rule located @offset bytes into the table blob @base. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
233 /* All zeroes == unconditional rule. */
234 /* Mildly perf critical (only if packet tracing is on) */
236 unconditional(const struct ip6t_ip6 *ipv6)
240 for (i = 0; i < sizeof(*ipv6); i++)
241 if (((char *)ipv6)[i])
244 return (i == sizeof(*ipv6));
247 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
248 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
249 /* This cries for unification! */
250 static const char *const hooknames[] = {
251 [NF_INET_PRE_ROUTING] = "PREROUTING",
252 [NF_INET_LOCAL_IN] = "INPUT",
253 [NF_INET_FORWARD] = "FORWARD",
254 [NF_INET_LOCAL_OUT] = "OUTPUT",
255 [NF_INET_POST_ROUTING] = "POSTROUTING",
258 enum nf_ip_trace_comments {
259 NF_IP6_TRACE_COMMENT_RULE,
260 NF_IP6_TRACE_COMMENT_RETURN,
261 NF_IP6_TRACE_COMMENT_POLICY,
264 static const char *const comments[] = {
265 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
266 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
267 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
270 static struct nf_loginfo trace_loginfo = {
271 .type = NF_LOG_TYPE_LOG,
275 .logflags = NF_LOG_MASK,
280 /* Mildly perf critical (only if packet tracing is on) */
282 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
283 char *hookname, char **chainname,
284 char **comment, unsigned int *rulenum)
286 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
288 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
289 /* Head of user chain: ERROR target with chainname */
290 *chainname = t->target.data;
295 if (s->target_offset == sizeof(struct ip6t_entry)
296 && strcmp(t->target.u.kernel.target->name,
297 IP6T_STANDARD_TARGET) == 0
299 && unconditional(&s->ipv6)) {
300 /* Tail of chains: STANDARD target (return/policy) */
301 *comment = *chainname == hookname
302 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
303 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
312 static void trace_packet(struct sk_buff *skb,
314 const struct net_device *in,
315 const struct net_device *out,
316 const char *tablename,
317 struct xt_table_info *private,
318 struct ip6t_entry *e)
321 const struct ip6t_entry *root;
322 char *hookname, *chainname, *comment;
323 unsigned int rulenum = 0;
325 table_base = (void *)private->entries[smp_processor_id()];
326 root = get_entry(table_base, private->hook_entry[hook]);
328 hookname = chainname = (char *)hooknames[hook];
329 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
331 IP6T_ENTRY_ITERATE(root,
332 private->size - private->hook_entry[hook],
333 get_chainname_rulenum,
334 e, hookname, &chainname, &comment, &rulenum);
336 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
337 "TRACE: %s:%s:%s:%u ",
338 tablename, chainname, comment, rulenum);
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core packet-filter loop: walk the rule table for the given hook,
 * running matches and targets until a verdict is reached.
 *
 * NOTE(review): this extract is missing several lines of this function
 * (the return type, the "hook" parameter, the "table_base" declaration,
 * braces, the main do/while loop header, and assorted statements) —
 * recover them from the original source before building.  The comments
 * below describe only the logic that is visible here.
 */
ip6t_do_table(struct sk_buff *skb,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;
	/* Use an all-zero name when there is no in/out device. */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	mtpar.hotdrop = &hotdrop;
	mtpar.in = tgpar.in = in;
	mtpar.out = tgpar.out = out;
	tgpar.hooknum = hook;
	/* Readers (softirq) take the lock shared; rule replacement takes
	 * it exclusively. */
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);
	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);
		if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
			&mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
			struct ip6t_entry_target *t;
			if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
			/* Count the packet and its payload bytes. */
			ADD_COUNTER(e->counters,
				    ntohs(ipv6_hdr(skb)->payload_len) +
				    sizeof(struct ipv6hdr), 1);
			t = ip6t_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				v = ((struct ip6t_standard_target *)t)->verdict;
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					verdict = (unsigned)(-v) - 1;
				back = get_entry(table_base,
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ipv6.flags & IP6T_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ip6t_entry *next
						= (void *)e + e->next_offset;
						= (void *)back - table_base;
					/* set back pointer to next entry */
				e = get_entry(table_base, v);
				/* Targets which reenter must return
				tgpar.target = t->u.kernel.target;
				tgpar.targinfo = t->data;
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ip6t_entry *)table_base)->comefrom
				verdict = t->u.kernel.target->target(skb,
#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ip6t_entry *)table_base)->comefrom
				    && verdict == IP6T_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
				((struct ip6t_entry *)table_base)->comefrom
				if (verdict == IP6T_CONTINUE)
					e = (void *)e + e->next_offset;
			e = (void *)e + e->next_offset;
#ifdef CONFIG_NETFILTER_DEBUG
	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
	read_unlock_bh(&table->lock);
#ifdef DEBUG_ALLOW_ALL
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/*
 * Iterative depth-first walk of the rule graph (jumps/returns) starting
 * at each valid hook entry, detecting loops and recording in ->comefrom
 * which hooks can reach each rule.
 *
 * NOTE(review): this extract is missing lines from this function (the
 * return type, the "hook" declaration, the inner for(;;) header, braces
 * and several statements) — recover them from the original source before
 * building.  The comments below describe only the visible logic.
 */
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
		if (!(valid_hooks & (1 << hook)))
		/* Set initial back pointer. */
		e->counters.pcnt = pos;
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);
			int visited = e->comefrom & (1 << hook);
			/* Bit NF_INET_NUMHOOKS marks "on current path":
			 * seeing it again means a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry)
			    && (strcmp(t->target.u.user.name,
				       IP6T_STANDARD_TARGET) == 0)
			    && unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;
				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
				/* Return: backtrack through the last
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;
					/* We're at the start. */
					e = (struct ip6t_entry *)
				} while (oldpos == pos + e->next_offset);
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				int newpos = t->verdict;
				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				e = (struct ip6t_entry *)
					e->counters.pcnt = pos;
		duprintf("Finished chain %u\n", hook);
598 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
600 struct xt_mtdtor_param par;
602 if (i && (*i)-- == 0)
605 par.match = m->u.kernel.match;
606 par.matchinfo = m->data;
607 if (par.match->destroy != NULL)
608 par.match->destroy(&par);
609 module_put(par.match->me);
614 check_entry(struct ip6t_entry *e, const char *name)
616 struct ip6t_entry_target *t;
618 if (!ip6_checkentry(&e->ipv6)) {
619 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
623 if (e->target_offset + sizeof(struct ip6t_entry_target) >
627 t = ip6t_get_target(e);
628 if (e->target_offset + t->u.target_size > e->next_offset)
634 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
637 const struct ip6t_ip6 *ipv6 = par->entryinfo;
640 par->match = m->u.kernel.match;
641 par->matchinfo = m->data;
643 ret = xt_check_match(par, NFPROTO_IPV6, m->u.match_size - sizeof(*m),
644 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
646 duprintf("ip_tables: check failed for `%s'.\n",
655 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 struct xt_match *match;
661 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
663 "ip6t_%s", m->u.user.name);
664 if (IS_ERR(match) || !match) {
665 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
666 return match ? PTR_ERR(match) : -ENOENT;
668 m->u.kernel.match = match;
670 ret = check_match(m, par, i);
676 module_put(m->u.kernel.match->me);
680 static int check_target(struct ip6t_entry *e, const char *name)
682 struct ip6t_entry_target *t = ip6t_get_target(e);
683 struct xt_tgchk_param par = {
686 .target = t->u.kernel.target,
688 .hook_mask = e->comefrom,
692 t = ip6t_get_target(e);
693 ret = xt_check_target(&par, NFPROTO_IPV6, t->u.target_size - sizeof(*t),
694 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
696 duprintf("ip_tables: check failed for `%s'.\n",
697 t->u.kernel.target->name);
/*
 * Validate one whole rule: structural check, then find+check every match
 * extension, then find+check the target; unwinds on any failure.
 *
 * NOTE(review): this extract is missing lines of this function (return
 * type, local "ret"/"j" declarations, braces, early-return checks, the
 * second argument line of the xt_find_target() call, and the error
 * labels) — recover them from the original source before building.
 */
find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct xt_mtchk_param mtpar;
	ret = check_entry(e, name);
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	/* Find and check every match; j counts successes for unwinding. */
	ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
		goto cleanup_matches;
	t = ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
					"ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	t->u.kernel.target = target;
	ret = check_target(e, name);
	/* Error unwinding: release target module, then the j checked matches. */
	module_put(t->u.kernel.target->me);
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * First-pass walk callback: verify alignment and minimum size of one
 * rule, record hook-entry/underflow offsets that land on it, and reset
 * its counters/comefrom bookkeeping fields.
 *
 * NOTE(review): this extract is missing lines (return type, the "base"
 * parameter, the "h" declaration, braces, the e->next_offset size check
 * header, the comefrom reset and the trailing (*i)++/return) — recover
 * them from the original source before building.
 */
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
	/* Rules must be properly aligned and lie fully inside the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IP6T_RETURN). --RR */
	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
794 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
796 struct xt_tgdtor_param par;
797 struct ip6t_entry_target *t;
799 if (i && (*i)-- == 0)
802 /* Cleanup all matches */
803 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
804 t = ip6t_get_target(e);
806 par.target = t->u.kernel.target;
807 par.targinfo = t->data;
808 if (par.target->destroy != NULL)
809 par.target->destroy(&par);
810 module_put(par.target->me);
/* Checks and translates the user-supplied table segment (held in
/*
 * Full validation pipeline for a user-supplied table blob: size/offset
 * walk, hook assignment check, loop detection, per-rule extension
 * checks, then replication to every CPU's copy.
 *
 * NOTE(review): this extract is missing lines (return type, the
 * entry0/size/number parameters, local declarations, braces, error
 * returns and the entry-count check) — recover them from the original
 * source before building.
 */
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
	newinfo->size = size;
	newinfo->number = number;
	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	duprintf("translate_table: size %u\n", newinfo->size);
	/* Walk through entries, checking offsets. */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 hook_entries, underflows, &i);
		duprintf("translate_table: %u not %u entries\n",
	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
	if (!mark_source_chains(newinfo, valid_hooks, entry0))
	/* Finally, each sanity check must pass */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);
		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
898 add_entry_to_counter(const struct ip6t_entry *e,
899 struct xt_counters total[],
902 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
909 set_entry_to_counter(const struct ip6t_entry *e,
910 struct ip6t_counters total[],
913 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
920 get_counters(const struct xt_table_info *t,
921 struct xt_counters counters[])
927 /* Instead of clearing (by a previous call to memset())
928 * the counters and using adds, we set the counters
929 * with data used by 'current' CPU
930 * We dont care about preemption here.
932 curcpu = raw_smp_processor_id();
935 IP6T_ENTRY_ITERATE(t->entries[curcpu],
937 set_entry_to_counter,
941 for_each_possible_cpu(cpu) {
945 IP6T_ENTRY_ITERATE(t->entries[cpu],
947 add_entry_to_counter,
953 static struct xt_counters *alloc_counters(struct xt_table *table)
955 unsigned int countersize;
956 struct xt_counters *counters;
957 const struct xt_table_info *private = table->private;
959 /* We need atomic snapshot of counters: rest doesn't change
960 (other than comefrom, which userspace doesn't care
962 countersize = sizeof(struct xt_counters) * private->number;
963 counters = vmalloc_node(countersize, numa_node_id());
965 if (counters == NULL)
966 return ERR_PTR(-ENOMEM);
968 /* First, sum counters... */
969 write_lock_bh(&table->lock);
970 get_counters(private, counters);
971 write_unlock_bh(&table->lock);
/*
 * Copy the kernel's rule blob to userspace, then patch in the summed
 * counters and replace kernel match/target pointers with their names.
 *
 * NOTE(review): this extract is missing lines (return type, "ret"/"i"
 * declarations, braces, error paths, vfree of counters, the source
 * argument of two copy_to_user calls and the "m = ..." assignment in
 * the match loop) — recover them from the original source before
 * building.
 */
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
	unsigned int off, num;
	struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const void *loc_cpu_entry;
	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);
	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;
		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied-out counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 sizeof(counters[num])) != 0) {
		/* Walk this rule's matches, writing each match's name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
		t = ip6t_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
1050 #ifdef CONFIG_COMPAT
1051 static void compat_standard_from_user(void *dst, void *src)
1053 int v = *(compat_int_t *)src;
1056 v += xt_compat_calc_jump(AF_INET6, v);
1057 memcpy(dst, &v, sizeof(v));
1060 static int compat_standard_to_user(void __user *dst, void *src)
1062 compat_int_t cv = *(int *)src;
1065 cv -= xt_compat_calc_jump(AF_INET6, cv);
1066 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1070 compat_calc_match(struct ip6t_entry_match *m, int *size)
1072 *size += xt_compat_match_offset(m->u.kernel.match);
1076 static int compat_calc_entry(struct ip6t_entry *e,
1077 const struct xt_table_info *info,
1078 void *base, struct xt_table_info *newinfo)
1080 struct ip6t_entry_target *t;
1081 unsigned int entry_offset;
1084 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1085 entry_offset = (void *)e - base;
1086 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1087 t = ip6t_get_target(e);
1088 off += xt_compat_target_offset(t->u.kernel.target);
1089 newinfo->size -= off;
1090 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1094 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1095 if (info->hook_entry[i] &&
1096 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1097 newinfo->hook_entry[i] -= off;
1098 if (info->underflow[i] &&
1099 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1100 newinfo->underflow[i] -= off;
1105 static int compat_table_info(const struct xt_table_info *info,
1106 struct xt_table_info *newinfo)
1108 void *loc_cpu_entry;
1110 if (!newinfo || !info)
1113 /* we dont care about newinfo->entries[] */
1114 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1115 newinfo->initial_entries = 0;
1116 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1117 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1118 compat_calc_entry, info, loc_cpu_entry,
/*
 * IP6T_SO_GET_INFO handler: look up the named table and copy its hook
 * offsets, entry count and size back to userspace.  In compat mode the
 * reported size/offsets are recomputed for the 32-bit layout.
 *
 * NOTE(review): this extract is missing lines (local t/ret declarations,
 * several if-headers around the compat branches, error returns, braces
 * and the module_put/xt_table_unlock tail) — recover them from the
 * original source before building.
 */
static int get_info(struct net *net, void __user *user, int *len, int compat)
	char name[IP6T_TABLE_MAXNAMELEN];
	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
	if (copy_from_user(name, user, sizeof(name)) != 0)
	/* Force NUL termination of the user-supplied table name. */
	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
		xt_compat_lock(AF_INET6);
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);
		if (copy_to_user(user, &info, *len) != 0)
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
		xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and copy all rules (with counters and extension names)
 * back to userspace via copy_entries_to_user().
 *
 * NOTE(review): this extract is missing lines (return type, local
 * ret/t declarations, error returns, braces, the else before the size
 * mismatch duprintf, and the module_put/xt_table_unlock tail) — recover
 * them from the original source before building.
 */
get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
	struct ip6t_get_entries get;
	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
		ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * Swap a validated replacement table into place: find the table, verify
 * the hook mask, xt_replace_table(), fix module refcounts, snapshot the
 * old counters, tear down the old rules and copy the counters out.
 *
 * NOTE(review): this extract is missing lines (return type, t/ret
 * declarations, allocation-failure handling, braces, the
 * module_put/module refcount adjustments between the two refcount
 * conditions, error labels and the function tail) — recover them from
 * the original source before building.
 */
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
 free_newinfo_counters_untrans:
/*
 * IP6T_SO_SET_REPLACE handler: copy the header and rule blob from
 * userspace, validate/translate it, then install it via __do_replace().
 *
 * NOTE(review): this extract is missing lines (return type, "ret"
 * declaration, error returns after the copies/allocation, braces and
 * the final returns) — recover them from the original source before
 * building.
 */
do_replace(struct net *net, void __user *user, unsigned int len)
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
	newinfo = xt_alloc_table_info(tmp.size);
	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	duprintf("ip_tables: Translated table\n");
	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
		goto free_newinfo_untrans;
 free_newinfo_untrans:
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
	xt_free_table_info(newinfo);
1340 /* We're lazy, and add to the first CPU; overflow works its fey magic
1341 * and everything is OK. */
1343 add_counter_to_entry(struct ip6t_entry *e,
1344 const struct xt_counters addme[],
1348 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1350 (long unsigned int)e->counters.pcnt,
1351 (long unsigned int)e->counters.bcnt,
1352 (long unsigned int)addme[*i].pcnt,
1353 (long unsigned int)addme[*i].bcnt);
1356 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: read a (possibly compat-layout)
 * counters header plus array from userspace, then add them onto the
 * live table's rules under the table write lock.
 *
 * NOTE(review): this extract is missing lines (return type, the compat
 * flag parameter, ret/size/name/ptmp/t/i declarations, the compat/native
 * if-else headers, error returns, vfree/unlock tails and braces) —
 * recover them from the original source before building.
 */
do_add_counters(struct net *net, void __user *user, unsigned int len,
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const struct xt_table_info *private;
	const void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
		size = sizeof(struct xt_counters_info);
	if (copy_from_user(ptmp, user, size) != 0)
#ifdef CONFIG_COMPAT
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
		num_counters = tmp.num_counters;
	/* The payload length must match the declared counter count. */
	if (len != size + num_counters * sizeof(struct xt_counters))
	paddc = vmalloc_node(len - size, numa_node_id());
	if (copy_from_user(paddc, user + size, len - size) != 0) {
	t = xt_find_table_lock(net, AF_INET6, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		goto unlock_up_free;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry,
			   add_counter_to_entry,
	write_unlock_bh(&t->lock);
1447 #ifdef CONFIG_COMPAT
1448 struct compat_ip6t_replace {
1449 char name[IP6T_TABLE_MAXNAMELEN];
1453 u32 hook_entry[NF_INET_NUMHOOKS];
1454 u32 underflow[NF_INET_NUMHOOKS];
1456 compat_uptr_t counters; /* struct ip6t_counters * */
1457 struct compat_ip6t_entry entries[0];
1461 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1462 unsigned int *size, struct xt_counters *counters,
1465 struct ip6t_entry_target *t;
1466 struct compat_ip6t_entry __user *ce;
1467 u_int16_t target_offset, next_offset;
1468 compat_uint_t origsize;
1473 ce = (struct compat_ip6t_entry __user *)*dstptr;
1474 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1477 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1480 *dstptr += sizeof(struct compat_ip6t_entry);
1481 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1483 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1484 target_offset = e->target_offset - (origsize - *size);
1487 t = ip6t_get_target(e);
1488 ret = xt_compat_target_to_user(t, dstptr, size);
1492 next_offset = e->next_offset - (origsize - *size);
1493 if (put_user(target_offset, &ce->target_offset))
1495 if (put_user(next_offset, &ce->next_offset))
/*
 * Look up (and module-load on demand) the xt_match named in a compat
 * match entry, pin it in m->u.kernel.match, and add its native-vs-compat
 * size delta to *size.  Returns PTR_ERR/-ENOENT when the match cannot
 * be resolved.
 */
1505 compat_find_calc_match(struct ip6t_entry_match *m,
1507 const struct ip6t_ip6 *ipv6,
1508 unsigned int hookmask,
1509 int *size, unsigned int *i)
1511 struct xt_match *match;
/* try_then_request_module(): retry the lookup after modprobe ip6t_<name> */
1513 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1514 m->u.user.revision),
1515 "ip6t_%s", m->u.user.name);
1516 if (IS_ERR(match) || !match) {
1517 duprintf("compat_check_calc_match: `%s' not found\n",
1519 return match ? PTR_ERR(match) : -ENOENT;
1521 m->u.kernel.match = match;
1522 *size += xt_compat_match_offset(match); /* grow needed for native layout */
/*
 * Drop the module reference taken by compat_find_calc_match.  When i is
 * non-NULL only the first *i matches are released (iterator-style bound);
 * i == NULL releases unconditionally.
 */
1529 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1531 if (i && (*i)-- == 0)
1534 module_put(m->u.kernel.match->me);
/*
 * Release every match module ref and the target module ref held by one
 * compat entry.  With non-NULL i, stops after *i entries (used to unwind
 * a partially processed table).
 */
1539 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1541 struct ip6t_entry_target *t;
1543 if (i && (*i)-- == 0)
1546 /* Cleanup all matches */
1547 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1548 t = compat_ip6t_get_target(e);
1549 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry: alignment/bounds checks,
 * resolve all matches and the target (taking module refs), record the
 * compat->native size delta with xt_compat_add_offset, and latch hook
 * entry/underflow positions that coincide with this entry's offset.
 * On failure unwinds the refs taken so far.
 */
1554 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1555 struct xt_table_info *newinfo,
1557 unsigned char *base,
1558 unsigned char *limit,
1559 unsigned int *hook_entries,
1560 unsigned int *underflows,
1564 struct ip6t_entry_target *t;
1565 struct xt_target *target;
1566 unsigned int entry_offset;
1570 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1571 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1572 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1573 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* entry must at least hold its own header plus a target header */
1577 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1578 sizeof(struct compat_xt_entry_target)) {
1579 duprintf("checking: element %p size %u\n",
1584 /* For purposes of check_entry casting the compat entry is fine */
1585 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much this entry grows when converted to native */
1589 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1590 entry_offset = (void *)e - (void *)base;
1592 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1593 &e->ipv6, e->comefrom, &off, &j);
1595 goto release_matches;
1597 t = compat_ip6t_get_target(e);
1598 target = try_then_request_module(xt_find_target(AF_INET6,
1600 t->u.user.revision),
1601 "ip6t_%s", t->u.user.name);
1602 if (IS_ERR(target) || !target) {
1603 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1605 ret = target ? PTR_ERR(target) : -ENOENT;
1606 goto release_matches;
1608 t->u.kernel.target = target;
1610 off += xt_compat_target_offset(target);
1612 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1616 /* Check hooks & underflows */
1617 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1618 if ((unsigned char *)e - base == hook_entries[h])
1619 newinfo->hook_entry[h] = hook_entries[h];
1620 if ((unsigned char *)e - base == underflows[h])
1621 newinfo->underflow[h] = underflows[h];
1624 /* Clear counters and comefrom */
1625 memset(&e->counters, 0, sizeof(e->counters));
1632 module_put(t->u.kernel.target->me);
/* NOTE(review): 'e' is a COMPAT entry, but this unwind walks it with the
 * native IP6T_MATCH_ITERATE (wrong offsets); compat_release_entry above
 * correctly uses COMPAT_IP6T_MATCH_ITERATE.  Likely a real bug — this
 * exact mismatch was fixed in later mainline kernels; verify and fix. */
1634 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second-pass conversion: expand one validated compat entry into native
 * ip6t_entry layout at *dstptr (growing *size), convert matches/target
 * via their xt_compat_*_from_user hooks, rebase target/next offsets, and
 * shift any hook_entry/underflow markers that lie beyond this entry.
 */
1639 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1640 unsigned int *size, const char *name,
1641 struct xt_table_info *newinfo, unsigned char *base)
1643 struct ip6t_entry_target *t;
1644 struct xt_target *target;
1645 struct ip6t_entry *de;
1646 unsigned int origsize; /* *size before conversion; basis for rebasing */
1651 de = (struct ip6t_entry *)*dstptr;
1652 memcpy(de, e, sizeof(struct ip6t_entry));
1653 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1655 *dstptr += sizeof(struct ip6t_entry);
1656 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1658 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth here: offsets move outward */
1662 de->target_offset = e->target_offset - (origsize - *size);
1663 t = compat_ip6t_get_target(e);
1664 target = t->u.kernel.target;
1665 xt_compat_target_from_user(t, dstptr, size);
1667 de->next_offset = e->next_offset - (origsize - *size);
1668 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1669 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1670 newinfo->hook_entry[h] -= origsize - *size;
1671 if ((unsigned char *)de - base < newinfo->underflow[h])
1672 newinfo->underflow[h] -= origsize - *size;
/*
 * Final per-entry semantic check after compat->native conversion: run
 * every match's checkentry hook (via check_match, counting successes in
 * j) and then the target's check; on failure, cleanup_match unwinds the
 * j matches already validated.
 */
1677 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1682 struct xt_mtchk_param mtpar;
1686 mtpar.entryinfo = &e->ipv6;
1687 mtpar.hook_mask = e->comefrom;
1688 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1690 goto cleanup_matches;
1692 ret = check_target(e, name);
1694 goto cleanup_matches;
1700 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a whole 32-bit compat ruleset into a native xt_table_info.
 * Two passes under the AF_INET6 compat lock: (1) validate every compat
 * entry and compute the grown size (check_compat_entry_size_and_hooks),
 * verifying all valid hooks got entry/underflow positions; (2) allocate
 * the native table and expand entries into it.  Afterwards the chain
 * graph is checked (mark_source_chains) and each entry is semantically
 * checked; the result is replicated to every possible CPU's copy.
 * Error paths release module refs for however many entries succeeded.
 */
1705 translate_compat_table(const char *name,
1706 unsigned int valid_hooks,
1707 struct xt_table_info **pinfo,
1709 unsigned int total_size,
1710 unsigned int number,
1711 unsigned int *hook_entries,
1712 unsigned int *underflows)
1715 struct xt_table_info *newinfo, *info;
1716 void *pos, *entry0, *entry1;
1723 info->number = number;
1725 /* Init all hooks to impossible value. */
1726 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1727 info->hook_entry[i] = 0xFFFFFFFF;
1728 info->underflow[i] = 0xFFFFFFFF;
1731 duprintf("translate_compat_table: size %u\n", info->size);
1733 xt_compat_lock(AF_INET6);
1734 /* Walk through entries, checking offsets. */
1735 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1736 check_compat_entry_size_and_hooks,
1737 info, &size, entry0,
1738 entry0 + total_size,
1739 hook_entries, underflows, &j, name);
1745 duprintf("translate_compat_table: %u not %u entries\n",
1750 /* Check hooks all assigned */
1751 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1752 /* Only hooks which are valid */
1753 if (!(valid_hooks & (1 << i)))
1755 if (info->hook_entry[i] == 0xFFFFFFFF) {
1756 duprintf("Invalid hook entry %u %u\n",
1757 i, hook_entries[i]);
1760 if (info->underflow[i] == 0xFFFFFFFF) {
1761 duprintf("Invalid underflow %u %u\n",
/* size now includes the compat->native growth computed in pass 1 */
1768 newinfo = xt_alloc_table_info(size);
1772 newinfo->number = number;
1773 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1774 newinfo->hook_entry[i] = info->hook_entry[i];
1775 newinfo->underflow[i] = info->underflow[i];
1777 entry1 = newinfo->entries[raw_smp_processor_id()];
1780 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1781 compat_copy_entry_from_user,
1782 &pos, &size, name, newinfo, entry1);
/* offset bookkeeping is only needed during conversion; drop it now */
1783 xt_compat_flush_offsets(AF_INET6);
1784 xt_compat_unlock(AF_INET6);
1789 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1793 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* unwind: compat refs for entries past i, native cleanup for first i */
1797 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1798 compat_release_entry, &j);
1799 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1800 xt_free_table_info(newinfo);
1804 /* And one copy for every other CPU */
1805 for_each_possible_cpu(i)
1806 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1807 memcpy(newinfo->entries[i], entry1, newinfo->size);
1811 xt_free_table_info(info);
1815 xt_free_table_info(newinfo);
1817 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1820 xt_compat_flush_offsets(AF_INET6);
1821 xt_compat_unlock(AF_INET6);
/*
 * 32-bit setsockopt(IP6T_SO_SET_REPLACE) handler: copy in the compat
 * replace header and blob, translate to a native table via
 * translate_compat_table, then install it with __do_replace.  On any
 * failure after translation the new entries are cleaned up and freed.
 */
1826 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1829 struct compat_ip6t_replace tmp;
1830 struct xt_table_info *newinfo;
1831 void *loc_cpu_entry;
1833 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1836 /* overflow check */
1837 if (tmp.size >= INT_MAX / num_possible_cpus())
1839 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1842 newinfo = xt_alloc_table_info(tmp.size)
1846 /* choose the copy that is on our node/cpu */
1847 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1848 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1854 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1855 &newinfo, &loc_cpu_entry, tmp.size,
1856 tmp.num_entries, tmp.hook_entry,
1861 duprintf("compat_do_replace: Translated table\n");
1863 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1864 tmp.num_counters, compat_ptr(tmp.counters));
1866 goto free_newinfo_untrans;
1869 free_newinfo_untrans:
1870 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1872 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, routes REPLACE
 * to the compat path and ADD_COUNTERS to do_add_counters in compat mode
 * (last arg 1).
 */
1877 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1882 if (!capable(CAP_NET_ADMIN))
1886 case IP6T_SO_SET_REPLACE:
1887 ret = compat_do_replace(sock_net(sk), user, len);
1890 case IP6T_SO_SET_ADD_COUNTERS:
1891 ret = do_add_counters(sock_net(sk), user, len, 1);
1895 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout for IP6T_SO_GET_ENTRIES requests/replies. */
1902 struct compat_ip6t_get_entries {
1903 char name[IP6T_TABLE_MAXNAMELEN];
1905 struct compat_ip6t_entry entrytable[0]; /* flexible trailing entries */
/*
 * Dump the whole table to 32-bit userspace: snapshot counters via
 * alloc_counters, then convert each entry with compat_copy_entry_to_user.
 */
1909 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1910 void __user *userptr)
1912 struct xt_counters *counters;
1913 const struct xt_table_info *private = table->private;
1917 const void *loc_cpu_entry;
1920 counters = alloc_counters(table);
1921 if (IS_ERR(counters))
1922 return PTR_ERR(counters);
1924 /* choose the copy that is on our node/cpu, ...
1925 * This choice is lazy (because current thread is
1926 * allowed to migrate to another cpu)
1928 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1931 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1932 compat_copy_entry_to_user,
1933 &pos, &size, counters, &i);
/*
 * Compat getsockopt(IP6T_SO_GET_ENTRIES): validate the user-supplied
 * length against the request header plus get.size, look up the table,
 * cross-check the compat size via compat_table_info, then dump entries.
 * Runs under the AF_INET6 compat lock so offset bookkeeping created by
 * compat_table_info can be flushed afterwards.
 */
1940 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1944 struct compat_ip6t_get_entries get;
1947 if (*len < sizeof(get)) {
1948 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1952 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1955 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1956 duprintf("compat_get_entries: %u != %zu\n",
1957 *len, sizeof(get) + get.size);
1961 xt_compat_lock(AF_INET6);
1962 t = xt_find_table_lock(net, AF_INET6, get.name);
1963 if (t && !IS_ERR(t)) {
1964 const struct xt_table_info *private = t->private;
1965 struct xt_table_info info;
1966 duprintf("t->private->number = %u\n", private->number);
1967 ret = compat_table_info(private, &info);
1968 if (!ret && get.size == info.size) {
1969 ret = compat_copy_entries_to_user(private->size,
1970 t, uptr->entrytable);
1972 duprintf("compat_get_entries: I've got %u not %u!\n",
1973 private->size, get.size);
1976 xt_compat_flush_offsets(AF_INET6);
1980 ret = t ? PTR_ERR(t) : -ENOENT;
1982 xt_compat_unlock(AF_INET6);
/* Forward declaration: the native get handler is defined below and is
 * the fallback for compat commands that need no translation. */
1986 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: GET_INFO runs in compat mode (last arg
 * 1), GET_ENTRIES uses the compat dump path, everything else falls
 * through to the native handler.
 */
1989 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1993 if (!capable(CAP_NET_ADMIN))
1997 case IP6T_SO_GET_INFO:
1998 ret = get_info(sock_net(sk), user, len, 1);
2000 case IP6T_SO_GET_ENTRIES:
2001 ret = compat_get_entries(sock_net(sk), user, len);
2004 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: requires CAP_NET_ADMIN; REPLACE installs
 * a new ruleset, ADD_COUNTERS runs do_add_counters in native mode
 * (last arg 0, vs. 1 in the compat variant above).
 */
2011 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2015 if (!capable(CAP_NET_ADMIN))
2019 case IP6T_SO_SET_REPLACE:
2020 ret = do_replace(sock_net(sk), user, len);
2023 case IP6T_SO_SET_ADD_COUNTERS:
2024 ret = do_add_counters(sock_net(sk), user, len, 0);
2028 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, entry dump, and match/target
 * revision queries (the latter may modprobe ip6t_<name> on demand via
 * try_then_request_module).
 */
2036 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2040 if (!capable(CAP_NET_ADMIN))
2044 case IP6T_SO_GET_INFO:
2045 ret = get_info(sock_net(sk), user, len, 0);
2048 case IP6T_SO_GET_ENTRIES:
2049 ret = get_entries(sock_net(sk), user, len);
2052 case IP6T_SO_GET_REVISION_MATCH:
2053 case IP6T_SO_GET_REVISION_TARGET: {
2054 struct ip6t_get_revision rev;
2057 if (*len != sizeof(rev)) {
2061 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2066 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2071 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2074 "ip6t_%s", rev.name);
2079 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a built-in table: allocate a per-CPU xt_table_info sized for
 * the replacement blob, copy the initial ruleset into this CPU's copy,
 * translate/validate it, then hand it to xt_register_table.  Returns the
 * registered table or ERR_PTR on failure (newinfo freed on all error
 * paths).
 */
2086 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2087 const struct ip6t_replace *repl)
2090 struct xt_table_info *newinfo;
/* empty bootstrap info handed to xt_register_table as the initial state */
2091 struct xt_table_info bootstrap
2092 = { 0, 0, 0, { 0 }, { 0 }, { } };
2093 void *loc_cpu_entry;
2094 struct xt_table *new_table;
2096 newinfo = xt_alloc_table_info(repl->size);
2102 /* choose the copy on our node/cpu, but dont care about preemption */
2103 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2104 memcpy(loc_cpu_entry, repl->entries, repl->size);
2106 ret = translate_table(table->name, table->valid_hooks,
2107 newinfo, loc_cpu_entry, repl->size,
2114 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2115 if (IS_ERR(new_table)) {
2116 ret = PTR_ERR(new_table);
2122 xt_free_table_info(newinfo);
2124 return ERR_PTR(ret);
/*
 * Tear down a registered table: detach it from x_tables, run cleanup_entry
 * over every rule (dropping match/target module refs), drop the extra
 * table-owner ref taken for user-added rules, and free the per-CPU info.
 */
2127 void ip6t_unregister_table(struct xt_table *table)
2129 struct xt_table_info *private;
2130 void *loc_cpu_entry;
/* cache the owner: 'table' must not be touched after unregistering */
2131 struct module *table_owner = table->me;
2133 private = xt_unregister_table(table);
2135 /* Decrease module usage counts and free resources */
2136 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2137 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* more entries than the built-in set => a ref was held for user rules */
2138 if (private->number > private->initial_entries)
2139 module_put(table_owner);
2140 xt_free_table_info(private);
2143 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* Result is XORed with the user's invert flag by the (elided) tail. */
2145 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2146 u_int8_t type, u_int8_t code,
2149 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match handler for -p icmpv6: pulls the ICMPv6 header (linear or not)
 * with skb_header_pointer and tests type/code against the rule's range.
 * Non-first fragments never match; a truncated header hot-drops the
 * packet rather than letting it slip past the filter.
 */
2154 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2156 const struct icmp6hdr *ic;
2157 struct icmp6hdr _icmph; /* stack copy used if header is non-linear */
2158 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2160 /* Must not be a fragment. */
2161 if (par->fragoff != 0)
2164 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2166 /* We've been asked to examine this packet, and we
2167 * can't. Hence, no choice but to drop.
2169 duprintf("Dropping evil ICMP tinygram.\n");
2170 *par->hotdrop = true;
2174 return icmp6_type_code_match(icmpinfo->type,
2177 ic->icmp6_type, ic->icmp6_code,
2178 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2181 /* Called when user tries to insert an entry of this type. */
/* Rejects rules carrying invert flags other than IP6T_ICMP_INV. */
2182 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2184 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2186 /* Must specify no unknown invflags */
2187 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2190 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: its "targetsize" is just the verdict int;
 * compat hooks translate the verdict between 32/64-bit layouts. */
2191 static struct xt_target ip6t_standard_target __read_mostly = {
2192 .name = IP6T_STANDARD_TARGET,
2193 .targetsize = sizeof(int),
2195 #ifdef CONFIG_COMPAT
2196 .compatsize = sizeof(compat_int_t),
2197 .compat_from_user = compat_standard_from_user,
2198 .compat_to_user = compat_standard_to_user,
/* ERROR target: chain-name carrier hit only on ruleset corruption;
 * its handler (ip6t_error, defined earlier in the file) logs and drops. */
2202 static struct xt_target ip6t_error_target __read_mostly = {
2203 .name = IP6T_ERROR_TARGET,
2204 .target = ip6t_error,
2205 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* setsockopt/getsockopt registration for the IP6T_SO_* control range,
 * with compat entry points when CONFIG_COMPAT is enabled. */
2209 static struct nf_sockopt_ops ip6t_sockopts = {
2211 .set_optmin = IP6T_BASE_CTL,
2212 .set_optmax = IP6T_SO_SET_MAX+1,
2213 .set = do_ip6t_set_ctl,
2214 #ifdef CONFIG_COMPAT
2215 .compat_set = compat_do_ip6t_set_ctl,
2217 .get_optmin = IP6T_BASE_CTL,
2218 .get_optmax = IP6T_SO_GET_MAX+1,
2219 .get = do_ip6t_get_ctl,
2220 #ifdef CONFIG_COMPAT
2221 .compat_get = compat_do_ip6t_get_ctl,
2223 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration (proto-restricted to ICMPv6). */
2226 static struct xt_match icmp6_matchstruct __read_mostly = {
2228 .match = icmp6_match,
2229 .matchsize = sizeof(struct ip6t_icmp),
2230 .checkentry = icmp6_checkentry,
2231 .proto = IPPROTO_ICMPV6,
/* Per-network-namespace setup: register/unregister the AF_INET6
 * x_tables state for each netns. */
2235 static int __net_init ip6_tables_net_init(struct net *net)
2237 return xt_proto_init(net, AF_INET6);
2240 static void __net_exit ip6_tables_net_exit(struct net *net)
2242 xt_proto_fini(net, AF_INET6);
2245 static struct pernet_operations ip6_tables_net_ops = {
2246 .init = ip6_tables_net_init,
2247 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the two built-in targets, the icmp6
 * match, and the sockopt interface — unwinding in reverse order on any
 * failure (elided goto labels between lines 2274-2284).
 */
2250 static int __init ip6_tables_init(void)
2254 ret = register_pernet_subsys(&ip6_tables_net_ops);
2258 /* Noone else will be downing sem now, so we won't sleep */
2259 ret = xt_register_target(&ip6t_standard_target);
2262 ret = xt_register_target(&ip6t_error_target);
2265 ret = xt_register_match(&icmp6_matchstruct);
2269 /* Register setsockopt */
2270 ret = nf_register_sockopt(&ip6t_sockopts);
2274 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* error unwind: undo registrations in reverse order */
2278 xt_unregister_match(&icmp6_matchstruct);
2280 xt_unregister_target(&ip6t_error_target);
2282 xt_unregister_target(&ip6t_standard_target);
2284 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: mirror of ip6_tables_init, unregistering in reverse. */
2289 static void __exit ip6_tables_fini(void)
2291 nf_unregister_sockopt(&ip6t_sockopts);
2293 xt_unregister_match(&icmp6_matchstruct);
2294 xt_unregister_target(&ip6t_error_target);
2295 xt_unregister_target(&ip6t_standard_target);
2297 unregister_pernet_subsys(&ip6_tables_net_ops);
2301 * find the offset to specified header or the protocol number of last header
2302 * if target < 0. "last header" is transport protocol header, ESP, or
2305 * If target header is found, its offset is set in *offset and return protocol
2306 * number. Otherwise, return -1.
2308 * If the first fragment doesn't contain the final protocol header or
2309 * NEXTHDR_NONE it is considered invalid.
2311 * Note that non-1st fragment is special case that "the protocol number
2312 * of last header" is "next header" field in Fragment header. In this case,
2313 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2317 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2318 int target, unsigned short *fragoff)
2320 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2321 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2322 unsigned int len = skb->len - start;
/* walk the extension-header chain until 'target' (or a terminal header) */
2327 while (nexthdr != target) {
2328 struct ipv6_opt_hdr _hdr, *hp;
2329 unsigned int hdrlen;
2331 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2337 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2340 if (nexthdr == NEXTHDR_FRAGMENT) {
2341 unsigned short _frag_off;
2343 fp = skb_header_pointer(skb,
2344 start+offsetof(struct frag_hdr,
/* mask the M-flag/reserved bits, keep the 8-byte fragment offset */
2351 _frag_off = ntohs(*fp) & ~0x7;
2354 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2355 hp->nexthdr == NEXTHDR_NONE)) {
2357 *fragoff = _frag_off;
/* AH length field counts 32-bit words; other ext hdrs use ipv6_optlen */
2363 } else if (nexthdr == NEXTHDR_AUTH)
2364 hdrlen = (hp->hdrlen + 2) << 2;
2366 hdrlen = ipv6_optlen(hp);
2368 nexthdr = hp->nexthdr;
/* Public API exported to other modules (ip6table_filter etc.). */
2377 EXPORT_SYMBOL(ip6t_register_table);
2378 EXPORT_SYMBOL(ip6t_unregister_table);
2379 EXPORT_SYMBOL(ip6t_do_table);
2380 EXPORT_SYMBOL(ip6t_ext_hdr);
2381 EXPORT_SYMBOL(ipv6_find_hdr);
2383 module_init(ip6_tables_init);
2384 module_exit(ip6_tables_fini);