2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
/* ip6t_ext_hdr(): true if 'nexthdr' is one of the IPv6 extension-header
 * protocol numbers (hop-by-hop, routing, fragment, ESP, AH, no-next-header,
 * destination options) that may precede the transport header.
 * NOTE(review): the return-type line and braces are missing from this
 * extracted chunk. */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
/* Match skb against the IP portion of a rule: source/dest address under
 * mask, in/out interface names, and the transport protocol found by
 * ipv6_find_hdr().  On a protocol hit, *protoff receives the transport
 * header offset and *fragoff the fragment offset.
 * NOTE(review): the return type, braces and several return statements
 * are missing from this extracted chunk — control flow cannot be fully
 * verified here. */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR the raw test result with the rule's invert flag. */
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* ifname_compare_aligned() returns 0 when the name matches under mask. */
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
/* Validate the ip6t_ip6 portion of a userspace-supplied rule: reject
 * any flag or invflag bits outside the known masks.
 * NOTE(review): return statements and braces are missing from this
 * extracted chunk. */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
/* Target handler for the built-in ERROR target: log the error string
 * embedded in the rule's target data.  Reaching it at runtime means the
 * ruleset is corrupt or evaluation fell off the end of the table. */
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
/* Iterator callback: run one match extension against skb.  A false
 * result from the extension stops iteration over this rule's matches. */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset within a table blob into an entry pointer;
 * entries are variable-sized so offsets, not indices, address them. */
217 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset)
220 return (struct ip6t_entry *)(base + offset);
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
/* True when every byte of the ip6t_ip6 match portion is zero, i.e. the
 * rule matches all packets. */
226 unconditional(const struct ip6t_ip6 *ipv6)
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
234 return (i == sizeof(*ipv6));
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
/* Hook number -> built-in chain name, for TRACE log lines. */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* How the traced packet left a chain: ordinary rule, implicit return,
 * or the chain policy. */
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Logging parameters used by trace_packet() below. */
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
265 .logflags = NF_LOG_MASK,
270 /* Mildly perf critical (only if packet tracing is on) */
/* Iterator callback used by trace_packet(): walk entries up to the
 * matched entry 'e', tracking the current chain name (set at each ERROR
 * target that heads a user chain), the rule number within that chain,
 * and whether the terminating entry is a policy or a return. */
272 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 const char *hookname, const char **chainname,
274 const char **comment, unsigned int *rulenum)
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
278 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
279 /* Head of user chain: ERROR target with chainname */
280 *chainname = t->target.data;
285 if (s->target_offset == sizeof(struct ip6t_entry)
286 && strcmp(t->target.u.kernel.target->name,
287 IP6T_STANDARD_TARGET) == 0
289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */
291 *comment = *chainname == hookname
292 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched entry 'e' while skb->nf_trace is set. */
302 static void trace_packet(struct sk_buff *skb,
304 const struct net_device *in,
305 const struct net_device *out,
306 const char *tablename,
307 struct xt_table_info *private,
308 struct ip6t_entry *e)
311 const struct ip6t_entry *root;
312 const char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0;
/* Walk this CPU's copy of the table, starting at the hook's entry. */
315 table_base = private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]);
318 hookname = chainname = hooknames[hook];
319 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook],
323 get_chainname_rulenum,
324 e, hookname, &chainname, &comment, &rulenum);
326 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
327 "TRACE: %s:%s:%s:%u ",
328 tablename, chainname, comment, rulenum);
/* Step to the entry immediately following 'entry' in the table blob. */
332 static inline __pure struct ip6t_entry *
333 ip6t_next_entry(const struct ip6t_entry *entry)
335 return (void *)entry + entry->next_offset;
338 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-evaluation loop: walk the table's rules for the given
 * hook, running matches and targets, following jumps/RETURNs via the
 * 'back' pointer saved in each entry's comefrom field, until a target
 * yields a verdict.  NOTE(review): braces, the hook parameter line and
 * several statements are missing from this extracted chunk. */
340 ip6t_do_table(struct sk_buff *skb,
342 const struct net_device *in,
343 const struct net_device *out,
344 struct xt_table *table)
346 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
347 bool hotdrop = false;
348 /* Initializing verdict to NF_DROP keeps gcc happy. */
349 unsigned int verdict = NF_DROP;
350 const char *indev, *outdev;
352 struct ip6t_entry *e, *back;
353 struct xt_table_info *private;
354 struct xt_match_param mtpar;
355 struct xt_target_param tgpar;
358 indev = in ? in->name : nulldevname;
359 outdev = out ? out->name : nulldevname;
360 /* We handle fragments by dealing with the first fragment as
361 * if it was a normal packet. All other fragments are treated
362 * normally, except that they will NEVER match rules that ask
363 * things we don't know, ie. tcp syn flag or ports). If the
364 * rule is also a fragment-specific rule, non-fragments won't
/* Shared match/target parameter blocks filled in once per packet. */
366 mtpar.hotdrop = &hotdrop;
367 mtpar.in = tgpar.in = in;
368 mtpar.out = tgpar.out = out;
369 mtpar.family = tgpar.family = NFPROTO_IPV6;
370 tgpar.hooknum = hook;
372 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
375 private = table->private;
376 table_base = private->entries[smp_processor_id()];
378 e = get_entry(table_base, private->hook_entry[hook]);
380 /* For return from builtin chain */
381 back = get_entry(table_base, private->underflow[hook]);
384 struct ip6t_entry_target *t;
388 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
389 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
391 e = ip6t_next_entry(e);
395 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
/* Rule matched: account bytes (IPv6 header + payload) and packets. */
398 ADD_COUNTER(e->counters,
399 ntohs(ipv6_hdr(skb)->payload_len) +
400 sizeof(struct ipv6hdr), 1);
402 t = ip6t_get_target(e);
403 IP_NF_ASSERT(t->u.kernel.target);
405 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
406 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
407 /* The packet is traced: log it */
408 if (unlikely(skb->nf_trace))
409 trace_packet(skb, hook, in, out,
410 table->name, private, e);
412 /* Standard target? */
413 if (!t->u.kernel.target->target) {
416 v = ((struct ip6t_standard_target *)t)->verdict;
418 /* Pop from stack? */
419 if (v != IP6T_RETURN) {
420 verdict = (unsigned)(-v) - 1;
424 back = get_entry(table_base, back->comefrom);
427 if (table_base + v != ip6t_next_entry(e)
428 && !(e->ipv6.flags & IP6T_F_GOTO)) {
429 /* Save old back ptr in next entry */
430 struct ip6t_entry *next = ip6t_next_entry(e);
431 next->comefrom = (void *)back - table_base;
432 /* set back pointer to next entry */
436 e = get_entry(table_base, v);
438 /* Targets which reenter must return
440 tgpar.target = t->u.kernel.target;
441 tgpar.targinfo = t->data;
443 #ifdef CONFIG_NETFILTER_DEBUG
444 ((struct ip6t_entry *)table_base)->comefrom
447 verdict = t->u.kernel.target->target(skb, &tgpar);
449 #ifdef CONFIG_NETFILTER_DEBUG
450 if (((struct ip6t_entry *)table_base)->comefrom
452 && verdict == IP6T_CONTINUE) {
453 printk("Target %s reentered!\n",
454 t->u.kernel.target->name);
457 ((struct ip6t_entry *)table_base)->comefrom
460 if (verdict == IP6T_CONTINUE)
461 e = ip6t_next_entry(e);
468 #ifdef CONFIG_NETFILTER_DEBUG
469 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
471 xt_info_rdunlock_bh();
473 #ifdef DEBUG_ALLOW_ALL
482 /* Figures out from what hook each rule can be called: returns 0 if
483 there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk of every chain reachable from each valid hook,
 * without recursion: the per-entry packet counter (pcnt) temporarily
 * stores the back pointer and comefrom accumulates the hook bitmask.
 * Bit NF_INET_NUMHOOKS in comefrom marks "currently on the walk stack"
 * and is how loops are detected.  NOTE(review): braces and several
 * statements are missing from this extracted chunk. */
485 mark_source_chains(struct xt_table_info *newinfo,
486 unsigned int valid_hooks, void *entry0)
490 /* No recursion; use packet counter to save back ptrs (reset
491 to 0 as we leave), and comefrom to save source hook bitmask */
492 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
493 unsigned int pos = newinfo->hook_entry[hook];
494 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
496 if (!(valid_hooks & (1 << hook)))
499 /* Set initial back pointer. */
500 e->counters.pcnt = pos;
503 struct ip6t_standard_target *t
504 = (void *)ip6t_get_target(e);
505 int visited = e->comefrom & (1 << hook);
507 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
508 printk("iptables: loop hook %u pos %u %08X.\n",
509 hook, pos, e->comefrom);
512 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
514 /* Unconditional return/END. */
515 if ((e->target_offset == sizeof(struct ip6t_entry)
516 && (strcmp(t->target.u.user.name,
517 IP6T_STANDARD_TARGET) == 0)
519 && unconditional(&e->ipv6)) || visited) {
520 unsigned int oldpos, size;
522 if ((strcmp(t->target.u.user.name,
523 IP6T_STANDARD_TARGET) == 0) &&
524 t->verdict < -NF_MAX_VERDICT - 1) {
525 duprintf("mark_source_chains: bad "
526 "negative verdict (%i)\n",
531 /* Return: backtrack through the last
534 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
535 #ifdef DEBUG_IP_FIREWALL_USER
537 & (1 << NF_INET_NUMHOOKS)) {
538 duprintf("Back unset "
/* Pop the saved back pointer and clear the temporary. */
545 pos = e->counters.pcnt;
546 e->counters.pcnt = 0;
548 /* We're at the start. */
552 e = (struct ip6t_entry *)
554 } while (oldpos == pos + e->next_offset);
557 size = e->next_offset;
558 e = (struct ip6t_entry *)
559 (entry0 + pos + size);
560 e->counters.pcnt = pos;
563 int newpos = t->verdict;
565 if (strcmp(t->target.u.user.name,
566 IP6T_STANDARD_TARGET) == 0
568 if (newpos > newinfo->size -
569 sizeof(struct ip6t_entry)) {
570 duprintf("mark_source_chains: "
571 "bad verdict (%i)\n",
575 /* This a jump; chase it. */
576 duprintf("Jump rule %u -> %u\n",
579 /* ... this is a fallthru */
580 newpos = pos + e->next_offset;
582 e = (struct ip6t_entry *)
584 e->counters.pcnt = pos;
589 duprintf("Finished chain %u\n", hook);
/* Iterator callback: release one match extension — call its destructor
 * (if any) and drop the module reference.  When 'i' is non-NULL only
 * the first *i matches are cleaned (used to unwind partial setup). */
595 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
597 struct xt_mtdtor_param par;
599 if (i && (*i)-- == 0)
602 par.match = m->u.kernel.match;
603 par.matchinfo = m->data;
604 par.family = NFPROTO_IPV6;
605 if (par.match->destroy != NULL)
606 par.match->destroy(&par);
607 module_put(par.match->me);
/* Basic structural sanity checks on one userspace rule: valid ip6t_ip6
 * flags, and target offset/size fitting within the entry.
 * NOTE(review): return statements are missing from this extracted
 * chunk. */
612 check_entry(struct ip6t_entry *e, const char *name)
614 struct ip6t_entry_target *t;
616 if (!ip6_checkentry(&e->ipv6)) {
617 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
621 if (e->target_offset + sizeof(struct ip6t_entry_target) >
625 t = ip6t_get_target(e);
626 if (e->target_offset + t->u.target_size > e->next_offset)
/* Run xt_check_match() on one match extension with the rule's protocol
 * and invert-protocol context filled in. */
632 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
635 const struct ip6t_ip6 *ipv6 = par->entryinfo;
638 par->match = m->u.kernel.match;
639 par->matchinfo = m->data;
641 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
642 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
644 duprintf("ip_tables: check failed for `%s'.\n",
/* Look up (auto-loading the "ip6t_<name>" module if needed) and check
 * one match extension; on check failure the module ref is dropped. */
653 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
656 struct xt_match *match;
659 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
661 "ip6t_%s", m->u.user.name);
662 if (IS_ERR(match) || !match) {
663 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
664 return match ? PTR_ERR(match) : -ENOENT;
666 m->u.kernel.match = match;
668 ret = check_match(m, par, i);
/* Error path: release the reference taken by the lookup above. */
674 module_put(m->u.kernel.match->me);
/* Run xt_check_target() on the entry's (already-resolved) target with
 * hook mask and protocol context from the rule. */
678 static int check_target(struct ip6t_entry *e, const char *name)
680 struct ip6t_entry_target *t = ip6t_get_target(e);
681 struct xt_tgchk_param par = {
684 .target = t->u.kernel.target,
686 .hook_mask = e->comefrom,
687 .family = NFPROTO_IPV6,
691 t = ip6t_get_target(e);
692 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
693 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
695 duprintf("ip_tables: check failed for `%s'.\n",
696 t->u.kernel.target->name);
/* Fully validate one rule: structural check, resolve+check every match
 * (counting successes in j for unwinding), resolve+check the target.
 * On any failure, previously-acquired match/target references are
 * released via the cleanup labels. */
703 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
706 struct ip6t_entry_target *t;
707 struct xt_target *target;
710 struct xt_mtchk_param mtpar;
712 ret = check_entry(e, name);
718 mtpar.entryinfo = &e->ipv6;
719 mtpar.hook_mask = e->comefrom;
720 mtpar.family = NFPROTO_IPV6;
721 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
723 goto cleanup_matches;
725 t = ip6t_get_target(e);
726 target = try_then_request_module(xt_find_target(AF_INET6,
729 "ip6t_%s", t->u.user.name);
730 if (IS_ERR(target) || !target) {
731 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
732 ret = target ? PTR_ERR(target) : -ENOENT;
733 goto cleanup_matches;
735 t->u.kernel.target = target;
737 ret = check_target(e, name);
/* Unwind: drop target ref, then the j matches acquired so far. */
744 module_put(t->u.kernel.target->me);
746 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* Per-entry pass of translate_table(): verify alignment, bounds and
 * minimum size, record hook entry/underflow offsets that land exactly
 * on this entry, and reset counters/comefrom for the later passes. */
751 check_entry_size_and_hooks(struct ip6t_entry *e,
752 struct xt_table_info *newinfo,
754 unsigned char *limit,
755 const unsigned int *hook_entries,
756 const unsigned int *underflows,
761 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
762 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
763 duprintf("Bad offset %p\n", e);
768 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
769 duprintf("checking: element %p size %u\n",
774 /* Check hooks & underflows */
775 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
776 if ((unsigned char *)e - base == hook_entries[h])
777 newinfo->hook_entry[h] = hook_entries[h];
778 if ((unsigned char *)e - base == underflows[h])
779 newinfo->underflow[h] = underflows[h];
782 /* FIXME: underflows must be unconditional, standard verdicts
783 < 0 (not IP6T_RETURN). --RR */
785 /* Clear counters and comefrom */
786 e->counters = ((struct xt_counters) { 0, 0 });
/* Iterator callback: tear down one fully-initialized rule — clean all
 * matches, run the target destructor (if any) and drop its module ref.
 * 'i' limits cleanup to the first *i entries when unwinding. */
794 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
796 struct xt_tgdtor_param par;
797 struct ip6t_entry_target *t;
799 if (i && (*i)-- == 0)
802 /* Cleanup all matches */
803 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
804 t = ip6t_get_target(e);
806 par.target = t->u.kernel.target;
807 par.targinfo = t->data;
808 par.family = NFPROTO_IPV6;
809 if (par.target->destroy != NULL)
810 par.target->destroy(&par);
811 module_put(par.target->me);
815 /* Checks and translates the user-supplied table segment (held in
/* Multi-pass validation of a userspace ruleset blob: size/hook checks
 * per entry, verify all valid hooks got entry+underflow offsets, loop
 * detection via mark_source_chains(), then full match/target checking;
 * finally replicate the validated blob to every other CPU's copy.
 * NOTE(review): several lines (locals, error paths) are missing from
 * this extracted chunk. */
818 translate_table(const char *name,
819 unsigned int valid_hooks,
820 struct xt_table_info *newinfo,
824 const unsigned int *hook_entries,
825 const unsigned int *underflows)
830 newinfo->size = size;
831 newinfo->number = number;
833 /* Init all hooks to impossible value. */
834 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
835 newinfo->hook_entry[i] = 0xFFFFFFFF;
836 newinfo->underflow[i] = 0xFFFFFFFF;
839 duprintf("translate_table: size %u\n", newinfo->size);
841 /* Walk through entries, checking offsets. */
842 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
843 check_entry_size_and_hooks,
847 hook_entries, underflows, &i);
852 duprintf("translate_table: %u not %u entries\n",
857 /* Check hooks all assigned */
858 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
859 /* Only hooks which are valid */
860 if (!(valid_hooks & (1 << i)))
862 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
863 duprintf("Invalid hook entry %u %u\n",
867 if (newinfo->underflow[i] == 0xFFFFFFFF) {
868 duprintf("Invalid underflow %u %u\n",
874 if (!mark_source_chains(newinfo, valid_hooks, entry0))
877 /* Finally, each sanity check must pass */
879 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
880 find_check_entry, name, size, &i);
/* On failure, unwind the i entries that passed find_check_entry. */
883 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
888 /* And one copy for every other CPU */
889 for_each_possible_cpu(i) {
890 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
891 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Iterator callback: accumulate one entry's counters into total[*i]. */
899 add_entry_to_counter(const struct ip6t_entry *e,
900 struct xt_counters total[],
903 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Iterator callback: overwrite total[*i] with one entry's counters —
 * used for the first CPU instead of a prior memset + add. */
910 set_entry_to_counter(const struct ip6t_entry *e,
911 struct ip6t_counters total[],
914 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Sum per-CPU rule counters into the flat 'counters' array: SET from
 * the current CPU's copy first, then ADD every other CPU's copy under
 * that CPU's write lock.  NOTE(review): locking-setup lines are missing
 * from this extracted chunk. */
921 get_counters(const struct xt_table_info *t,
922 struct xt_counters counters[])
928 /* Instead of clearing (by a previous call to memset())
929 * the counters and using adds, we set the counters
930 * with data used by 'current' CPU
932 * Bottom half has to be disabled to prevent deadlock
933 * if new softirq were to run and call ipt_do_table
936 curcpu = smp_processor_id();
939 IP6T_ENTRY_ITERATE(t->entries[curcpu],
941 set_entry_to_counter,
945 for_each_possible_cpu(cpu) {
950 IP6T_ENTRY_ITERATE(t->entries[cpu],
952 add_entry_to_counter,
955 xt_info_wrunlock(cpu);
/* Allocate a snapshot array sized to the table's rule count and fill
 * it via get_counters().  Caller owns (and must vfree) the result;
 * returns ERR_PTR(-ENOMEM) on allocation failure. */
960 static struct xt_counters *alloc_counters(struct xt_table *table)
962 unsigned int countersize;
963 struct xt_counters *counters;
964 struct xt_table_info *private = table->private;
966 /* We need atomic snapshot of counters: rest doesn't change
967 (other than comefrom, which userspace doesn't care
969 countersize = sizeof(struct xt_counters) * private->number;
970 counters = vmalloc_node(countersize, numa_node_id());
972 if (counters == NULL)
973 return ERR_PTR(-ENOMEM);
975 get_counters(private, counters);
/* Copy the ruleset blob to userspace, then patch in the counter
 * snapshot and replace kernel pointers with extension names entry by
 * entry.  NOTE(review): error-path lines are missing from this
 * extracted chunk. */
981 copy_entries_to_user(unsigned int total_size,
982 struct xt_table *table,
983 void __user *userptr)
985 unsigned int off, num;
986 struct ip6t_entry *e;
987 struct xt_counters *counters;
988 const struct xt_table_info *private = table->private;
990 const void *loc_cpu_entry;
992 counters = alloc_counters(table);
993 if (IS_ERR(counters))
994 return PTR_ERR(counters);
996 /* choose the copy that is on our node/cpu, ...
997 * This choice is lazy (because current thread is
998 * allowed to migrate to another cpu)
1000 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1001 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1006 /* FIXME: use iterator macros --RR */
1007 /* ... then go back and fix counters and names */
1008 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1010 const struct ip6t_entry_match *m;
1011 const struct ip6t_entry_target *t;
1013 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1014 if (copy_to_user(userptr + off
1015 + offsetof(struct ip6t_entry, counters),
1017 sizeof(counters[num])) != 0) {
/* Overwrite each match's kernel-pointer union with its name. */
1022 for (i = sizeof(struct ip6t_entry);
1023 i < e->target_offset;
1024 i += m->u.match_size) {
1027 if (copy_to_user(userptr + off + i
1028 + offsetof(struct ip6t_entry_match,
1030 m->u.kernel.match->name,
1031 strlen(m->u.kernel.match->name)+1)
1038 t = ip6t_get_target(e);
1039 if (copy_to_user(userptr + off + e->target_offset
1040 + offsetof(struct ip6t_entry_target,
1042 t->u.kernel.target->name,
1043 strlen(t->u.kernel.target->name)+1) != 0) {
1054 #ifdef CONFIG_COMPAT
/* Convert a 32-bit userspace standard verdict to native: positive
 * verdicts are jump offsets that need the compat offset delta added. */
1055 static void compat_standard_from_user(void *dst, void *src)
1057 int v = *(compat_int_t *)src;
1060 v += xt_compat_calc_jump(AF_INET6, v);
1061 memcpy(dst, &v, sizeof(v));
/* Inverse of compat_standard_from_user(): subtract the compat offset
 * delta before copying the verdict out to 32-bit userspace. */
1064 static int compat_standard_to_user(void __user *dst, void *src)
1066 compat_int_t cv = *(int *)src;
1069 cv -= xt_compat_calc_jump(AF_INET6, cv);
1070 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Iterator callback: add one match's native-vs-compat size delta. */
1074 compat_calc_match(struct ip6t_entry_match *m, int *size)
1076 *size += xt_compat_match_offset(m->u.kernel.match);
/* Compute how much smaller one entry is in compat layout (entry header
 * + matches + target deltas), record the per-entry offset for later
 * pointer fixups, and shrink newinfo's size and any hook/underflow
 * offsets that lie past this entry. */
1080 static int compat_calc_entry(struct ip6t_entry *e,
1081 const struct xt_table_info *info,
1082 void *base, struct xt_table_info *newinfo)
1084 struct ip6t_entry_target *t;
1085 unsigned int entry_offset;
1088 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1089 entry_offset = (void *)e - base;
1090 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1091 t = ip6t_get_target(e);
1092 off += xt_compat_target_offset(t->u.kernel.target);
1093 newinfo->size -= off;
1094 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1098 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1099 if (info->hook_entry[i] &&
1100 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1101 newinfo->hook_entry[i] -= off;
1102 if (info->underflow[i] &&
1103 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1104 newinfo->underflow[i] -= off;
/* Build a compat-sized xt_table_info view of 'info' by running
 * compat_calc_entry() over this CPU's copy of the ruleset. */
1109 static int compat_table_info(const struct xt_table_info *info,
1110 struct xt_table_info *newinfo)
1112 void *loc_cpu_entry;
1114 if (!newinfo || !info)
1117 /* we dont care about newinfo->entries[] */
1118 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1119 newinfo->initial_entries = 0;
1120 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1121 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1122 compat_calc_entry, info, loc_cpu_entry,
/* IP6T_SO_GET_INFO handler: look up the named table (auto-loading its
 * "ip6table_<name>" module) and copy hook offsets, entry count and size
 * to userspace; in compat mode, sizes are recomputed for 32-bit layout.
 * NOTE(review): several lines (error returns, unlock paths) are missing
 * from this extracted chunk. */
1127 static int get_info(struct net *net, void __user *user, int *len, int compat)
1129 char name[IP6T_TABLE_MAXNAMELEN];
1133 if (*len != sizeof(struct ip6t_getinfo)) {
1134 duprintf("length %u != %zu\n", *len,
1135 sizeof(struct ip6t_getinfo));
1139 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the userspace-supplied table name. */
1142 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1143 #ifdef CONFIG_COMPAT
1145 xt_compat_lock(AF_INET6);
1147 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1148 "ip6table_%s", name);
1149 if (t && !IS_ERR(t)) {
1150 struct ip6t_getinfo info;
1151 const struct xt_table_info *private = t->private;
1153 #ifdef CONFIG_COMPAT
1155 struct xt_table_info tmp;
1156 ret = compat_table_info(private, &tmp);
1157 xt_compat_flush_offsets(AF_INET6);
1161 info.valid_hooks = t->valid_hooks;
1162 memcpy(info.hook_entry, private->hook_entry,
1163 sizeof(info.hook_entry));
1164 memcpy(info.underflow, private->underflow,
1165 sizeof(info.underflow));
1166 info.num_entries = private->number;
1167 info.size = private->size;
1168 strcpy(info.name, name);
1170 if (copy_to_user(user, &info, *len) != 0)
1178 ret = t ? PTR_ERR(t) : -ENOENT;
1179 #ifdef CONFIG_COMPAT
1181 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and copy the ruleset out via copy_entries_to_user(). */
1187 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1190 struct ip6t_get_entries get;
1193 if (*len < sizeof(get)) {
1194 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1197 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1199 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1200 duprintf("get_entries: %u != %zu\n",
1201 *len, sizeof(get) + get.size);
1205 t = xt_find_table_lock(net, AF_INET6, get.name);
1206 if (t && !IS_ERR(t)) {
1207 struct xt_table_info *private = t->private;
1208 duprintf("t->private->number = %u\n", private->number);
1209 if (get.size == private->size)
1210 ret = copy_entries_to_user(private->size,
1211 t, uptr->entrytable);
1213 duprintf("get_entries: I've got %u not %u!\n",
1214 private->size, get.size);
1220 ret = t ? PTR_ERR(t) : -ENOENT;
/* Common tail of table replacement (native and compat): swap the new
 * xt_table_info in under the table lock, adjust module refcounts,
 * snapshot the old counters for userspace, and tear down the old
 * ruleset.  NOTE(review): some error-handling lines are missing from
 * this extracted chunk. */
1226 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1227 struct xt_table_info *newinfo, unsigned int num_counters,
1228 void __user *counters_ptr)
1232 struct xt_table_info *oldinfo;
1233 struct xt_counters *counters;
1234 const void *loc_cpu_old_entry;
1237 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1244 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1245 "ip6table_%s", name);
1246 if (!t || IS_ERR(t)) {
1247 ret = t ? PTR_ERR(t) : -ENOENT;
1248 goto free_newinfo_counters_untrans;
1252 if (valid_hooks != t->valid_hooks) {
1253 duprintf("Valid hook crap: %08X vs %08X\n",
1254 valid_hooks, t->valid_hooks);
1259 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1263 /* Update module usage count based on number of rules */
1264 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1265 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1266 if ((oldinfo->number > oldinfo->initial_entries) ||
1267 (newinfo->number <= oldinfo->initial_entries))
1269 if ((oldinfo->number > oldinfo->initial_entries) &&
1270 (newinfo->number <= oldinfo->initial_entries))
1273 /* Get the old counters, and synchronize with replace */
1274 get_counters(oldinfo, counters);
1276 /* Decrease module usage counts and free resource */
1277 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1278 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1280 xt_free_table_info(oldinfo);
1281 if (copy_to_user(counters_ptr, counters,
1282 sizeof(struct xt_counters) * num_counters) != 0)
1291 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler: copy the replace header and ruleset
 * blob from userspace, translate/validate it, then install it via
 * __do_replace().  The blob is freed on any failure path. */
1298 do_replace(struct net *net, void __user *user, unsigned int len)
1301 struct ip6t_replace tmp;
1302 struct xt_table_info *newinfo;
1303 void *loc_cpu_entry;
1305 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1308 /* overflow check */
1309 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1312 newinfo = xt_alloc_table_info(tmp.size);
1316 /* choose the copy that is on our node/cpu */
1317 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1318 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1324 ret = translate_table(tmp.name, tmp.valid_hooks,
1325 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1326 tmp.hook_entry, tmp.underflow);
1330 duprintf("ip_tables: Translated table\n");
1332 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1333 tmp.num_counters, tmp.counters);
1335 goto free_newinfo_untrans;
1338 free_newinfo_untrans:
1339 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1341 xt_free_table_info(newinfo);
1345 /* We're lazy, and add to the first CPU; overflow works its fey magic
1346 * and everything is OK. */
/* Iterator callback: add the userspace-supplied deltas for rule *i
 * onto that rule's counters. */
1348 add_counter_to_entry(struct ip6t_entry *e,
1349 const struct xt_counters addme[],
1352 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* IP6T_SO_SET_ADD_COUNTERS handler: parse the (native or compat)
 * xt_counters_info header, copy the per-rule deltas, and add them onto
 * the current CPU's copy of the named table under its write lock.
 * NOTE(review): declaration and unlock/free lines are missing from
 * this extracted chunk. */
1359 do_add_counters(struct net *net, void __user *user, unsigned int len,
1362 unsigned int i, curcpu;
1363 struct xt_counters_info tmp;
1364 struct xt_counters *paddc;
1365 unsigned int num_counters;
1370 const struct xt_table_info *private;
1372 const void *loc_cpu_entry;
1373 #ifdef CONFIG_COMPAT
1374 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat (32-bit) and native callers. */
1378 size = sizeof(struct compat_xt_counters_info);
1383 size = sizeof(struct xt_counters_info);
1386 if (copy_from_user(ptmp, user, size) != 0)
1389 #ifdef CONFIG_COMPAT
1391 num_counters = compat_tmp.num_counters;
1392 name = compat_tmp.name;
1396 num_counters = tmp.num_counters;
1400 if (len != size + num_counters * sizeof(struct xt_counters))
1403 paddc = vmalloc_node(len - size, numa_node_id());
1407 if (copy_from_user(paddc, user + size, len - size) != 0) {
1412 t = xt_find_table_lock(net, AF_INET6, name);
1413 if (!t || IS_ERR(t)) {
1414 ret = t ? PTR_ERR(t) : -ENOENT;
1415 goto unlock_up_free;
1420 private = t->private;
1421 if (private->number != num_counters) {
1423 goto unlock_up_free;
1427 /* Choose the copy that is on our node */
1428 curcpu = smp_processor_id();
1429 xt_info_wrlock(curcpu);
1430 loc_cpu_entry = private->entries[curcpu];
1431 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1433 add_counter_to_entry,
1436 xt_info_wrunlock(curcpu);
1448 #ifdef CONFIG_COMPAT
/* 32-bit layout of struct ip6t_replace for compat ioctls: same fields,
 * but pointer members are compat_uptr_t and entries use the compat
 * entry layout. */
1449 struct compat_ip6t_replace {
1450 char name[IP6T_TABLE_MAXNAMELEN];
1454 u32 hook_entry[NF_INET_NUMHOOKS];
1455 u32 underflow[NF_INET_NUMHOOKS];
1457 compat_uptr_t counters; /* struct ip6t_counters * */
1458 struct compat_ip6t_entry entries[0];
/* Convert one native entry to compat layout in the userspace buffer:
 * copy the entry header and counters, convert matches and target via
 * the xt compat helpers, then patch the shrunken target/next offsets. */
1462 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1463 unsigned int *size, struct xt_counters *counters,
1466 struct ip6t_entry_target *t;
1467 struct compat_ip6t_entry __user *ce;
1468 u_int16_t target_offset, next_offset;
1469 compat_uint_t origsize;
1474 ce = (struct compat_ip6t_entry __user *)*dstptr;
1475 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1478 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1481 *dstptr += sizeof(struct compat_ip6t_entry);
1482 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1484 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Offsets shrink by however much *size dropped during conversion. */
1485 target_offset = e->target_offset - (origsize - *size);
1488 t = ip6t_get_target(e);
1489 ret = xt_compat_target_to_user(t, dstptr, size);
1493 next_offset = e->next_offset - (origsize - *size);
1494 if (put_user(target_offset, &ce->target_offset))
1496 if (put_user(next_offset, &ce->next_offset))
/* Compat variant of find_check_match(): resolve the match module and
 * accumulate its compat size delta into *size (no semantic check here —
 * that happens after translation to native layout). */
1506 compat_find_calc_match(struct ip6t_entry_match *m,
1508 const struct ip6t_ip6 *ipv6,
1509 unsigned int hookmask,
1510 int *size, unsigned int *i)
1512 struct xt_match *match;
1514 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1515 m->u.user.revision),
1516 "ip6t_%s", m->u.user.name);
1517 if (IS_ERR(match) || !match) {
1518 duprintf("compat_check_calc_match: `%s' not found\n",
1520 return match ? PTR_ERR(match) : -ENOENT;
1522 m->u.kernel.match = match;
1523 *size += xt_compat_match_offset(match);
/* Drop the module reference taken for one match.  With a non-NULL
 * counter i, stop once *i entries have been processed (post-decrement)
 * so callers can unwind only the matches that were set up. */
1530 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1532 if (i && (*i)-- == 0)
1535 module_put(m->u.kernel.match->me);
/* Release all module references held by one compat entry: every match
 * (unbounded walk, NULL counter) and then the target.  The *i counter
 * bounds how many entries are released during partial-failure unwind. */
1540 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1542 struct ip6t_entry_target *t;
1544 if (i && (*i)-- == 0)
1547 /* Cleanup all matches */
1548 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1549 t = compat_ip6t_get_target(e);
1550 module_put(t->u.kernel.target->me);
/* First-pass validation of one compat entry from userland: check
 * alignment and bounds, validate the minimum entry size, resolve all
 * matches and the target (taking module references), record the
 * native-vs-compat size delta via xt_compat_add_offset(), and note any
 * hook entry/underflow offsets that land on this entry.
 * NOTE(review): error returns and the out:/release_matches: labels are
 * elided in this excerpt. */
1555 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1556 struct xt_table_info *newinfo,
1558 unsigned char *base,
1559 unsigned char *limit,
1560 unsigned int *hook_entries,
1561 unsigned int *underflows,
1565 struct ip6t_entry_target *t;
1566 struct xt_target *target;
1567 unsigned int entry_offset;
1571 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would cross the
 * end of the user-supplied blob. */
1572 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1573 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1574 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* An entry must at least hold its header plus a target header. */
1578 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1579 sizeof(struct compat_xt_entry_target)) {
1580 duprintf("checking: element %p size %u\n",
1585 /* For purposes of check_entry casting the compat entry is fine */
1586 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native entry will be. */
1590 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1591 entry_offset = (void *)e - (void *)base;
1593 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1594 &e->ipv6, e->comefrom, &off, &j);
1596 goto release_matches;
/* Resolve the target by name/revision with module auto-load. */
1598 t = compat_ip6t_get_target(e);
1599 target = try_then_request_module(xt_find_target(AF_INET6,
1601 t->u.user.revision),
1602 "ip6t_%s", t->u.user.name);
1603 if (IS_ERR(target) || !target) {
1604 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1606 ret = target ? PTR_ERR(target) : -ENOENT;
1607 goto release_matches;
1609 t->u.kernel.target = target;
1611 off += xt_compat_target_offset(target);
/* Remember this entry's compat offset delta for the second pass. */
1613 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1617 /* Check hooks & underflows */
1618 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1619 if ((unsigned char *)e - base == hook_entries[h])
1620 newinfo->hook_entry[h] = hook_entries[h];
1621 if ((unsigned char *)e - base == underflows[h])
1622 newinfo->underflow[h] = underflows[h];
1625 /* Clear counters and comefrom */
1626 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then the j match refs taken above. */
1633 module_put(t->u.kernel.target->me);
/* NOTE(review): this iterates a *compat* entry with the native
 * IP6T_MATCH_ITERATE, whereas compat_release_entry() uses
 * COMPAT_IP6T_MATCH_ITERATE for the same purpose — the element layout
 * differs, so this looks inconsistent; confirm against upstream. */
1635 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/* Second pass: expand one validated compat entry into native layout at
 * *dstptr.  *size grows by the native-vs-compat delta; target_offset,
 * next_offset and the per-hook offsets in newinfo are rebased by the
 * growth accumulated so far (origsize - *size is negative growth).
 * NOTE(review): braces and some lines are elided in this excerpt. */
1640 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1641 unsigned int *size, const char *name,
1642 struct xt_table_info *newinfo, unsigned char *base)
1644 struct ip6t_entry_target *t;
1645 struct xt_target *target;
1646 struct ip6t_entry *de;
1647 unsigned int origsize;
/* Copy the fixed header and counters into the native entry. */
1652 de = (struct ip6t_entry *)*dstptr;
1653 memcpy(de, e, sizeof(struct ip6t_entry));
1654 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1656 *dstptr += sizeof(struct ip6t_entry);
1657 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Expand each match via the x_tables compat helper. */
1659 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1663 de->target_offset = e->target_offset - (origsize - *size);
1664 t = compat_ip6t_get_target(e);
1665 target = t->u.kernel.target;
1666 xt_compat_target_from_user(t, dstptr, size);
1668 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook entry/underflow offsets past this entry shift by the growth. */
1669 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1670 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1671 newinfo->hook_entry[h] -= origsize - *size;
1672 if ((unsigned char *)de - base < newinfo->underflow[h])
1673 newinfo->underflow[h] -= origsize - *size;
/* Final per-entry check after compat expansion: run each match's and
 * the target's checkentry hooks on the now-native entry; on failure,
 * unwind only the j matches that passed. */
1678 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1683 struct xt_mtchk_param mtpar;
/* Check parameters shared by all matches of this entry. */
1687 mtpar.entryinfo = &e->ipv6;
1688 mtpar.hook_mask = e->comefrom;
1689 mtpar.family = NFPROTO_IPV6;
1690 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1692 goto cleanup_matches;
1694 ret = check_target(e, name);
1696 goto cleanup_matches;
/* Unwind path: release the matches that were successfully checked. */
1702 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* Convert a full compat-layout ruleset blob into a native
 * xt_table_info: pass 1 validates every compat entry and sizes the
 * native table; pass 2 copies/expands the entries; then chains are
 * marked and per-entry checks run.  On success *pinfo/*pentry0 are
 * replaced with the new native table and the temporary info is freed.
 * NOTE(review): allocations, several error paths and labels are elided
 * in this excerpt. */
1707 translate_compat_table(const char *name,
1708 unsigned int valid_hooks,
1709 struct xt_table_info **pinfo,
1711 unsigned int total_size,
1712 unsigned int number,
1713 unsigned int *hook_entries,
1714 unsigned int *underflows)
1717 struct xt_table_info *newinfo, *info;
1718 void *pos, *entry0, *entry1;
1725 info->number = number;
1727 /* Init all hooks to impossible value. */
1728 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1729 info->hook_entry[i] = 0xFFFFFFFF;
1730 info->underflow[i] = 0xFFFFFFFF;
1733 duprintf("translate_compat_table: size %u\n", info->size);
/* The xt compat offset table is global per-family; hold its lock for
 * the whole validate-and-copy sequence. */
1735 xt_compat_lock(AF_INET6);
1736 /* Walk through entries, checking offsets. */
1737 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1738 check_compat_entry_size_and_hooks,
1739 info, &size, entry0,
1740 entry0 + total_size,
1741 hook_entries, underflows, &j, name);
/* j now counts entries whose match/target refs must be released on error. */
1747 duprintf("translate_compat_table: %u not %u entries\n",
1752 /* Check hooks all assigned */
1753 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1754 /* Only hooks which are valid */
1755 if (!(valid_hooks & (1 << i)))
1757 if (info->hook_entry[i] == 0xFFFFFFFF) {
1758 duprintf("Invalid hook entry %u %u\n",
1759 i, hook_entries[i]);
1762 if (info->underflow[i] == 0xFFFFFFFF) {
1763 duprintf("Invalid underflow %u %u\n",
/* Allocate the native-sized table computed during pass 1. */
1770 newinfo = xt_alloc_table_info(size);
1774 newinfo->number = number;
1775 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1776 newinfo->hook_entry[i] = info->hook_entry[i];
1777 newinfo->underflow[i] = info->underflow[i];
/* Expand into the copy belonging to the current CPU; other CPUs get a
 * memcpy of it below. */
1779 entry1 = newinfo->entries[raw_smp_processor_id()];
1782 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1783 compat_copy_entry_from_user,
1784 &pos, &size, name, newinfo, entry1);
1785 xt_compat_flush_offsets(AF_INET6);
1786 xt_compat_unlock(AF_INET6);
1791 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1795 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Error unwind after expansion: release the remaining compat refs
 * (continue from entry i), clean the native entries, free the table. */
1799 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1800 compat_release_entry, &j);
1801 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1802 xt_free_table_info(newinfo);
1806 /* And one copy for every other CPU */
1807 for_each_possible_cpu(i)
1808 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1809 memcpy(newinfo->entries[i], entry1, newinfo->size);
1813 xt_free_table_info(info);
/* Pass-1 failure path: release every compat entry's refs, then drop
 * the offset table and the compat lock. */
1817 xt_free_table_info(newinfo);
1819 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1822 xt_compat_flush_offsets(AF_INET6);
1823 xt_compat_unlock(AF_INET6);
/* IP6T_SO_SET_REPLACE handler for 32-bit userland: copy in the compat
 * replace header and blob, translate to native layout, then hand off
 * to the common __do_replace().  Counter pointer is converted with
 * compat_ptr().
 * NOTE(review): some error returns/labels are elided in this excerpt. */
1828 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1831 struct compat_ip6t_replace tmp;
1832 struct xt_table_info *newinfo;
1833 void *loc_cpu_entry;
1835 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1838 /* overflow check */
/* Guard the per-CPU and counter-array size multiplications. */
1839 if (tmp.size >= INT_MAX / num_possible_cpus())
1841 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1844 newinfo = xt_alloc_table_info(tmp.size)
1848 /* choose the copy that is on our node/cpu */
1849 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1850 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1856 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1857 &newinfo, &loc_cpu_entry, tmp.size,
1858 tmp.num_entries, tmp.hook_entry,
1863 duprintf("compat_do_replace: Translated table\n");
1865 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1866 tmp.num_counters, compat_ptr(tmp.counters));
1868 goto free_newinfo_untrans;
/* Failure after translation: clean up entries, then free the table. */
1871 free_newinfo_untrans:
1872 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1874 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: CAP_NET_ADMIN required, then route
 * REPLACE / ADD_COUNTERS to their compat-aware implementations
 * (do_add_counters is called with compat=1). */
1879 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1884 if (!capable(CAP_NET_ADMIN))
1888 case IP6T_SO_SET_REPLACE:
1889 ret = compat_do_replace(sock_net(sk), user, len);
1892 case IP6T_SO_SET_ADD_COUNTERS:
1893 ret = do_add_counters(sock_net(sk), user, len, 1);
1897 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userland request/reply header for IP6T_SO_GET_ENTRIES:
 * table name (in) followed by the compat-layout entry dump (out).
 * NOTE(review): the size member line is elided in this excerpt. */
1904 struct compat_ip6t_get_entries {
1905 char name[IP6T_TABLE_MAXNAMELEN];
1907 struct compat_ip6t_entry entrytable[0];
/* Dump a table's entries to 32-bit userland: snapshot the counters,
 * then convert each entry with compat_copy_entry_to_user(). */
1911 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1912 void __user *userptr)
1914 struct xt_counters *counters;
1915 const struct xt_table_info *private = table->private;
1919 const void *loc_cpu_entry;
/* alloc_counters() returns an aggregated snapshot or an ERR_PTR. */
1922 counters = alloc_counters(table);
1923 if (IS_ERR(counters))
1924 return PTR_ERR(counters);
1926 /* choose the copy that is on our node/cpu, ...
1927 * This choice is lazy (because current thread is
1928 * allowed to migrate to another cpu)
1930 loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* pos/size track the user cursor; i indexes the counters snapshot. */
1933 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1934 compat_copy_entry_to_user,
1935 &pos, &size, counters, &i);
/* IP6T_SO_GET_ENTRIES handler for 32-bit userland: validate the
 * request length against the compat-sized table, then dump entries.
 * The xt compat lock is held so offset bookkeeping stays consistent.
 * NOTE(review): some error returns/braces are elided in this excerpt. */
1942 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1946 struct compat_ip6t_get_entries get;
1949 if (*len < sizeof(get)) {
1950 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1954 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* User must supply exactly header + advertised entry size. */
1957 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1958 duprintf("compat_get_entries: %u != %zu\n",
1959 *len, sizeof(get) + get.size);
1963 xt_compat_lock(AF_INET6);
1964 t = xt_find_table_lock(net, AF_INET6, get.name);
1965 if (t && !IS_ERR(t)) {
1966 const struct xt_table_info *private = t->private;
1967 struct xt_table_info info;
1968 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the compat-layout size for comparison. */
1969 ret = compat_table_info(private, &info);
1970 if (!ret && get.size == info.size) {
1971 ret = compat_copy_entries_to_user(private->size,
1972 t, uptr->entrytable);
1974 duprintf("compat_get_entries: I've got %u not %u!\n",
1975 private->size, get.size);
1978 xt_compat_flush_offsets(AF_INET6);
1982 ret = t ? PTR_ERR(t) : -ENOENT;
1984 xt_compat_unlock(AF_INET6);
1988 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: INFO and ENTRIES get compat-aware
 * handling (get_info with compat=1); anything else falls through to
 * the native do_ip6t_get_ctl(). */
1991 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1995 if (!capable(CAP_NET_ADMIN))
1999 case IP6T_SO_GET_INFO:
2000 ret = get_info(sock_net(sk), user, len, 1);
2002 case IP6T_SO_GET_ENTRIES:
2003 ret = compat_get_entries(sock_net(sk), user, len);
2006 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher: CAP_NET_ADMIN required, then route
 * REPLACE / ADD_COUNTERS to the native implementations
 * (do_add_counters is called with compat=0). */
2013 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2017 if (!capable(CAP_NET_ADMIN))
2021 case IP6T_SO_SET_REPLACE:
2022 ret = do_replace(sock_net(sk), user, len);
2025 case IP6T_SO_SET_ADD_COUNTERS:
2026 ret = do_add_counters(sock_net(sk), user, len, 0);
2030 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: INFO, ENTRIES, and match/target
 * revision queries (which auto-load "ip6t_<name>" modules on demand).
 * NOTE(review): some case bodies are elided in this excerpt. */
2038 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2042 if (!capable(CAP_NET_ADMIN))
2046 case IP6T_SO_GET_INFO:
2047 ret = get_info(sock_net(sk), user, len, 0);
2050 case IP6T_SO_GET_ENTRIES:
2051 ret = get_entries(sock_net(sk), user, len);
2054 case IP6T_SO_GET_REVISION_MATCH:
2055 case IP6T_SO_GET_REVISION_TARGET: {
2056 struct ip6t_get_revision rev;
/* Revision queries take a fixed-size request structure. */
2059 if (*len != sizeof(rev)) {
2063 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2068 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Probe for the revision, loading the extension module if needed. */
2073 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2076 "ip6t_%s", rev.name);
2081 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register an ip6tables table for a netns: allocate per-CPU entry
 * storage, copy in the initial ruleset from repl, translate/validate
 * it, and hand the result to xt_register_table().  Returns the new
 * xt_table or an ERR_PTR on failure. */
2088 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2089 const struct ip6t_replace *repl)
2092 struct xt_table_info *newinfo;
/* Empty bootstrap info handed to xt_register_table as the old table. */
2093 struct xt_table_info bootstrap
2094 = { 0, 0, 0, { 0 }, { 0 }, { } };
2095 void *loc_cpu_entry;
2096 struct xt_table *new_table;
2098 newinfo = xt_alloc_table_info(repl->size);
2104 /* choose the copy on our node/cpu, but dont care about preemption */
2105 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2106 memcpy(loc_cpu_entry, repl->entries, repl->size);
2108 ret = translate_table(table->name, table->valid_hooks,
2109 newinfo, loc_cpu_entry, repl->size,
2116 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2117 if (IS_ERR(new_table)) {
2118 ret = PTR_ERR(new_table);
/* Failure path: free the table info and propagate the error. */
2124 xt_free_table_info(newinfo);
2126 return ERR_PTR(ret);
/* Tear down a registered table: detach it from x_tables, run each
 * entry's cleanup (dropping match/target module refs), balance the
 * extra table-module ref taken when user rules were loaded, and free
 * the per-CPU entry storage. */
2129 void ip6t_unregister_table(struct xt_table *table)
2131 struct xt_table_info *private;
2132 void *loc_cpu_entry;
/* Grab the owner before the table structure is torn down. */
2133 struct module *table_owner = table->me;
2135 private = xt_unregister_table(table);
2137 /* Decrease module usage counts and free resources */
2138 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2139 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* More entries than the built-ins means userland added rules, which
 * pinned the table module; release that reference here. */
2140 if (private->number > private->initial_entries)
2141 module_put(table_owner);
2142 xt_free_table_info(private);
2145 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2147 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2148 u_int8_t type, u_int8_t code,
/* NOTE(review): the trailing `invert` parameter and the XOR that
 * applies it are elided from this excerpt — confirm upstream. */
2151 return (type == test_type && code >= min_code && code <= max_code)
/* xt_match callback for "-p icmpv6 --icmpv6-type": pull the ICMPv6
 * header at the transport offset and compare type/code against the
 * configured range, honouring IP6T_ICMP_INV. */
2156 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2158 const struct icmp6hdr *ic;
2159 struct icmp6hdr _icmph;
2160 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2162 /* Must not be a fragment. */
2163 if (par->fragoff != 0)
/* May copy into _icmph if the header is non-linear in the skb. */
2166 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2168 /* We've been asked to examine this packet, and we
2169 * can't. Hence, no choice but to drop.
/* Truncated ICMPv6 header: request that the packet be dropped. */
2171 duprintf("Dropping evil ICMP tinygram.\n");
2172 *par->hotdrop = true;
2176 return icmp6_type_code_match(icmpinfo->type,
2179 ic->icmp6_type, ic->icmp6_code,
2180 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2183 /* Called when user tries to insert an entry of this type. */
/* Ruleset-load validation: accept only known inversion flags. */
2184 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2186 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2188 /* Must specify no unknown invflags */
2189 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2192 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is the int verdict; compat hooks
 * translate the verdict between 32/64-bit layouts. */
2193 static struct xt_target ip6t_standard_target __read_mostly = {
2194 .name = IP6T_STANDARD_TARGET,
2195 .targetsize = sizeof(int),
2196 .family = NFPROTO_IPV6,
2197 #ifdef CONFIG_COMPAT
2198 .compatsize = sizeof(compat_int_t),
2199 .compat_from_user = compat_standard_from_user,
2200 .compat_to_user = compat_standard_to_user,
/* ERROR target used to terminate chains; payload is an error name. */
2204 static struct xt_target ip6t_error_target __read_mostly = {
2205 .name = IP6T_ERROR_TARGET,
2206 .target = ip6t_error,
2207 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2208 .family = NFPROTO_IPV6,
/* Socket-option registration: wires the set/get (and compat) handlers
 * above into the netfilter sockopt range [IP6T_BASE_CTL, *_MAX]. */
2211 static struct nf_sockopt_ops ip6t_sockopts = {
2213 .set_optmin = IP6T_BASE_CTL,
2214 .set_optmax = IP6T_SO_SET_MAX+1,
2215 .set = do_ip6t_set_ctl,
2216 #ifdef CONFIG_COMPAT
2217 .compat_set = compat_do_ip6t_set_ctl,
2219 .get_optmin = IP6T_BASE_CTL,
2220 .get_optmax = IP6T_SO_GET_MAX+1,
2221 .get = do_ip6t_get_ctl,
2222 #ifdef CONFIG_COMPAT
2223 .compat_get = compat_do_ip6t_get_ctl,
2225 .owner = THIS_MODULE,
/* Built-in ICMPv6 match, restricted to IPPROTO_ICMPV6 rules. */
2228 static struct xt_match icmp6_matchstruct __read_mostly = {
2230 .match = icmp6_match,
2231 .matchsize = sizeof(struct ip6t_icmp),
2232 .checkentry = icmp6_checkentry,
2233 .proto = IPPROTO_ICMPV6,
2234 .family = NFPROTO_IPV6,
/* Per-netns init: set up the x_tables state for the IPv6 family. */
2237 static int __net_init ip6_tables_net_init(struct net *net)
2239 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: release the IPv6-family x_tables state. */
2242 static void __net_exit ip6_tables_net_exit(struct net *net)
2244 xt_proto_fini(net, NFPROTO_IPV6);
/* Hooks the per-netns init/exit above into the pernet subsystem. */
2247 static struct pernet_operations ip6_tables_net_ops = {
2248 .init = ip6_tables_net_init,
2249 .exit = ip6_tables_net_exit,
/* Module init: register in order — pernet subsystem, the two built-in
 * targets, the ICMPv6 match, then the sockopt interface; the trailing
 * calls are the reverse-order unwind for registration failures.
 * NOTE(review): the goto labels and error checks between steps are
 * elided in this excerpt. */
2252 static int __init ip6_tables_init(void)
2256 ret = register_pernet_subsys(&ip6_tables_net_ops);
2260 /* Noone else will be downing sem now, so we won't sleep */
2261 ret = xt_register_target(&ip6t_standard_target);
2264 ret = xt_register_target(&ip6t_error_target);
2267 ret = xt_register_match(&icmp6_matchstruct);
2271 /* Register setsockopt */
2272 ret = nf_register_sockopt(&ip6t_sockopts);
2276 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, reverse order of registration above. */
2280 xt_unregister_match(&icmp6_matchstruct);
2282 xt_unregister_target(&ip6t_error_target);
2284 xt_unregister_target(&ip6t_standard_target);
2286 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of init. */
2291 static void __exit ip6_tables_fini(void)
2293 nf_unregister_sockopt(&ip6t_sockopts);
2295 xt_unregister_match(&icmp6_matchstruct);
2296 xt_unregister_target(&ip6t_error_target);
2297 xt_unregister_target(&ip6t_standard_target);
2299 unregister_pernet_subsys(&ip6_tables_net_ops);
2303 * find the offset to specified header or the protocol number of last header
2304 * if target < 0. "last header" is transport protocol header, ESP, or
2307 * If target header is found, its offset is set in *offset and return protocol
2308 * number. Otherwise, return -1.
2310 * If the first fragment doesn't contain the final protocol header or
2311 * NEXTHDR_NONE it is considered invalid.
2313 * Note that non-1st fragment is special case that "the protocol number
2314 * of last header" is "next header" field in Fragment header. In this case,
2315 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* Walks the IPv6 extension-header chain starting right after the fixed
 * IPv6 header.  NOTE(review): several lines (error returns, loop exit,
 * final return) are elided in this excerpt. */
2319 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2320 int target, unsigned short *fragoff)
2322 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2323 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2324 unsigned int len = skb->len - start;
2329 while (nexthdr != target) {
2330 struct ipv6_opt_hdr _hdr, *hp;
2331 unsigned int hdrlen;
/* Chain ends at a non-extension header or NEXTHDR_NONE. */
2333 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2339 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2342 if (nexthdr == NEXTHDR_FRAGMENT) {
2343 unsigned short _frag_off;
/* Read only the frag_off field of the fragment header. */
2345 fp = skb_header_pointer(skb,
2346 start+offsetof(struct frag_hdr,
/* Mask off the reserved/M bits to get the fragment offset. */
2353 _frag_off = ntohs(*fp) & ~0x7;
2356 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2357 hp->nexthdr == NEXTHDR_NONE)) {
2359 *fragoff = _frag_off;
/* AH length is counted in 32-bit words; other extension headers use
 * the generic ipv6_optlen() (8-byte units). */
2365 } else if (nexthdr == NEXTHDR_AUTH)
2366 hdrlen = (hp->hdrlen + 2) << 2;
2368 hdrlen = ipv6_optlen(hp);
2370 nexthdr = hp->nexthdr;
2379 EXPORT_SYMBOL(ip6t_register_table);
2380 EXPORT_SYMBOL(ip6t_unregister_table);
2381 EXPORT_SYMBOL(ip6t_do_table);
2382 EXPORT_SYMBOL(ip6t_ext_hdr);
2383 EXPORT_SYMBOL(ipv6_find_hdr);
2385 module_init(ip6_tables_init);
2386 module_exit(ip6_tables_fini);