2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
29 #include <net/netfilter/nf_log.h>
/* Module identity plus compile-time debug knobs.
 * NOTE(review): this dump omits interior lines (the embedded line numbers
 * jump), so the #else/#endif lines that pair these #ifdef branches are
 * missing here — the no-op fallback definitions below sat in the elided
 * #else arms in the original.
 */
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
33 MODULE_DESCRIPTION("IPv4 packet filter");
/* Uncomment to enable verbose packet-path / userspace-path debugging. */
35 /*#define DEBUG_IP_FIREWALL*/
36 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
37 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf(): packet-path debug output; compiles to nothing when disabled. */
39 #ifdef DEBUG_IP_FIREWALL
40 #define dprintf(format, args...) printk(format , ## args)
42 #define dprintf(format, args...)
/* duprintf(): userspace-interface debug output; no-op when disabled. */
45 #ifdef DEBUG_IP_FIREWALL_USER
46 #define duprintf(format, args...) printk(format , ## args)
48 #define duprintf(format, args...)
/* IP_NF_ASSERT(): logs location on failed assertion under
 * CONFIG_NETFILTER_DEBUG; otherwise expands to nothing. */
51 #ifdef CONFIG_NETFILTER_DEBUG
52 #define IP_NF_ASSERT(x) \
55 printk("IP_NF_ASSERT: %s:%s:%u\n", \
56 __FUNCTION__, __FILE__, __LINE__); \
59 #define IP_NF_ASSERT(x)
69 We keep a set of rules for each CPU, so we can avoid write-locking
70 them in the softirq when updating the counters and therefore
71 only need to read-lock in the softirq; doing a write_lock_bh() in user
72 context stops packets coming through and allows user context to read
73 the counters or update the rules.
75 Hence the start of any table is given by get_table() below. */
/* ip_packet_match(): core rule-match predicate — decides whether an IPv4
 * header matches one rule's IP-level criteria (src/dst address+mask,
 * in/out interface name, protocol, fragment flag), honoring the rule's
 * inversion flags.
 * NOTE(review): dump is truncated — the function's braces, some parameter
 * lines (indev/outdev/isfrag — inferred from use; confirm against the
 * original), and the early `return false` / final `return true` paths are
 * among the elided lines. Comments describe visible code only.
 */
77 /* Returns whether matches rule or not. */
79 ip_packet_match(const struct iphdr *ip,
82 const struct ipt_ip *ipinfo,
/* FWINV: XOR the raw comparison with the rule's inversion bit, so one
 * expression handles both "match X" and "match NOT X". */
88 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
/* Address check: mask the packet address with the rule's netmask and
 * compare against the rule's (pre-masked) address. */
90 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
92 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
94 dprintf("Source or dest mismatch.\n");
96 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
98 NIPQUAD(ipinfo->smsk.s_addr),
99 NIPQUAD(ipinfo->src.s_addr),
100 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
101 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
103 NIPQUAD(ipinfo->dmsk.s_addr),
104 NIPQUAD(ipinfo->dst.s_addr),
105 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
/* Interface-name comparison done word-at-a-time: XOR name words, mask
 * with the rule's iface mask, OR differences into `ret` (0 == match). */
109 /* Look for ifname matches; this should unroll nicely. */
110 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
111 ret |= (((const unsigned long *)indev)[i]
112 ^ ((const unsigned long *)ipinfo->iniface)[i])
113 & ((const unsigned long *)ipinfo->iniface_mask)[i];
116 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
117 dprintf("VIA in mismatch (%s vs %s).%s\n",
118 indev, ipinfo->iniface,
119 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
/* Same word-wise comparison for the outgoing interface. */
123 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
124 ret |= (((const unsigned long *)outdev)[i]
125 ^ ((const unsigned long *)ipinfo->outiface)[i])
126 & ((const unsigned long *)ipinfo->outiface_mask)[i];
129 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
130 dprintf("VIA out mismatch (%s vs %s).%s\n",
131 outdev, ipinfo->outiface,
132 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
/* Protocol check — the leading `ipinfo->proto &&` guard (proto 0 means
 * "any") is on an elided line; only the second half is visible here. */
136 /* Check specific protocol */
138 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
139 dprintf("Packet protocol %hi does not match %hi.%s\n",
140 ip->protocol, ipinfo->proto,
141 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
145 /* If we have a fragment rule but the packet is not a fragment
146 * then we return zero */
147 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
148 dprintf("Fragment rule but not fragment.%s\n",
149 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
/* ip_checkentry(): validates a userspace-supplied ipt_ip — rejects any
 * flag or inversion-flag bits outside the kernel-known masks (a forward/
 * backward compatibility guard against newer userspace tools).
 * NOTE(review): return statements and closing brace are on elided lines.
 */
157 ip_checkentry(const struct ipt_ip *ip)
159 if (ip->flags & ~IPT_F_MASK) {
160 duprintf("Unknown flag bits set: %08X\n",
161 ip->flags & ~IPT_F_MASK);
164 if (ip->invflags & ~IPT_INV_MASK) {
165 duprintf("Unknown invflag bits set: %08X\n",
166 ip->invflags & ~IPT_INV_MASK);
/* ipt_error(): target handler for ERROR rules — should never be hit on a
 * live packet; logs the chain name carried in targinfo. The NF_DROP
 * return is on an elided line.
 */
173 ipt_error(struct sk_buff *skb,
174 const struct net_device *in,
175 const struct net_device *out,
176 unsigned int hooknum,
177 const struct xt_target *target,
178 const void *targinfo)
181 printk("ip_tables: error: `%s'\n", (char *)targinfo);
/* do_match(): IPT_MATCH_ITERATE callback — runs one match extension
 * against the skb; the visible return path fires when the match FAILS
 * (nonzero stops iteration). The success return and the offset/hotdrop
 * parameter lines are elided in this dump.
 */
187 bool do_match(struct ipt_entry_match *m,
188 const struct sk_buff *skb,
189 const struct net_device *in,
190 const struct net_device *out,
194 /* Stop iteration if it doesn't match */
195 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
196 offset, ip_hdrlen(skb), hotdrop))
/* get_entry(): translate a byte offset within a table blob into an
 * ipt_entry pointer. (Braces elided in this dump.) */
202 static inline struct ipt_entry *
203 get_entry(void *base, unsigned int offset)
205 return (struct ipt_entry *)(base + offset);
/* unconditional(): true iff the rule's ipt_ip is all-zero words, i.e. it
 * matches every packet. Loop exit / return lines are elided. */
208 /* All zeroes == unconditional rule. */
210 unconditional(const struct ipt_ip *ip)
214 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
215 if (((__u32 *)ip)[i])
/* Static string tables used only when the TRACE target is built: hook
 * names, rule-comment kinds, and the log format for traced packets.
 * NOTE(review): closing braces/semicolons of these initializers fall on
 * elided lines in this dump.
 */
221 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
222 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
223 static const char *hooknames[] = {
224 [NF_INET_PRE_ROUTING] = "PREROUTING",
225 [NF_INET_LOCAL_IN] = "INPUT",
226 [NF_INET_FORWARD] = "FORWARD",
227 [NF_INET_LOCAL_OUT] = "OUTPUT",
228 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Which kind of rule a trace line refers to (plain rule, chain return,
 * or built-in chain policy). */
231 enum nf_ip_trace_comments {
232 NF_IP_TRACE_COMMENT_RULE,
233 NF_IP_TRACE_COMMENT_RETURN,
234 NF_IP_TRACE_COMMENT_POLICY,
237 static const char *comments[] = {
238 [NF_IP_TRACE_COMMENT_RULE] = "rule",
239 [NF_IP_TRACE_COMMENT_RETURN] = "return",
240 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
/* nf_log settings used for "TRACE:" log lines. */
243 static struct nf_loginfo trace_loginfo = {
244 .type = NF_LOG_TYPE_LOG,
248 .logflags = NF_LOG_MASK,
/* get_chainname_rulenum(): IPT_ENTRY_ITERATE callback used by
 * trace_packet() — walks entries up to the matched one, tracking the
 * current chain name (set by ERROR pseudo-entries that head user chains)
 * and the rule number within it.
 * NOTE(review): the rulenum bookkeeping and the return statements are on
 * elided lines; comments cover visible logic only.
 */
254 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
255 char *hookname, char **chainname,
256 char **comment, unsigned int *rulenum)
258 struct ipt_standard_target *t = (void *)ipt_get_target(s);
260 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
261 /* Head of user chain: ERROR target with chainname */
262 *chainname = t->target.data;
/* An unconditional STANDARD target at the end of a chain is either the
 * built-in policy (if we're still in the hook's own chain) or a RETURN. */
267 if (s->target_offset == sizeof(struct ipt_entry)
268 && strcmp(t->target.u.kernel.target->name,
269 IPT_STANDARD_TARGET) == 0
271 && unconditional(&s->ip)) {
272 /* Tail of chains: STANDARD target (return/policy) */
273 *comment = *chainname == hookname
274 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
275 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
/* trace_packet(): emits one "TRACE: table:chain:comment:rulenum" log line
 * for a packet marked with skb->nf_trace, by re-walking this hook's chain
 * to locate the matched entry `e`.
 * NOTE(review): the hook/tablename/e parameter lines and some locals are
 * elided in this dump — inferred from use; confirm against the original.
 */
284 static void trace_packet(struct sk_buff *skb,
286 const struct net_device *in,
287 const struct net_device *out,
289 struct xt_table_info *private,
293 struct ipt_entry *root;
294 char *hookname, *chainname, *comment;
295 unsigned int rulenum = 0;
/* Walk this CPU's copy of the table starting at the hook's entry point. */
297 table_base = (void *)private->entries[smp_processor_id()];
298 root = get_entry(table_base, private->hook_entry[hook]);
300 hookname = chainname = (char *)hooknames[hook];
301 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
303 IPT_ENTRY_ITERATE(root,
304 private->size - private->hook_entry[hook],
305 get_chainname_rulenum,
306 e, hookname, &chainname, &comment, &rulenum);
308 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
309 "TRACE: %s:%s:%s:%u ",
310 tablename, chainname, comment, rulenum);
/* ipt_do_table(): the main packet-filter evaluation loop. Under the
 * table's read lock it walks this CPU's rule copy from the hook's entry
 * point: for each matching rule it bumps counters and executes the
 * target; STANDARD targets yield a verdict or jump/return via a
 * back-pointer stack kept in the entries themselves.
 * NOTE(review): this dump elides many interior lines (the `do { } while`
 * frame, the `ip`/`hook`/`datalen`/`offset`/`table_base` declarations,
 * several jump-handling statements, and the final verdict return), so
 * comments below annotate only the visible statements.
 */
314 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
316 ipt_do_table(struct sk_buff *skb,
318 const struct net_device *in,
319 const struct net_device *out,
320 struct xt_table *table)
/* Long-aligned all-zero name so word-wise iface compares work for "no
 * device" (see ip_packet_match). */
322 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
326 bool hotdrop = false;
327 /* Initializing verdict to NF_DROP keeps gcc happy. */
328 unsigned int verdict = NF_DROP;
329 const char *indev, *outdev;
331 struct ipt_entry *e, *back;
332 struct xt_table_info *private;
336 datalen = skb->len - ip->ihl * 4;
337 indev = in ? in->name : nulldevname;
338 outdev = out ? out->name : nulldevname;
339 /* We handle fragments by dealing with the first fragment as
340 * if it was a normal packet. All other fragments are treated
341 * normally, except that they will NEVER match rules that ask
342 * things we don't know, ie. tcp syn flag or ports). If the
343 * rule is also a fragment-specific rule, non-fragments won't
345 offset = ntohs(ip->frag_off) & IP_OFFSET;
/* Read-lock: packet path only reads the table; writers (replace/counter
 * updates from userspace) take the write side. */
347 read_lock_bh(&table->lock);
348 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
349 private = table->private;
350 table_base = (void *)private->entries[smp_processor_id()];
351 e = get_entry(table_base, private->hook_entry[hook]);
353 /* For return from builtin chain */
354 back = get_entry(table_base, private->underflow[hook]);
359 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
360 struct ipt_entry_target *t;
/* Run all match extensions; any failure falls through to the
 * no-match path (on elided lines). */
362 if (IPT_MATCH_ITERATE(e, do_match,
364 offset, &hotdrop) != 0)
367 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
369 t = ipt_get_target(e);
370 IP_NF_ASSERT(t->u.kernel.target);
372 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
373 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
374 /* The packet is traced: log it */
375 if (unlikely(skb->nf_trace))
376 trace_packet(skb, hook, in, out,
377 table->name, private, e);
379 /* Standard target? */
380 if (!t->u.kernel.target->target) {
383 v = ((struct ipt_standard_target *)t)->verdict;
385 /* Pop from stack? */
386 if (v != IPT_RETURN) {
/* Negative verdicts encode NF_* results as -(verdict)-1. */
387 verdict = (unsigned)(-v) - 1;
391 back = get_entry(table_base,
/* Jump handling: push current back-pointer into the next entry unless
 * this is a tail call (GOTO or jump to the fall-through entry). */
395 if (table_base + v != (void *)e + e->next_offset
396 && !(e->ip.flags & IPT_F_GOTO)) {
397 /* Save old back ptr in next entry */
398 struct ipt_entry *next
399 = (void *)e + e->next_offset;
401 = (void *)back - table_base;
402 /* set back pointer to next entry */
406 e = get_entry(table_base, v);
408 /* Targets which reenter must return
410 #ifdef CONFIG_NETFILTER_DEBUG
411 ((struct ipt_entry *)table_base)->comefrom
414 verdict = t->u.kernel.target->target(skb,
420 #ifdef CONFIG_NETFILTER_DEBUG
421 if (((struct ipt_entry *)table_base)->comefrom
423 && verdict == IPT_CONTINUE) {
424 printk("Target %s reentered!\n",
425 t->u.kernel.target->name);
428 ((struct ipt_entry *)table_base)->comefrom
431 /* Target might have changed stuff. */
433 datalen = skb->len - ip->ihl * 4;
435 if (verdict == IPT_CONTINUE)
436 e = (void *)e + e->next_offset;
/* No match: advance to the next rule. */
444 e = (void *)e + e->next_offset;
448 read_unlock_bh(&table->lock);
450 #ifdef DEBUG_ALLOW_ALL
/* mark_source_chains(): userspace-table validation pass — for every hook,
 * walks the rule graph marking each entry's `comefrom` with the hooks it
 * is reachable from, detecting loops (revisiting an entry still on the
 * current path) and bogus verdicts/jump offsets. Uses counters.pcnt as a
 * scratch back-pointer stack so no recursion/allocation is needed.
 * NOTE(review): numerous interior lines are elided in this dump (the
 * outer `for(;;)`, several `continue`/`break`/`return` statements and
 * the duprintf argument lines), so control flow here is only partially
 * visible.
 */
459 /* Figures out from what hook each rule can be called: returns 0 if
460 there are loops. Puts hook bitmask in comefrom. */
462 mark_source_chains(struct xt_table_info *newinfo,
463 unsigned int valid_hooks, void *entry0)
467 /* No recursion; use packet counter to save back ptrs (reset
468 to 0 as we leave), and comefrom to save source hook bitmask */
469 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
470 unsigned int pos = newinfo->hook_entry[hook];
471 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
473 if (!(valid_hooks & (1 << hook)))
476 /* Set initial back pointer. */
477 e->counters.pcnt = pos;
480 struct ipt_standard_target *t
481 = (void *)ipt_get_target(e);
482 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS marks "currently on the walk path"; seeing it
 * again means the ruleset contains a loop. */
484 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook, pos, e->comefrom);
489 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
491 /* Unconditional return/END. */
492 if ((e->target_offset == sizeof(struct ipt_entry)
493 && (strcmp(t->target.u.user.name,
494 IPT_STANDARD_TARGET) == 0)
496 && unconditional(&e->ip)) || visited) {
497 unsigned int oldpos, size;
498 if (t->verdict < -NF_MAX_VERDICT - 1) {
500 duprintf("mark_source_chains: bad "
501 "negative verdict (%i)\n",
506 /* Return: backtrack through the last
/* Pop the scratch stack: clear the on-path bit, restore the previous
 * position from pcnt and zero it again. */
509 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
510 #ifdef DEBUG_IP_FIREWALL_USER
512 & (1 << NF_INET_NUMHOOKS)) {
513 duprintf("Back unset "
520 pos = e->counters.pcnt;
521 e->counters.pcnt = 0;
523 /* We're at the start. */
527 e = (struct ipt_entry *)
529 } while (oldpos == pos + e->next_offset);
532 size = e->next_offset;
533 e = (struct ipt_entry *)
534 (entry0 + pos + size);
535 e->counters.pcnt = pos;
/* Conditional rule or jump: chase the verdict offset (for STANDARD
 * jumps) or fall through to the next entry. */
538 int newpos = t->verdict;
540 if (strcmp(t->target.u.user.name,
541 IPT_STANDARD_TARGET) == 0
543 if (newpos > newinfo->size -
544 sizeof(struct ipt_entry)) {
545 duprintf("mark_source_chains: "
546 "bad verdict (%i)\n",
550 /* This a jump; chase it. */
551 duprintf("Jump rule %u -> %u\n",
554 /* ... this is a fallthru */
555 newpos = pos + e->next_offset;
557 e = (struct ipt_entry *)
559 e->counters.pcnt = pos;
564 duprintf("Finished chain %u\n", hook);
/* cleanup_match(): IPT_MATCH_ITERATE callback — undo one match's
 * checkentry: call its destroy hook (if any) and drop the module ref.
 * When `i` is non-NULL it limits cleanup to the first *i matches (used
 * to unwind after a partial setup failure).
 */
570 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
572 if (i && (*i)-- == 0)
575 if (m->u.kernel.match->destroy)
576 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
577 module_put(m->u.kernel.match->me);
/* check_entry(): basic structural sanity of one userspace entry — valid
 * ipt_ip, target fits between target_offset and next_offset.
 * NOTE(review): the size-comparison RHS on the truncated `if` at 591 and
 * the error returns fall on elided lines.
 */
582 check_entry(struct ipt_entry *e, const char *name)
584 struct ipt_entry_target *t;
586 if (!ip_checkentry(&e->ip)) {
587 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
591 if (e->target_offset + sizeof(struct ipt_entry_target) >
595 t = ipt_get_target(e);
596 if (e->target_offset + t->u.target_size > e->next_offset)
/* check_match(): validate one already-resolved match extension via
 * xt_check_match() plus the extension's own checkentry hook.
 * NOTE(review): the success/error return statements and the `(*i)++`
 * accounting fall on elided lines in this dump.
 */
602 static inline int check_match(struct ipt_entry_match *m, const char *name,
603 const struct ipt_ip *ip,
604 unsigned int hookmask, unsigned int *i)
606 struct xt_match *match;
609 match = m->u.kernel.match;
610 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
611 name, hookmask, ip->proto,
612 ip->invflags & IPT_INV_PROTO);
613 if (!ret && m->u.kernel.match->checkentry
614 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
616 duprintf("ip_tables: check failed for `%s'.\n",
617 m->u.kernel.match->name);
/* find_check_match(): resolve the match module by name (auto-loading
 * "ipt_<name>" if needed), store it in the entry, then run check_match();
 * on check failure the elided error path drops the module ref below.
 */
626 find_check_match(struct ipt_entry_match *m,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
632 struct xt_match *match;
635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
640 return match ? PTR_ERR(match) : -ENOENT;
642 m->u.kernel.match = match;
644 ret = check_match(m, name, ip, hookmask, i);
/* error unwind: release the ref taken by xt_find_match() */
650 module_put(m->u.kernel.match->me);
/* check_target(): validate the (already-resolved) target of one entry via
 * xt_check_target() and the target's own checkentry hook. `e->comefrom`
 * carries the hook mask computed by mark_source_chains().
 * NOTE(review): return statements are on elided lines.
 */
654 static inline int check_target(struct ipt_entry *e, const char *name)
656 struct ipt_entry_target *t;
657 struct xt_target *target;
660 t = ipt_get_target(e);
661 target = t->u.kernel.target;
662 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
663 name, e->comefrom, e->ip.proto,
664 e->ip.invflags & IPT_INV_PROTO);
665 if (!ret && t->u.kernel.target->checkentry
666 && !t->u.kernel.target->checkentry(name, e, target, t->data,
668 duprintf("ip_tables: check failed for `%s'.\n",
669 t->u.kernel.target->name);
/* find_check_entry(): full per-entry setup during table translation —
 * structural check, resolve+check every match (counting them in the
 * elided local `j` for unwinding), resolve the target module, then run
 * check_target(). On any failure, unwind in reverse order.
 * NOTE(review): several locals and the success return are elided.
 */
676 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
679 struct ipt_entry_target *t;
680 struct xt_target *target;
684 ret = check_entry(e, name);
689 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
692 goto cleanup_matches;
694 t = ipt_get_target(e);
695 target = try_then_request_module(xt_find_target(AF_INET,
698 "ipt_%s", t->u.user.name);
699 if (IS_ERR(target) || !target) {
700 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
701 ret = target ? PTR_ERR(target) : -ENOENT;
702 goto cleanup_matches;
704 t->u.kernel.target = target;
706 ret = check_target(e, name);
/* unwind: drop target module ref, then destroy the first j matches */
713 module_put(t->u.kernel.target->me);
715 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/* check_entry_size_and_hooks(): first translation pass over a raw
 * userspace blob — verify each entry's alignment and minimum size,
 * record which entries are hook entry points / underflows, and reset
 * counters. (The `base` parameter and entry-count increment are on
 * elided lines.)
 */
720 check_entry_size_and_hooks(struct ipt_entry *e,
721 struct xt_table_info *newinfo,
723 unsigned char *limit,
724 const unsigned int *hook_entries,
725 const unsigned int *underflows,
/* Entries must be properly aligned and fit inside the blob. */
730 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
731 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
732 duprintf("Bad offset %p\n", e);
/* Minimum size: the entry header plus at least a target header (the
 * `e->next_offset` LHS of this comparison is on an elided line). */
737 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
738 duprintf("checking: element %p size %u\n",
743 /* Check hooks & underflows */
744 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
745 if ((unsigned char *)e - base == hook_entries[h])
746 newinfo->hook_entry[h] = hook_entries[h];
747 if ((unsigned char *)e - base == underflows[h])
748 newinfo->underflow[h] = underflows[h];
751 /* FIXME: underflows must be unconditional, standard verdicts
752 < 0 (not IPT_RETURN). --RR */
754 /* Clear counters and comefrom */
755 e->counters = ((struct xt_counters) { 0, 0 });
/* cleanup_entry(): tear down one rule — destroy all its matches, run the
 * target's destroy hook, and drop the target module ref. Non-NULL `i`
 * bounds cleanup to the first *i entries (partial-failure unwind).
 */
763 cleanup_entry(struct ipt_entry *e, unsigned int *i)
765 struct ipt_entry_target *t;
767 if (i && (*i)-- == 0)
770 /* Cleanup all matches */
771 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
772 t = ipt_get_target(e);
773 if (t->u.kernel.target->destroy)
774 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
775 module_put(t->u.kernel.target->me);
/* translate_table(): validate and finalize a userspace-supplied table:
 * size/offset walk, hook-entry/underflow presence, loop detection via
 * mark_source_chains(), per-entry checkentry pass, then replicate the
 * validated blob into every other CPU's copy.
 * NOTE(review): the `entry0`/`size`/`number` parameter lines, the
 * entry-count comparison, and several error returns are elided.
 */
779 /* Checks and translates the user-supplied table segment (held in
782 translate_table(const char *name,
783 unsigned int valid_hooks,
784 struct xt_table_info *newinfo,
788 const unsigned int *hook_entries,
789 const unsigned int *underflows)
794 newinfo->size = size;
795 newinfo->number = number;
797 /* Init all hooks to impossible value. */
798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
803 duprintf("translate_table: size %u\n", newinfo->size);
805 /* Walk through entries, checking offsets. */
806 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
807 check_entry_size_and_hooks,
811 hook_entries, underflows, &i);
816 duprintf("translate_table: %u not %u entries\n",
821 /* Check hooks all assigned */
822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
823 /* Only hooks which are valid */
824 if (!(valid_hooks & (1 << i)))
826 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
827 duprintf("Invalid hook entry %u %u\n",
831 if (newinfo->underflow[i] == 0xFFFFFFFF) {
832 duprintf("Invalid underflow %u %u\n",
838 if (!mark_source_chains(newinfo, valid_hooks, entry0))
841 /* Finally, each sanity check must pass */
843 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
844 find_check_entry, name, size, &i);
/* On failure, unwind the i entries already set up. */
847 IPT_ENTRY_ITERATE(entry0, newinfo->size,
852 /* And one copy for every other CPU */
853 for_each_possible_cpu(i) {
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* add_entry_to_counter() / set_entry_to_counter(): per-entry iterator
 * callbacks that accumulate (ADD) or initialize (SET) one entry's
 * byte/packet counters into the output array slot *i. (The `(*i)++`
 * advance and returns are on elided lines.)
 */
863 add_entry_to_counter(const struct ipt_entry *e,
864 struct xt_counters total[],
867 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
874 set_entry_to_counter(const struct ipt_entry *e,
875 struct ipt_counters total[],
878 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters(): aggregate counters across all per-CPU rule copies:
 * SET from the current CPU's copy first, then ADD every other CPU's.
 * (The `if (cpu == curcpu) continue;` skip is on an elided line.)
 */
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
892 /* Instead of clearing (by a previous call to memset())
893 * the counters and using adds, we set the counters
894 * with data used by 'current' CPU
895 * We dont care about preemption here.
897 curcpu = raw_smp_processor_id();
900 IPT_ENTRY_ITERATE(t->entries[curcpu],
902 set_entry_to_counter,
906 for_each_possible_cpu(cpu) {
910 IPT_ENTRY_ITERATE(t->entries[cpu],
912 add_entry_to_counter,
/* alloc_counters(): vmalloc an array of one xt_counters per rule and fill
 * it with an atomic snapshot (taken under the table write lock so the
 * packet path cannot update counters concurrently). Returns ERR_PTR on
 * allocation failure; the success return is on an elided line. Caller
 * owns (and must vfree) the returned array.
 */
918 static inline struct xt_counters * alloc_counters(struct xt_table *table)
920 unsigned int countersize;
921 struct xt_counters *counters;
922 struct xt_table_info *private = table->private;
924 /* We need atomic snapshot of counters: rest doesn't change
925 (other than comefrom, which userspace doesn't care
927 countersize = sizeof(struct xt_counters) * private->number;
928 counters = vmalloc_node(countersize, numa_node_id());
930 if (counters == NULL)
931 return ERR_PTR(-ENOMEM);
933 /* First, sum counters... */
934 write_lock_bh(&table->lock);
935 get_counters(private, counters);
936 write_unlock_bh(&table->lock);
/* copy_entries_to_user(): implement IPT_SO_GET_ENTRIES — copy this CPU's
 * rule blob to userspace wholesale, then patch each entry in the user
 * buffer: overwrite counters with the aggregated snapshot and rewrite
 * kernel match/target pointers back into their names.
 * NOTE(review): error paths (the `free_counters`-style unwind, -EFAULT
 * assignments) and the loop over `m` initialization are elided here.
 */
942 copy_entries_to_user(unsigned int total_size,
943 struct xt_table *table,
944 void __user *userptr)
946 unsigned int off, num;
948 struct xt_counters *counters;
949 struct xt_table_info *private = table->private;
953 counters = alloc_counters(table);
954 if (IS_ERR(counters))
955 return PTR_ERR(counters);
957 /* choose the copy that is on our node/cpu, ...
958 * This choice is lazy (because current thread is
959 * allowed to migrate to another cpu)
961 loc_cpu_entry = private->entries[raw_smp_processor_id()];
962 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
967 /* FIXME: use iterator macros --RR */
968 /* ... then go back and fix counters and names */
969 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
971 struct ipt_entry_match *m;
972 struct ipt_entry_target *t;
974 e = (struct ipt_entry *)(loc_cpu_entry + off);
975 if (copy_to_user(userptr + off
976 + offsetof(struct ipt_entry, counters),
978 sizeof(counters[num])) != 0) {
/* Replace each match's kernel pointer with its user-visible name. */
983 for (i = sizeof(struct ipt_entry);
984 i < e->target_offset;
985 i += m->u.match_size) {
988 if (copy_to_user(userptr + off + i
989 + offsetof(struct ipt_entry_match,
991 m->u.kernel.match->name,
992 strlen(m->u.kernel.match->name)+1)
/* Same fixup for the target name. */
999 t = ipt_get_target(e);
1000 if (copy_to_user(userptr + off + e->target_offset
1001 + offsetof(struct ipt_entry_target,
1003 t->u.kernel.target->name,
1004 strlen(t->u.kernel.target->name)+1) != 0) {
/* CONFIG_COMPAT helpers: translate standard verdicts and entry sizes
 * between 32-bit userspace layout and native kernel layout.
 */
1015 #ifdef CONFIG_COMPAT
/* Adjust a compat standard verdict: positive verdicts are jump offsets
 * and must be shifted by the accumulated compat size delta. */
1016 static void compat_standard_from_user(void *dst, void *src)
1018 int v = *(compat_int_t *)src;
1021 v += xt_compat_calc_jump(AF_INET, v);
1022 memcpy(dst, &v, sizeof(v));
/* Reverse direction: shrink a native jump offset back to compat. */
1025 static int compat_standard_to_user(void __user *dst, void *src)
1027 compat_int_t cv = *(int *)src;
1030 cv -= xt_compat_calc_jump(AF_INET, cv);
1031 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Accumulate one match's native-vs-compat size difference into *size. */
1035 compat_calc_match(struct ipt_entry_match *m, int *size)
1037 *size += xt_compat_match_offset(m->u.kernel.match);
/* compat_calc_entry(): compute how much smaller one native entry is in
 * compat layout, register that offset with xt_compat, and shrink any
 * hook_entry/underflow offsets that lie after this entry.
 * NOTE(review): the `off`/`ret` declarations and final return are on
 * elided lines.
 */
1041 static int compat_calc_entry(struct ipt_entry *e,
1042 const struct xt_table_info *info,
1043 void *base, struct xt_table_info *newinfo)
1045 struct ipt_entry_target *t;
1046 unsigned int entry_offset;
1049 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1050 entry_offset = (void *)e - base;
1051 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1052 t = ipt_get_target(e);
1053 off += xt_compat_target_offset(t->u.kernel.target);
1054 newinfo->size -= off;
1055 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1059 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1060 if (info->hook_entry[i] &&
1061 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1062 newinfo->hook_entry[i] -= off;
1063 if (info->underflow[i] &&
1064 (e < (struct ipt_entry *)(base + info->underflow[i])))
1065 newinfo->underflow[i] -= off;
/* compat_table_info(): build a compat-sized xt_table_info describing an
 * existing table, by copying the header and running compat_calc_entry()
 * over this CPU's rule copy. (The -EINVAL return for NULL args is on an
 * elided line.)
 */
1070 static int compat_table_info(const struct xt_table_info *info,
1071 struct xt_table_info *newinfo)
1073 void *loc_cpu_entry;
1075 if (!newinfo || !info)
1078 /* we dont care about newinfo->entries[] */
1079 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1080 newinfo->initial_entries = 0;
1081 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1082 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1083 compat_calc_entry, info, loc_cpu_entry,
/* get_info(): implement IPT_SO_GET_INFO — look up the named table (auto-
 * loading "iptable_<name>" if needed) and copy its hook entries,
 * underflows, entry count and size to userspace; under CONFIG_COMPAT the
 * sizes are first recomputed for 32-bit layout.
 * NOTE(review): the -EINVAL/-EFAULT returns, the compat `if (compat)`
 * guards, and the table put/unref lines are elided in this dump.
 */
1088 static int get_info(void __user *user, int *len, int compat)
1090 char name[IPT_TABLE_MAXNAMELEN];
1094 if (*len != sizeof(struct ipt_getinfo)) {
1095 duprintf("length %u != %zu\n", *len,
1096 sizeof(struct ipt_getinfo));
1100 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL-termination of the userspace-supplied table name. */
1103 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1104 #ifdef CONFIG_COMPAT
1106 xt_compat_lock(AF_INET);
1108 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1109 "iptable_%s", name);
1110 if (t && !IS_ERR(t)) {
1111 struct ipt_getinfo info;
1112 struct xt_table_info *private = t->private;
1114 #ifdef CONFIG_COMPAT
/* Recompute size/offsets as a 32-bit task would see them. */
1116 struct xt_table_info tmp;
1117 ret = compat_table_info(private, &tmp);
1118 xt_compat_flush_offsets(AF_INET);
1122 info.valid_hooks = t->valid_hooks;
1123 memcpy(info.hook_entry, private->hook_entry,
1124 sizeof(info.hook_entry));
1125 memcpy(info.underflow, private->underflow,
1126 sizeof(info.underflow));
1127 info.num_entries = private->number;
1128 info.size = private->size;
1129 strcpy(info.name, name);
1131 if (copy_to_user(user, &info, *len) != 0)
1139 ret = t ? PTR_ERR(t) : -ENOENT;
1140 #ifdef CONFIG_COMPAT
1142 xt_compat_unlock(AF_INET);
/* get_entries(): implement IPT_SO_GET_ENTRIES — validate the requested
 * buffer length against the table's actual size, then delegate to
 * copy_entries_to_user(). (Error returns, the -EAGAIN size-mismatch
 * path, and the table unlock/unref are on elided lines.)
 */
1148 get_entries(struct ipt_get_entries __user *uptr, int *len)
1151 struct ipt_get_entries get;
1154 if (*len < sizeof(get)) {
1155 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1158 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1160 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1161 duprintf("get_entries: %u != %zu\n",
1162 *len, sizeof(get) + get.size);
1166 t = xt_find_table_lock(AF_INET, get.name);
1167 if (t && !IS_ERR(t)) {
1168 struct xt_table_info *private = t->private;
1169 duprintf("t->private->number = %u\n", private->number);
1170 if (get.size == private->size)
1171 ret = copy_entries_to_user(private->size,
1172 t, uptr->entrytable);
1174 duprintf("get_entries: I've got %u not %u!\n",
1175 private->size, get.size);
1181 ret = t ? PTR_ERR(t) : -ENOENT;
/* __do_replace(): shared core of native and compat SO_SET_REPLACE — swap
 * a validated new table in via xt_replace_table(), adjust module use
 * counts, snapshot the old table's counters for userspace, and free the
 * old table. NOTE(review): this dump elides the vmalloc-failure check,
 * the module_put/try_module_get lines guarded by the visible `if`
 * conditions, the xt_replace_table failure branch, and the unwind labels'
 * bodies — only the happy-path skeleton is visible.
 */
1187 __do_replace(const char *name, unsigned int valid_hooks,
1188 struct xt_table_info *newinfo, unsigned int num_counters,
1189 void __user *counters_ptr)
1193 struct xt_table_info *oldinfo;
1194 struct xt_counters *counters;
1195 void *loc_cpu_old_entry;
1198 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1204 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1205 "iptable_%s", name);
1206 if (!t || IS_ERR(t)) {
1207 ret = t ? PTR_ERR(t) : -ENOENT;
1208 goto free_newinfo_counters_untrans;
1212 if (valid_hooks != t->valid_hooks) {
1213 duprintf("Valid hook crap: %08X vs %08X\n",
1214 valid_hooks, t->valid_hooks);
1219 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1223 /* Update module usage count based on number of rules */
1224 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1225 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1226 if ((oldinfo->number > oldinfo->initial_entries) ||
1227 (newinfo->number <= oldinfo->initial_entries))
1229 if ((oldinfo->number > oldinfo->initial_entries) &&
1230 (newinfo->number <= oldinfo->initial_entries))
1233 /* Get the old counters. */
1234 get_counters(oldinfo, counters);
1235 /* Decrease module usage counts and free resource */
1236 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1237 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1239 xt_free_table_info(oldinfo);
1240 if (copy_to_user(counters_ptr, counters,
1241 sizeof(struct xt_counters) * num_counters) != 0)
1250 free_newinfo_counters_untrans:
/* do_replace(): native SO_SET_REPLACE entry point — copy the ipt_replace
 * header and rule blob from userspace, run translate_table() over it,
 * then hand off to __do_replace(). On failure after translation, entries
 * are cleaned up and the table info freed.
 * NOTE(review): -EFAULT/-ENOMEM returns, the size check on the blob copy,
 * and the success-path return are on elided lines.
 */
1257 do_replace(void __user *user, unsigned int len)
1260 struct ipt_replace tmp;
1261 struct xt_table_info *newinfo;
1262 void *loc_cpu_entry;
1264 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1267 /* overflow check */
1268 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1271 newinfo = xt_alloc_table_info(tmp.size);
1275 /* choose the copy that is on our node/cpu */
1276 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1277 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1283 ret = translate_table(tmp.name, tmp.valid_hooks,
1284 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1285 tmp.hook_entry, tmp.underflow);
1289 duprintf("ip_tables: Translated table\n");
1291 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1292 tmp.num_counters, tmp.counters);
1294 goto free_newinfo_untrans;
1297 free_newinfo_untrans:
1298 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1300 xt_free_table_info(newinfo);
/* add_counter_to_entry(): per-entry callback for SO_SET_ADD_COUNTERS —
 * folds the userspace-provided addme[*i] deltas into one rule's counters
 * (applied to a single CPU's copy; aggregation makes it come out right).
 */
1304 /* We're lazy, and add to the first CPU; overflow works its fey magic
1305 * and everything is OK. */
1307 add_counter_to_entry(struct ipt_entry *e,
1308 const struct xt_counters addme[],
1312 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1314 (long unsigned int)e->counters.pcnt,
1315 (long unsigned int)e->counters.bcnt,
1316 (long unsigned int)addme[*i].pcnt,
1317 (long unsigned int)addme[*i].bcnt);
1320 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* do_add_counters(): implement SO_SET_ADD_COUNTERS for native and compat
 * callers — read the (possibly compat) header, validate the counter
 * count against the table, and apply the deltas under the table write
 * lock. NOTE(review): the compat/native `ptmp` selection, several error
 * assignments, and the free/unref unwind labels are on elided lines.
 */
1327 do_add_counters(void __user *user, unsigned int len, int compat)
1330 struct xt_counters_info tmp;
1331 struct xt_counters *paddc;
1332 unsigned int num_counters;
1337 struct xt_table_info *private;
1339 void *loc_cpu_entry;
1340 #ifdef CONFIG_COMPAT
1341 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat and native callers. */
1345 size = sizeof(struct compat_xt_counters_info);
1350 size = sizeof(struct xt_counters_info);
1353 if (copy_from_user(ptmp, user, size) != 0)
1356 #ifdef CONFIG_COMPAT
1358 num_counters = compat_tmp.num_counters;
1359 name = compat_tmp.name;
1363 num_counters = tmp.num_counters;
1367 if (len != size + num_counters * sizeof(struct xt_counters))
1370 paddc = vmalloc_node(len - size, numa_node_id());
1374 if (copy_from_user(paddc, user + size, len - size) != 0) {
1379 t = xt_find_table_lock(AF_INET, name);
1380 if (!t || IS_ERR(t)) {
1381 ret = t ? PTR_ERR(t) : -ENOENT;
1385 write_lock_bh(&t->lock);
1386 private = t->private;
1387 if (private->number != num_counters) {
1389 goto unlock_up_free;
1393 /* Choose the copy that is on our node */
1394 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1395 IPT_ENTRY_ITERATE(loc_cpu_entry,
1397 add_counter_to_entry,
1401 write_unlock_bh(&t->lock);
/* 32-bit (compat) layout of struct ipt_replace, with compat pointer and
 * entry types. NOTE(review): the num_entries/size/num_counters fields
 * fall on elided lines in this dump.
 */
1410 #ifdef CONFIG_COMPAT
1411 struct compat_ipt_replace {
1412 char name[IPT_TABLE_MAXNAMELEN];
1416 u32 hook_entry[NF_INET_NUMHOOKS];
1417 u32 underflow[NF_INET_NUMHOOKS];
1419 compat_uptr_t counters; /* struct ipt_counters * */
1420 struct compat_ipt_entry entries[0];
/* compat_copy_entry_to_user(): serialize one native entry into the
 * compat user buffer — copy the header, patch in aggregated counters,
 * emit each match and the target in compat form, then fix up the
 * target/next offsets shrunk by the layout difference.
 * NOTE(review): -EFAULT assignments, the origsize capture, and the
 * `(*i)++` / return lines are elided.
 */
1424 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1425 compat_uint_t *size, struct xt_counters *counters,
1428 struct ipt_entry_target *t;
1429 struct compat_ipt_entry __user *ce;
1430 u_int16_t target_offset, next_offset;
1431 compat_uint_t origsize;
1436 ce = (struct compat_ipt_entry __user *)*dstptr;
1437 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1440 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1443 *dstptr += sizeof(struct compat_ipt_entry);
1444 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1446 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1447 target_offset = e->target_offset - (origsize - *size);
1450 t = ipt_get_target(e);
1451 ret = xt_compat_target_to_user(t, dstptr, size);
1455 next_offset = e->next_offset - (origsize - *size);
1456 if (put_user(target_offset, &ce->target_offset))
1458 if (put_user(next_offset, &ce->next_offset))
/* compat_find_calc_match(): resolve one match module by name (module
 * autoload as "ipt_<name>") and accumulate its native-vs-compat size
 * delta into *size. (The `(*i)++` and success return are on elided
 * lines.)
 */
1468 compat_find_calc_match(struct ipt_entry_match *m,
1470 const struct ipt_ip *ip,
1471 unsigned int hookmask,
1474 struct xt_match *match;
1476 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1477 m->u.user.revision),
1478 "ipt_%s", m->u.user.name);
1479 if (IS_ERR(match) || !match) {
1480 duprintf("compat_check_calc_match: `%s' not found\n",
1482 return match ? PTR_ERR(match) : -ENOENT;
1484 m->u.kernel.match = match;
1485 *size += xt_compat_match_offset(match);
/* compat_release_match(): drop the module ref taken above; non-NULL `i`
 * bounds the release to the first *i matches. */
1492 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1494 if (i && (*i)-- == 0)
1497 module_put(m->u.kernel.match->me);
/* compat_release_entry(): release all match refs and the target ref for
 * one compat entry (unwind helper for compat translation failures). */
1502 compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1504 struct ipt_entry_target *t;
1506 if (i && (*i)-- == 0)
1509 /* Cleanup all matches */
1510 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1511 t = compat_ipt_get_target(e);
1512 module_put(t->u.kernel.target->me);
/* check_compat_entry_size_and_hooks(): compat analogue of the native
 * first-pass check — validate alignment/size of one compat entry,
 * resolve its matches and target (accumulating the compat size delta
 * `off` and registering it with xt_compat), record hook entry points,
 * and clear counters. On failure, release the target ref and the first
 * `j` match refs.
 * NOTE(review): the newinfo->size adjustment, several error `goto`s and
 * the success return are on elided lines in this dump.
 */
1517 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1518 struct xt_table_info *newinfo,
1520 unsigned char *base,
1521 unsigned char *limit,
1522 unsigned int *hook_entries,
1523 unsigned int *underflows,
1527 struct ipt_entry_target *t;
1528 struct xt_target *target;
1529 unsigned int entry_offset;
1532 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1533 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1534 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1535 duprintf("Bad offset %p, limit = %p\n", e, limit);
1539 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1540 sizeof(struct compat_xt_entry_target)) {
1541 duprintf("checking: element %p size %u\n",
1546 /* For purposes of check_entry casting the compat entry is fine */
1547 ret = check_entry((struct ipt_entry *)e, name);
1551 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1552 entry_offset = (void *)e - (void *)base;
1554 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
1555 &e->ip, e->comefrom, &off, &j);
1557 goto release_matches;
1559 t = compat_ipt_get_target(e);
1560 target = try_then_request_module(xt_find_target(AF_INET,
1562 t->u.user.revision),
1563 "ipt_%s", t->u.user.name);
1564 if (IS_ERR(target) || !target) {
1565 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1567 ret = target ? PTR_ERR(target) : -ENOENT;
1568 goto release_matches;
1570 t->u.kernel.target = target;
1572 off += xt_compat_target_offset(target);
1574 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1578 /* Check hooks & underflows */
1579 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1580 if ((unsigned char *)e - base == hook_entries[h])
1581 newinfo->hook_entry[h] = hook_entries[h];
1582 if ((unsigned char *)e - base == underflows[h])
1583 newinfo->underflow[h] = underflows[h];
1586 /* Clear counters and comefrom */
1587 memset(&e->counters, 0, sizeof(e->counters));
/* error unwind: drop target ref, then the first j match refs */
1594 module_put(t->u.kernel.target->me);
1596 IPT_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Convert one already-checked compat entry into native layout at *dstptr.
 * Copies the header and counters, translates each match and the target via
 * the xt_compat_*_from_user helpers (which advance *dstptr and adjust
 * *size), fixes up target_offset/next_offset for the size growth, and
 * shifts any hook entry points / underflows located after this entry.
 * NOTE(review): local declarations (ret, h) and the return are among the
 * lines missing from this extract.
 */
1601 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1602 unsigned int *size, const char *name,
1603 struct xt_table_info *newinfo, unsigned char *base)
1605 struct ipt_entry_target *t;
1606 struct xt_target *target;
1607 struct ipt_entry *de;
1608 unsigned int origsize;
1613 de = (struct ipt_entry *)*dstptr;
1614 memcpy(de, e, sizeof(struct ipt_entry));
1615 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1617 *dstptr += sizeof(struct ipt_entry);
/* native entry header is bigger than the compat one; track the growth */
1618 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1620 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth so far; offsets move forward */
1624 de->target_offset = e->target_offset - (origsize - *size);
1625 t = compat_ipt_get_target(e);
1626 target = t->u.kernel.target;
1627 xt_compat_target_from_user(t, dstptr, size);
1629 de->next_offset = e->next_offset - (origsize - *size);
1630 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
/* hook/underflow offsets beyond this entry shift by the growth */
1631 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1632 newinfo->hook_entry[h] -= origsize - *size;
1633 if ((unsigned char *)de - base < newinfo->underflow[h])
1634 newinfo->underflow[h] -= origsize - *size;
/*
 * Final per-entry validation after compat translation: run each match's
 * checkentry hook, then the target's.  On failure, unwind the matches
 * validated so far (counted in j, declared on a missing line) via
 * cleanup_match.
 */
1639 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1645 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip,
1648 goto cleanup_matches;
1650 ret = check_target(e, name);
1652 goto cleanup_matches;
/* cleanup_matches: release only the j matches already checked */
1658 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a whole compat (32-bit) ruleset into native layout:
 *  1. under xt_compat_lock, size-check every compat entry and compute the
 *     translated size (check_compat_entry_size_and_hooks);
 *  2. verify entry count and that every valid hook has an entry point and
 *     underflow;
 *  3. allocate the native table, copy/translate each entry
 *     (compat_copy_entry_from_user), drop the compat offset bookkeeping
 *     and the lock;
 *  4. mark_source_chains + per-entry compat_check_entry, with full unwind
 *     on failure;
 *  5. replicate the translated ruleset to every other possible CPU.
 * On success *pinfo/*pentry0 are swapped to the new table and the old
 * info is freed.
 * NOTE(review): allocation of info/entry0, several gotos/labels and
 * returns are on lines missing from this extract.
 */
1663 translate_compat_table(const char *name,
1664 unsigned int valid_hooks,
1665 struct xt_table_info **pinfo,
1667 unsigned int total_size,
1668 unsigned int number,
1669 unsigned int *hook_entries,
1670 unsigned int *underflows)
1673 struct xt_table_info *newinfo, *info;
1674 void *pos, *entry0, *entry1;
1681 info->number = number;
1683 /* Init all hooks to impossible value. */
1684 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1685 info->hook_entry[i] = 0xFFFFFFFF;
1686 info->underflow[i] = 0xFFFFFFFF;
1689 duprintf("translate_compat_table: size %u\n", info->size);
/* compat offset table is global per-family: serialize with other users */
1691 xt_compat_lock(AF_INET);
1692 /* Walk through entries, checking offsets. */
1693 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1694 check_compat_entry_size_and_hooks,
1695 info, &size, entry0,
1696 entry0 + total_size,
1697 hook_entries, underflows, &j, name);
1703 duprintf("translate_compat_table: %u not %u entries\n",
1708 /* Check hooks all assigned */
1709 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1710 /* Only hooks which are valid */
1711 if (!(valid_hooks & (1 << i)))
/* 0xFFFFFFFF sentinel still present => userland never set this hook */
1713 if (info->hook_entry[i] == 0xFFFFFFFF) {
1714 duprintf("Invalid hook entry %u %u\n",
1715 i, hook_entries[i]);
1718 if (info->underflow[i] == 0xFFFFFFFF) {
1719 duprintf("Invalid underflow %u %u\n",
/* size now includes all native-vs-compat growth computed in pass 1 */
1726 newinfo = xt_alloc_table_info(size);
1730 newinfo->number = number;
1731 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1732 newinfo->hook_entry[i] = info->hook_entry[i];
1733 newinfo->underflow[i] = info->underflow[i];
/* translate into the current CPU's copy first */
1735 entry1 = newinfo->entries[raw_smp_processor_id()];
1738 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1739 compat_copy_entry_from_user,
1740 &pos, &size, name, newinfo, entry1);
1741 xt_compat_flush_offsets(AF_INET);
1742 xt_compat_unlock(AF_INET);
1747 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1751 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* partial-failure unwind: release unchecked compat entries (from i on),
 * clean up the i native entries that did pass compat_check_entry */
1755 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1756 compat_release_entry, &j);
1757 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1758 xt_free_table_info(newinfo);
1762 /* And one copy for every other CPU */
1763 for_each_possible_cpu(i)
1764 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1765 memcpy(newinfo->entries[i], entry1, newinfo->size);
1769 xt_free_table_info(info);
1773 xt_free_table_info(newinfo);
/* early-error unwind: drop refs on the j compat entries already checked */
1775 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1778 xt_compat_flush_offsets(AF_INET);
1779 xt_compat_unlock(AF_INET);
/*
 * IPT_SO_SET_REPLACE handler for 32-bit userland: copy in the compat
 * replace header and ruleset blob, translate it to native layout, then
 * hand off to the common __do_replace().  The ruleset copy that survives
 * is freed by __do_replace; on translation-OK/replace-fail the translated
 * entries are cleaned up here.
 * NOTE(review): return statements and some error labels are on lines
 * missing from this extract.
 */
1784 compat_do_replace(void __user *user, unsigned int len)
1787 struct compat_ipt_replace tmp;
1788 struct xt_table_info *newinfo;
1789 void *loc_cpu_entry;
1791 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1794 /* overflow check */
/* reject sizes that would overflow the per-CPU allocation math */
1795 if (tmp.size >= INT_MAX / num_possible_cpus())
1797 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1800 newinfo = xt_alloc_table_info(tmp.size);
1804 /* choose the copy that is on our node/cpu */
1805 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1806 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1812 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1813 &newinfo, &loc_cpu_entry, tmp.size,
1814 tmp.num_entries, tmp.hook_entry,
1819 duprintf("compat_do_replace: Translated table\n");
/* compat_ptr(): convert the 32-bit counters pointer to a native one */
1821 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1822 tmp.num_counters, compat_ptr(tmp.counters));
1824 goto free_newinfo_untrans;
1827 free_newinfo_untrans:
1828 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1830 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * IPT_SO_SET_REPLACE to the compat replace path and
 * IPT_SO_SET_ADD_COUNTERS to do_add_counters() in compat mode (third
 * argument 1).
 */
1835 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1840 if (!capable(CAP_NET_ADMIN))
1844 case IPT_SO_SET_REPLACE:
1845 ret = compat_do_replace(user, len);
1848 case IPT_SO_SET_ADD_COUNTERS:
/* compat == 1: counters struct has 32-bit layout */
1849 ret = do_add_counters(user, len, 1);
1853 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit layout of the IPT_SO_GET_ENTRIES request/reply: table name,
 * total size (on a missing line in this extract), then a flexible array
 * of compat entries.
 */
1860 struct compat_ipt_get_entries {
1861 char name[IPT_TABLE_MAXNAMELEN];
1863 struct compat_ipt_entry entrytable[0];
/*
 * Dump the table's ruleset to a 32-bit userland buffer: snapshot the
 * counters, then walk this CPU's entry copy converting each entry with
 * compat_copy_entry_to_user.  Counter freeing and the return are on
 * lines missing from this extract.
 */
1867 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1868 void __user *userptr)
1870 struct xt_counters *counters;
1871 struct xt_table_info *private = table->private;
1875 void *loc_cpu_entry;
1878 counters = alloc_counters(table);
1879 if (IS_ERR(counters))
1880 return PTR_ERR(counters);
1882 /* choose the copy that is on our node/cpu, ...
1883 * This choice is lazy (because current thread is
1884 * allowed to migrate to another cpu)
1886 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1889 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1890 compat_copy_entry_to_user,
1891 &pos, &size, counters, &i);
/*
 * IPT_SO_GET_ENTRIES handler for 32-bit userland: validate the request
 * length against the header and the caller-announced size, look up the
 * table, confirm the caller's size matches the compat-translated table
 * size (compat_table_info), then stream the entries out.  Held under
 * xt_compat_lock because translation uses the shared offset table.
 * NOTE(review): returns, module_put and some braces are on lines missing
 * from this extract.
 */
1898 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1901 struct compat_ipt_get_entries get;
1904 if (*len < sizeof(get)) {
1905 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1909 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* total length must be exactly header + announced entry blob */
1912 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1913 duprintf("compat_get_entries: %u != %zu\n",
1914 *len, sizeof(get) + get.size);
1918 xt_compat_lock(AF_INET);
1919 t = xt_find_table_lock(AF_INET, get.name);
1920 if (t && !IS_ERR(t)) {
1921 struct xt_table_info *private = t->private;
1922 struct xt_table_info info;
1923 duprintf("t->private->number = %u\n", private->number);
/* compute what the table looks like in compat layout */
1924 ret = compat_table_info(private, &info);
1925 if (!ret && get.size == info.size) {
1926 ret = compat_copy_entries_to_user(private->size,
1927 t, uptr->entrytable);
1929 duprintf("compat_get_entries: I've got %u not %u!\n",
1930 private->size, get.size);
1933 xt_compat_flush_offsets(AF_INET);
1937 ret = t ? PTR_ERR(t) : -ENOENT;
1939 xt_compat_unlock(AF_INET);
/* Forward declaration: the compat getter falls through to the native one
 * for commands that need no 32-bit translation. */
1943 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: requires CAP_NET_ADMIN; GET_INFO is
 * handled in compat mode, GET_ENTRIES via the compat copier, everything
 * else is delegated to the native do_ipt_get_ctl().
 */
1946 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1950 if (!capable(CAP_NET_ADMIN))
1954 case IPT_SO_GET_INFO:
1955 ret = get_info(user, len, 1);
1957 case IPT_SO_GET_ENTRIES:
1958 ret = compat_get_entries(user, len);
1961 ret = do_ipt_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * IPT_SO_SET_REPLACE to do_replace() and IPT_SO_SET_ADD_COUNTERS to
 * do_add_counters() in native mode (third argument 0).
 */
1968 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1972 if (!capable(CAP_NET_ADMIN))
1976 case IPT_SO_SET_REPLACE:
1977 ret = do_replace(user, len);
1980 case IPT_SO_SET_ADD_COUNTERS:
1981 ret = do_add_counters(user, len, 0);
1985 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: requires CAP_NET_ADMIN.  Handles table
 * info/entries dumps, plus match/target revision queries, which validate
 * a fixed-size ipt_get_revision request and probe xt_find_revision()
 * (autoloading "ipt_<name>" if needed).
 * NOTE(review): the revision-query error handling and the target/match
 * selection detail sit on lines missing from this extract.
 */
1993 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1997 if (!capable(CAP_NET_ADMIN))
2001 case IPT_SO_GET_INFO:
2002 ret = get_info(user, len, 0);
2005 case IPT_SO_GET_ENTRIES:
2006 ret = get_entries(user, len);
2009 case IPT_SO_GET_REVISION_MATCH:
2010 case IPT_SO_GET_REVISION_TARGET: {
2011 struct ipt_get_revision rev;
/* request must be exactly the fixed-size revision struct */
2014 if (*len != sizeof(rev)) {
2018 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2023 if (cmd == IPT_SO_GET_REVISION_TARGET)
2028 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2031 "ipt_%s", rev.name);
2036 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
/*
 * Register a built-in table (filter/nat/mangle/raw) with its initial
 * ruleset: allocate per-CPU table memory, copy the template ruleset into
 * this CPU's slot, translate/validate it, then hand it to xt_register_table
 * with a zeroed bootstrap info.  The newinfo is freed on either failure
 * path; the success return is on a line missing from this extract.
 */
2043 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2046 struct xt_table_info *newinfo;
2047 struct xt_table_info bootstrap
2048 = { 0, 0, 0, { 0 }, { 0 }, { } };
2049 void *loc_cpu_entry;
2051 newinfo = xt_alloc_table_info(repl->size);
2055 /* choose the copy on our node/cpu, but dont care about preemption */
2056 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2057 memcpy(loc_cpu_entry, repl->entries, repl->size);
2059 ret = translate_table(table->name, table->valid_hooks,
2060 newinfo, loc_cpu_entry, repl->size,
2065 xt_free_table_info(newinfo);
2069 ret = xt_register_table(table, &bootstrap, newinfo);
2071 xt_free_table_info(newinfo);
/*
 * Tear down a table: unhook it from x_tables, run cleanup_entry over
 * every rule (dropping match/target module references), and free the
 * per-CPU table memory.
 */
2078 void ipt_unregister_table(struct xt_table *table)
2080 struct xt_table_info *private;
2081 void *loc_cpu_entry;
2083 private = xt_unregister_table(table);
2085 /* Decrease module usage counts and free resources */
2086 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2087 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2088 xt_free_table_info(private);
2091 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/*
 * test_type == 0xFF is the wildcard ("any type"); otherwise the packet's
 * type must equal test_type and its code fall in [min_code, max_code].
 * The final parameter (on a missing line) XORs in the invert flag.
 */
2093 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2094 u_int8_t type, u_int8_t code,
2097 return ((test_type == 0xFF) ||
2098 (type == test_type && code >= min_code && code <= max_code))
/*
 * xt_match ->match() hook for "-p icmp --icmp-type": pull the ICMP header
 * at protoff and compare type/code against the rule's range, honoring
 * IPT_ICMP_INV.  A truncated header is treated as hostile and the packet
 * is dropped (hotdrop, set on a line missing from this extract); fragments
 * are rejected up front since only the first fragment carries the header.
 */
2103 icmp_match(const struct sk_buff *skb,
2104 const struct net_device *in,
2105 const struct net_device *out,
2106 const struct xt_match *match,
2107 const void *matchinfo,
2109 unsigned int protoff,
2112 struct icmphdr _icmph, *ic;
2113 const struct ipt_icmp *icmpinfo = matchinfo;
2115 /* Must not be a fragment. */
/* copies the header into _icmph if it is not linear in the skb */
2119 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2121 /* We've been asked to examine this packet, and we
2122 * can't. Hence, no choice but to drop.
2124 duprintf("Dropping evil ICMP tinygram.\n");
2129 return icmp_type_code_match(icmpinfo->type,
/* !! normalizes the invert flag bit to 0/1 */
2133 !!(icmpinfo->invflags&IPT_ICMP_INV));
2136 /* Called when user tries to insert an entry of this type. */
/*
 * xt_match ->checkentry() hook: accept the rule only if no invflags other
 * than IPT_ICMP_INV are set.  Returns nonzero on success per the old
 * checkentry convention.
 */
2138 icmp_checkentry(const char *tablename,
2140 const struct xt_match *match,
2142 unsigned int hook_mask)
2144 const struct ipt_icmp *icmpinfo = matchinfo;
2146 /* Must specify no unknown invflags */
2147 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2150 /* The built-in targets: standard (NULL) and error. */
/*
 * IPT_STANDARD_TARGET: verdict targets (ACCEPT/DROP/jump) encoded as a
 * plain int; the compat hooks convert between 32- and 64-bit int layout.
 */
2151 static struct xt_target ipt_standard_target __read_mostly = {
2152 .name = IPT_STANDARD_TARGET,
2153 .targetsize = sizeof(int),
2155 #ifdef CONFIG_COMPAT
2156 .compatsize = sizeof(compat_int_t),
2157 .compat_from_user = compat_standard_from_user,
2158 .compat_to_user = compat_standard_to_user,
/*
 * IPT_ERROR_TARGET: placed at chain ends by userland; hitting it at
 * runtime invokes ipt_error().  Its targetsize carries the error name.
 */
2162 static struct xt_target ipt_error_target __read_mostly = {
2163 .name = IPT_ERROR_TARGET,
2164 .target = ipt_error,
2165 .targetsize = IPT_FUNCTION_MAXNAMELEN,
/*
 * setsockopt/getsockopt registration for the IPT_SO_* range, wiring the
 * native handlers and, under CONFIG_COMPAT, the 32-bit ones.
 */
2169 static struct nf_sockopt_ops ipt_sockopts = {
2171 .set_optmin = IPT_BASE_CTL,
2172 .set_optmax = IPT_SO_SET_MAX+1,
2173 .set = do_ipt_set_ctl,
2175 #ifdef CONFIG_COMPAT
2175 .compat_set = compat_do_ipt_set_ctl,
2177 .get_optmin = IPT_BASE_CTL,
2178 .get_optmax = IPT_SO_GET_MAX+1,
2179 .get = do_ipt_get_ctl,
2180 #ifdef CONFIG_COMPAT
2181 .compat_get = compat_do_ipt_get_ctl,
2183 .owner = THIS_MODULE,
/*
 * Built-in "icmp" match registration: ties icmp_match/icmp_checkentry to
 * IPPROTO_ICMP packets with an ipt_icmp match payload.
 */
2186 static struct xt_match icmp_matchstruct __read_mostly = {
2188 .match = icmp_match,
2189 .matchsize = sizeof(struct ipt_icmp),
2190 .checkentry = icmp_checkentry,
2191 .proto = IPPROTO_ICMP,
/*
 * Module init: register the AF_INET x_tables protocol, the two built-in
 * targets, the icmp match, and the sockopt interface — in that order —
 * with reverse-order unwinding (the labels at the bottom) if any step
 * fails.  Error-check lines between steps are missing from this extract.
 */
2195 static int __init ip_tables_init(void)
2199 ret = xt_proto_init(AF_INET);
2203 /* Noone else will be downing sem now, so we won't sleep */
2204 ret = xt_register_target(&ipt_standard_target);
2207 ret = xt_register_target(&ipt_error_target);
2210 ret = xt_register_match(&icmp_matchstruct);
2214 /* Register setsockopt */
2215 ret = nf_register_sockopt(&ipt_sockopts);
2219 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
/* unwind labels: undo registrations in reverse order */
2223 xt_unregister_match(&icmp_matchstruct);
2225 xt_unregister_target(&ipt_error_target);
2227 xt_unregister_target(&ipt_standard_target);
2229 xt_proto_fini(AF_INET);
/*
 * Module exit: unregister everything ip_tables_init() registered, in
 * reverse order.
 */
2234 static void __exit ip_tables_fini(void)
2236 nf_unregister_sockopt(&ipt_sockopts);
2238 xt_unregister_match(&icmp_matchstruct);
2239 xt_unregister_target(&ipt_error_target);
2240 xt_unregister_target(&ipt_standard_target);
2242 xt_proto_fini(AF_INET);
/* Public API for iptable_filter/nat/mangle/raw and conntrack helpers. */
2245 EXPORT_SYMBOL(ipt_register_table);
2246 EXPORT_SYMBOL(ipt_unregister_table);
2247 EXPORT_SYMBOL(ipt_do_table);
2248 module_init(ip_tables_init);
2249 module_exit(ip_tables_fini);